Column schema (⌀ = nullable):

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: b37315c092291462fb88dadcb559cba929629c77 | size: 8,278 | ext: py | lang: Python
max_stars: hr_little_api/builders.py | jwoo92/hr-little-api | 624c6054223cbc90ae6327e24236beebbc205fe6 | ["Apache-2.0"] | count: 9 | 2019-12-28T14:02:23.000Z to 2022-02-17T01:46:39.000Z
max_issues: hr_little_api/builders.py | jwoo92/hr-little-api | 624c6054223cbc90ae6327e24236beebbc205fe6 | ["Apache-2.0"] | count: 2 | 2020-01-19T03:39:10.000Z to 2021-09-19T05:28:56.000Z
max_forks: hr_little_api/builders.py | jwoo92/hr-little-api | 624c6054223cbc90ae6327e24236beebbc205fe6 | ["Apache-2.0"] | count: 5 | 2019-10-28T16:35:23.000Z to 2021-05-31T10:03:02.000Z
content:
# Copyright 2019 Hanson Robotics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
from abc import ABC, abstractmethod
from enum import Enum, auto
from typing import List
class CallbackType(Enum):
""" The type of callback command. """
start = 'TS'
end = 'TE'
class WaitType(Enum):
""" The type of wait command. """
wait_for_motors = auto()
wait_for_motors_and_speaking = auto()
wait = auto()
wait_for_say_start = auto()
wait_for_say_done = auto()
class WalkDirection(Enum):
""" The directions the robot can walk. """
forward = auto()
backward = auto()
left = auto()
right = auto()
class MotorId(Enum):
""" The ids of the robot's motors. """
right_arm = 'AR'
lip_corners = 'CH'
eyebrows = 'EB'
eyelids = 'EL'
head_pitch = 'HN'
head_turn = 'HT'
mouth = 'MO'
class CommandBuilder(ABC):
""" Abstract CommandBuilder interface. CommandBuilders are used to generate the actions the robot can perform. """
@abstractmethod
def duration(self) -> float:
""" Implement, calculates the duration in seconds that the command will take to execute on the robot.
:return: duration in seconds.
"""
raise NotImplementedError("Please implement the `duration` method")
@abstractmethod
def build(self) -> str:
""" Implement, builds a string command that makes the robot perform an action.
:return: a string command.
"""
raise NotImplementedError("Please implement the `build` method")
class WaitCommandBuilder(CommandBuilder):
""" WaitCommandBuilder """
__COMMANDS = {WaitType.wait_for_motors: "<PM>",
WaitType.wait_for_motors_and_speaking: "<PA>",
WaitType.wait: "<PA={}>",
WaitType.wait_for_say_start: "<SS>",
WaitType.wait_for_say_done: "<PS>"}
def __init__(self, wait_type: WaitType, seconds: float = 0.):
CommandBuilder.__init__(self)
self.wait_type = wait_type
self.seconds = seconds
def duration(self) -> float:
return self.seconds
def build(self) -> str:
if self.wait_type is WaitType.wait:
return WaitCommandBuilder.__COMMANDS[self.wait_type].format(self.seconds)
return WaitCommandBuilder.__COMMANDS[self.wait_type]
class CallbackCommandBuilder(CommandBuilder):
""" Builds a command that triggers a callback when certain commands have started or finished. """
def __init__(self, callback_type: CallbackType, callback_id: str):
""" Constructor for CallbackCommandBuilder.
:param callback_type: the type of callback to be triggered, either at the start of a command or the end.
:param callback_id: the string id to give the callback.
"""
CommandBuilder.__init__(self)
self.callback_type = callback_type
self.callback_id = callback_id
def duration(self) -> float:
return 0
def build(self) -> str:
return "<{}={}>".format(self.callback_type.value, self.callback_id)
class CommandListBuilder(CommandBuilder):
""" Builds a list of commands by concatenating them together """
def __init__(self, commands: List[CommandBuilder]):
""" Constructor for CommandListBuilder.
:param commands: the list of commands to concatenate together.
"""
CommandBuilder.__init__(self)
self.commands = commands
def duration(self) -> float:
return 0
def build(self) -> str:
str_commands = []
for cmd in self.commands:
str_commands.append(cmd.build())
command = ''.join(str_commands)
return command
class WalkCommandBuilder(CommandBuilder):
""" Builds commands that make the robot walk. """
__STEP_TIMES = {WalkDirection.forward: 2.3,
WalkDirection.backward: 2.4,
WalkDirection.left: 1.1,
WalkDirection.right: 1.1}
__COMMANDS = {WalkDirection.forward: "<WK=W2,{}>",
WalkDirection.backward: "<WK=WB,{}>",
WalkDirection.left: "<WK=WL,{}>",
WalkDirection.right: "<WK=WR,{}>"}
def __init__(self, direction: WalkDirection, steps: int = 4):
""" Constructor for WalkCommandBuilder.
:param direction: the direction to walk, one of forward, backward, left or right.
:param steps: the number of steps to take. Must be between 1 and 10 steps. For forward and backward, a step
means both feet step forward one time each. For left and right a step means that the foot in question moves
the number of times given by the step parameter.
"""
if not (0 < steps < 11):
raise ValueError(
'WalkCommandBuilder.__init__ expected steps to be between 1 and 10, however received {}'.format(steps))
CommandBuilder.__init__(self)
self.steps = steps
self.direction = direction
def duration(self) -> float:
return WalkCommandBuilder.__STEP_TIMES[self.direction] * self.steps
def build(self) -> str:
steps = self.steps
if self.steps == 1:
steps = 0
cmd = WalkCommandBuilder.__COMMANDS[self.direction].format(steps, self.duration())
return cmd
class MotorCommandBuilder(CommandBuilder):
""" Builds commands to move the robot's motors. """
def __init__(self, motor_id: MotorId, position: float, seconds: float):
""" Constructor for MotorCommandBuilder.
:param motor_id: the ID of the motor.
:param position: the position to move the motor to, between 0.0 and 1.0.
:param seconds: the time in seconds that the motor should take to move to the desired position, between 0.0 and
10.0.
"""
if not (0. <= position <= 1.):
raise ValueError(
"MotorCommandBuilder.__init__ expected 'position' to be between "
"0.0 and 1.0, however received '{}'".format(position))
if not (0. <= seconds <= 10.):
raise ValueError(
"MotorCommandBuilder.__init__ expected 'seconds' to be between "
"0.0 and 10.0, however received '{}'".format(seconds))
CommandBuilder.__init__(self)
self.motor_id = motor_id
self.position = position
self.seconds = seconds
def duration(self) -> float:
return self.seconds
def build(self) -> str:
cmd = "<MO={motor_id},{position:.1f},{time:.1f}>".format(motor_id=self.motor_id.value, position=self.position,
time=self.seconds)
return cmd
class SayCommandBuilder(CommandBuilder):
""" Builds commands to make the robot speak. """
def __init__(self, text: str, wpm: int = 112):
""" Constructor for SayCommandBuilder.
:param text: the text for the robot to speak.
:param wpm: the estimated words per minute that the robot speaks at. This does not modify the speed that the
robot speaks at, just the estimate of how long it will speak for. The default value of 112 is the estimated
WPM for the Professor Einstein text to speech engine.
"""
if wpm <= 0:
raise ValueError(
"SayCommandBuilder.__init__ expected 'wpm' to be greater than 0, however received '{}'".format(wpm))
CommandBuilder.__init__(self)
self.text = text
self.wpm = wpm
def duration(self) -> float:
num_words = len(self.text.split(' '))
seconds = num_words / self.wpm * 60.
return seconds
def build(self) -> str:
return self.text

avg_line_length: 32.719368 | max_line_length: 119 | alphanum_fraction: 0.633366
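
The builder classes above each emit a fragment of the robot's command string and estimate how long it takes to run. A minimal usage sketch, assuming the file above is importable as `hr_little_api.builders` (the composed sequence and its values are illustrative only):

```python
# Sketch only: compose several builders and inspect the generated command string.
from hr_little_api.builders import (
    CallbackCommandBuilder, CallbackType, CommandListBuilder,
    MotorCommandBuilder, MotorId, SayCommandBuilder,
    WalkCommandBuilder, WalkDirection,
)

sequence = CommandListBuilder([
    MotorCommandBuilder(MotorId.right_arm, position=1.0, seconds=0.5),  # "<MO=AR,1.0,0.5>"
    SayCommandBuilder("Hello there"),                                    # raw text to speak
    WalkCommandBuilder(WalkDirection.forward, steps=2),                  # "<WK=W2,2>"
    CallbackCommandBuilder(CallbackType.end, "done"),                    # "<TE=done>"
])

print(sequence.build())  # "<MO=AR,1.0,0.5>Hello there<WK=W2,2><TE=done>"
```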

hexsha: 1dd562f671e32ea1c8536d882815a94fa5b9041f | size: 5,560 | ext: py | lang: Python
max_stars: arachniclient/__init__.py | catatonicprime/arachniclient | 2d56fbc0eafe8065f57f577fdae0f6c6ce11f02d | ["MIT"] | count: null | dates: null
max_issues: arachniclient/__init__.py | catatonicprime/arachniclient | 2d56fbc0eafe8065f57f577fdae0f6c6ce11f02d | ["MIT"] | count: null | dates: null
max_forks: arachniclient/__init__.py | catatonicprime/arachniclient | 2d56fbc0eafe8065f57f577fdae0f6c6ce11f02d | ["MIT"] | count: null | dates: null
content:
import requests
from requests.auth import HTTPBasicAuth
from io import StringIO
class Scan:
def __init__(self, scanId=None, scanOptions=None, client=None, scan=None):
self.id = scanId
self.scanOptions = scanOptions
self.client = client
self.scan = scan
def display(self):
print ("{0} : {1}".format(self.id, self.status))
@property
def status(self):
self.updateScan()
if self.scan is None:
return None
return self.scan['status']
@property
def url(self):
return self.scanOptions['url']
@property
def runtime(self):
self.updateScan()
try:
return self.scan['statistics']['runtime']
except (KeyError, TypeError):  # dict lookups raise these, not ValueError
return None
def updateScan(self):
if self.client is None:
raise Exception("client cannot be None")
if self.id is None:
raise Exception("id cannot be None")
self.scan = self.client.getScan(self.id)
def pauseScan(self):
if self.client is None:
raise Exception("client cannot be None")
if self.id is None:
raise Exception("id cannot be None")
self.client.pauseScan(self.id)
def resumeScan(self):
if self.client is None:
raise Exception("client cannot be None")
if self.id is None:
self.startScan()
else:
self.client.resumeScan(self.id)
def startScan(self):
if self.client is None:
raise Exception("client cannot be None")
if self.id is not None:
self.resumeScan()
return
if self.scanOptions is None:
raise Exception("scanOptions cannot be None")
if self.scanOptions['url'] is None:
raise Exception('Scan URL is required')
self.id = self.client.addScan(self.scanOptions)
def deleteScan(self):
if self.client is None:
raise Exception("client cannot be None")
if self.id is None:
raise Exception("id cannot be None")
self.client.deleteScan(self.id)
self.id = None
def downloadReport(self, fmt=None, dest=None):
if self.client is None:
raise Exception("client cannot be None")
if self.id is None:
raise Exception("id cannot be None")
extensions = {'json': '.json',
'xml': '.xml',
'yaml': '.yaml',
'html.zip': '.html.zip'}
# Default to '.json' extension.
extension = extensions['json']
fmt = str(fmt)
if fmt in extensions:
extension = extensions[fmt]
report_data = self.client.getReport(self.id, fmt)
with open("{0}{1}".format(self.id, extension), "wb") as handle:
handle.write(report_data)
class Client:
def __init__(self, hostname='127.0.0.1', port=7331, username=None, password=None):
if hostname is None:
raise Exception('hostname cannot be None')
if port is None:
raise Exception('port cannot be None')
self.hostname = hostname
self.port = port
# Build the client requests session with auth etc.
session = requests.Session()
if (username or password):
session.auth = HTTPBasicAuth(username, password)
self._session = session
def getUrl(self, endpoint=''):
return 'http://{0}:{1}/{2}'.format(self.hostname, self.port, endpoint)
def getScans(self):
r = self._session.get(self.getUrl('scans'))
scanIds = r.json()
scans = []
for scanId in scanIds:
scans.append(Scan(scanId=scanId, scan=self.getScan(scanId), client=self))
return scans
def addScan(self, scanOptions):
r = self._session.post(self.getUrl('scans'), json=scanOptions)
if not r.status_code == 200:
raise Exception('An error occurred: \r\n{0}'.format(r.content))
try:
scan_id = r.json()['id']
except ValueError:
raise Exception('An error occurred retrieving scan id from json: {0}'.format(r.content))
return scan_id
def getScan(self, scanId):
r = self._session.get(self.getUrl('scans/{0}'.format(scanId)))
if r.status_code == 200:
return r.json()
raise Exception('An error occurred: \r\n{0}'.format(r.content))
def pauseScan(self, scanId):
r = self._session.put(self.getUrl('scans/{0}/pause'.format(scanId)))
if r.status_code == 200:
return
raise Exception('Failed to pause scan with id {0}'.format(scanId))
def resumeScan(self, scanId):
r = self._session.put(self.getUrl('scans/{0}/resume'.format(scanId)))
if r.status_code == 200:
return
raise Exception('Failed to resume scan with id {0}'.format(scanId))
def deleteScan(self, scanId):
r = self._session.delete(self.getUrl('scans/{0}'.format(scanId)))
if r.status_code == 200:
return
raise Exception('Failed to delete scan with id {0}'.format(scanId))
def getReport(self, scanId, fmt=None):
url = self.getUrl('scans/{0}/report'.format(scanId))
if fmt in ['json', 'xml', 'yaml', 'html.zip']:
url = "{0}.{1}".format(url, fmt)
r = self._session.get(url)
if r.status_code == 200:
return r.content
raise Exception('Failed to download report for scan id {0}'.format(scanId))

avg_line_length: 33.902439 | max_line_length: 100 | alphanum_fraction: 0.580755
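
A minimal usage sketch of the `Client` and `Scan` classes above, assuming an Arachni REST server is listening on the default 127.0.0.1:7331 and that "done" and "aborted" are among the status strings the server reports; the target URL is a placeholder:

```python
# Sketch only: start a scan, poll it, then save the report and clean up.
import time
from arachniclient import Client, Scan

client = Client(hostname="127.0.0.1", port=7331)

# 'url' is the only option startScan() checks for; other Arachni scan options could be added.
scan = Scan(scanOptions={"url": "http://target.example.com/"}, client=client)
scan.startScan()

while scan.status not in ("done", "aborted"):  # status strings assumed from Arachni's API
    scan.display()  # prints "<scan id> : <status>"
    time.sleep(5)

scan.downloadReport(fmt="json")  # writes "<scan id>.json" in the working directory
scan.deleteScan()
```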

hexsha: 04afe43423b51cba2f37cdf5c2bd73a7f6c7806a | size: 6,422 | ext: py | lang: Python
max_stars: PythonScripts/CS_Flux_Contours.py | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | ["Apache-2.0"] | count: null | dates: null
max_issues: PythonScripts/CS_Flux_Contours.py | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | ["Apache-2.0"] | count: null | dates: null
max_forks: PythonScripts/CS_Flux_Contours.py | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | ["Apache-2.0"] | count: null | dates: null
content:
## generate contour plots of the time-dependent phase and advective phase of CS transport and flux
from math import *
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pylab as pl
import scipy.io
import scipy as spy
import seaborn as sns
import sys
lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
#lib_path = os.path.abspath('../BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
sys.path.append(lib_path)
import ReadOutTools_MITgcm as rout
import MetricsPythonTools as mpt
# Plotting options
sns.set()
sns.set_style('dark')
sns.set_context('talk')
# Files
CGrid = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run03/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
CGridOut = Dataset(CGrid)
FluxTR01 = ('/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run03/FluxTR01Glob.nc' )
FluxTR01NoC = ('/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run07/FluxTR01Glob.nc' )
# General input
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size
rc = CGridOut.variables['RC']
xc = rout.getField(CGrid, 'XC') # x coords tracer cells
yc = rout.getField(CGrid, 'YC') # y coords tracer cells
depth = rout.getField(CGrid, 'Depth') # water depth at tracer cells
drF = CGridOut.variables['drF'] # vertical distance between faces
drC = CGridOut.variables['drC'] # vertical distance between centers
times = np.arange(0,nt,1)
MaskCan = rout.getMask(CGrid,'HFacC')
hFacCCan = rout.getField(CGrid,'HFacC')
#Transect definitions (indices x,y,z,t)
CS1 = [0,40,227,227,0,30,0,18]
CS2 = [40,120,227,227,0,30,0,18]
CS3 = [120,240,267,267,0,30,0,18]
CS3sb = [120,240,227,227,0,30,0,18]
CS4 = [240,320,227,227,0,30,0,18]
CS5 = [320,359,227,227,0,30,0,18]
AS1 = [120,120,227,267,0,30,0,18]
AS2 = [240,240,227,267,0,30,0,18]
LID1 = [120,180,227,267,30,30,0,18]
LID2 = [180,240,227,267,30,30,0,18]
day = [0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5, 9.] # Fluxes are calculated between two outputs
TracerList = ['Tr1']
fluxfile = [FluxTR01]
fluxtr = ['1']
for f,tr,trstr in zip (fluxfile,fluxtr,TracerList):
keyw = ('WTRAC0%s' %tr)
keyv = ('VTRAC0%s' %tr)
keyu = ('UTRAC0%s' %tr)
Wnm,Vnm,Unm = mpt.get_TRAC(f, keyw ,keyv, keyu)
MaskExp = mpt.maskExpand(MaskCan,Unm)
U = np.ma.MaskedArray(Unm,mask=MaskExp)
V = np.ma.MaskedArray(Vnm,mask=MaskExp)
W = np.ma.MaskedArray(Wnm,mask=MaskExp)
#Get slices
V_CS1a = mpt.slice_TRAC(V,CS1[0],CS1[1],CS1[2],CS1[3],CS1[4],CS1[5],CS1[6],CS1[7])
V_CS2a = mpt.slice_TRAC(V,CS2[0],CS2[1],CS2[2],CS2[3],CS2[4],CS2[5],CS2[6],CS2[7])
V_CS3a = mpt.slice_TRAC(V,CS3[0],CS3[1],CS3[2],CS3[3],CS3[4],CS3[5],CS3[6],CS3[7])
V_CS4a = mpt.slice_TRAC(V,CS4[0],CS4[1],CS4[2],CS4[3],CS4[4],CS4[5],CS4[6],CS4[7])
V_CS5a = mpt.slice_TRAC(V,CS5[0],CS5[1],CS5[2],CS5[3],CS5[4],CS5[5],CS5[6],CS5[7])
V_CS3sba = mpt.slice_TRAC(V,CS3sb[0],CS3sb[1],CS3sb[2],CS3sb[3],CS3sb[4],CS3sb[5],CS3sb[6],CS3sb[7])
U_AS1a = mpt.slice_TRAC(U,AS1[0],AS1[1],AS1[2],AS1[3],AS1[4],AS1[5],AS1[6],AS1[7])
U_AS2a = mpt.slice_TRAC(U,AS2[0],AS2[1],AS2[2],AS2[3],AS2[4],AS2[5],AS2[6],AS2[7])
W_LID1a = mpt.slice_TRAC(W,LID1[0],LID1[1],LID1[2],LID1[3],LID1[4],LID1[5],LID1[6],LID1[7])
W_LID2a = mpt.slice_TRAC(W,LID2[0],LID2[1],LID2[2],LID2[3],LID2[4],LID2[5],LID2[6],LID2[7])
# Concatenate arrays to plot
Up = np.concatenate((V_CS1a,V_CS2a),axis = 2)
ASup = -U_AS1a
Head = V_CS3a
ASdown = U_AS2a[:,:,::-1]
Down= np.concatenate((V_CS4a,V_CS5a),axis = 2)
Vert = np.concatenate((W_LID1a,W_LID2a),axis = 2)
## FIGURE
sns.set_context("talk", font_scale=0.9, rc={"lines.linewidth": 2.5})
sns.set_palette( sns.hls_palette(11, l=.4, s=.8))
sns.set_style("ticks")
plt.clf()
fig45=plt.figure(figsize=(24,6))
tt = 12
ax1 = plt.subplot(1,6,1)
cn = ax1.contourf(xc[227,0:120]/1000.0,rc[0:30],np.mean(Up[10:,:,:],axis=0),15, vmax = np.max(np.mean(Up[10:,:,:],axis=0)),
vmin = -np.max(np.mean(Up[10:,:,:],axis=0)), cmap = 'RdYlBu_r')
cb = plt.colorbar(cn, orientation = 'horizontal',ticks=[-1, 0, 1])
cb.set_label('Mol/l m/s')  # Colorbar uses set_label, not label
plt.ylabel('Depth (m) ')
plt.xlabel('Alongshore distance (km) ')
ax2 = plt.subplot(1,6,2)
cn = ax2.contourf(yc[227:267,120]/1000.0,rc[0:30],np.mean(ASup[10:,:,:],axis=0),15, vmax = -np.min(np.mean(ASup[10:,:,:],axis=0)),
vmin = np.min(np.mean(ASup[10:,:,:],axis=0)),cmap = 'RdYlBu_r')
cb = plt.colorbar(cn,orientation = 'horizontal')
plt.xlabel('Cross-shore distance (km) ')
ax3 = plt.subplot(1,6,3)
cn = ax3.contourf(xc[267,120:240]/1000.0,rc[0:30],np.mean(Head[10:,:,:],axis=0),15, vmax = np.max(np.mean(Head[10:,:,:],axis=0)),
vmin = -np.max(np.mean(Head[10:,:,:],axis=0)),cmap = 'RdYlBu_r')
cb = plt.colorbar(cn,orientation = 'horizontal')
plt.xlabel('Alongshore distance (km) ')
plt.title('CNTDIFF canyon, $K_v = 10^{-5}m^2s^{-1}$, Mean Cross-shelf transport Adv phase')
ax4 = plt.subplot(1,6,4)
cn = ax4.contourf(yc[227:267,120]/1000.0,rc[0:30],np.mean(ASdown[10:,:,:],axis=0),15, vmax = np.max(np.mean(ASdown[10:,:,:],axis=0)),
vmin = -np.max(np.mean(ASdown[10:,:,:],axis=0)),cmap = 'RdYlBu_r')
cb = plt.colorbar(cn,orientation = 'horizontal')
plt.xlabel('Cross-shore distance (km) ')
ax5 = plt.subplot(1,6,5)
cn = ax5.contourf(xc[227,240:-1]/1000.0,rc[0:30],np.mean(Down[10:,:,:],axis=0),15, vmax = -np.min(np.mean(Down[10:,:,:],axis=0)),
vmin = np.min(np.mean(Down[10:,:,:],axis=0)),cmap = 'RdYlBu_r')
cb = plt.colorbar(cn,orientation = 'horizontal')
plt.xlabel('Alongshore distance (km) ')
ax5 = plt.subplot(1,6,6)
cn = ax5.contourf(xc[227,120:240]/1000.0,yc[227:267,120]/1000.0,np.mean(Vert[10:,:,:],axis=0),15, vmax = np.max(np.mean(Vert[10:,:,:],axis=0)),
vmin = -np.max(np.mean(Vert[10:,:,:],axis=0)),cmap = 'RdYlBu_r')
shelfbreakline = ax5.contour(xc[227,120:240]/1000.0,yc[227:267,120]/1000.0,depth[227:267,120:240],[152.0],colors='k')
cb = plt.colorbar(cn,orientation = 'horizontal')
plt.xlabel('Alongshore distance (km) ')
plt.ylabel('Cross-shore distance (km) ')
plt.title('Vertical transport shelf-break depth')
plt.show()
#fig45.savefig('results/figures/PosterOSM16/CS_TRANS_AdvPh_CNTrun03Tr1.eps', format='eps', dpi=1000, bbox_inches='tight')

avg_line_length: 35.877095 | max_line_length: 143 | alphanum_fraction: 0.654625

hexsha: cad5ed227971641e5f84d2302241c76f1ddbcc49 | size: 93 | ext: py | lang: Python
max_stars: common/determined_common/experimental/checkpoint/__init__.py | renedlog/determined | 7b1f2b1a845e4def17d489a5ad7592a2eaef0608 | ["Apache-2.0"] | count: null | dates: null
max_issues: common/determined_common/experimental/checkpoint/__init__.py | renedlog/determined | 7b1f2b1a845e4def17d489a5ad7592a2eaef0608 | ["Apache-2.0"] | count: 1 | 2022-02-10T07:31:44.000Z to 2022-02-10T07:31:44.000Z
max_forks: common/determined_common/experimental/checkpoint/__init__.py | renedlog/determined | 7b1f2b1a845e4def17d489a5ad7592a2eaef0608 | ["Apache-2.0"] | count: null | dates: null
content:
from determined_common.experimental.checkpoint._checkpoint import Checkpoint, get_checkpoint

avg_line_length: 46.5 | max_line_length: 92 | alphanum_fraction: 0.903226

hexsha: 7150c2407aa6de0fd10c10c62780152528827a4e | size: 999 | ext: py | lang: Python
max_stars: animation/health.py | dansarno/pygame-pandemic-simulation | 24e4faa03538735552d189f1b5f286d80e25db4b | ["MIT"] | count: null | dates: null
max_issues: animation/health.py | dansarno/pygame-pandemic-simulation | 24e4faa03538735552d189f1b5f286d80e25db4b | ["MIT"] | count: null | dates: null
max_forks: animation/health.py | dansarno/pygame-pandemic-simulation | 24e4faa03538735552d189f1b5f286d80e25db4b | ["MIT"] | count: null | dates: null
content:
import tools
import numpy as np
configs = tools.load_yaml('config.yaml')
class Status:
def __init__(self, status, speed, frame_limit):
self.status = status
self.speed = speed
self.frame_limit = frame_limit
healthy = Status('healthy',
tools.random_between(configs['people']['healthy']['speed']),
tools.random_between(configs['people']['healthy']['frame_limit']))
infected = Status('infected',
tools.random_between(configs['people']['infected']['speed']),
tools.random_between(configs['people']['infected']['frame_limit']))
recovered = Status('recovered',
tools.random_between(configs['people']['recovered']['speed']),
tools.random_between(configs['people']['recovered']['frame_limit']))
dead = Status('dead',
tools.random_between(configs['people']['dead']['speed']),
tools.random_between(configs['people']['dead']['frame_limit']))

avg_line_length: 38.423077 | max_line_length: 87 | alphanum_fraction: 0.617618

hexsha: db326751a7e6439270937312ab34a121fd53e865 | size: 1,731 | ext: py | lang: Python
max_stars: dn.py | abhijith0505/SimpDFS | 08a30ecf672a045a03bb9968c51d48ffe8a02542 | ["MIT"] | count: null | dates: null
max_issues: dn.py | abhijith0505/SimpDFS | 08a30ecf672a045a03bb9968c51d48ffe8a02542 | ["MIT"] | count: null | dates: null
max_forks: dn.py | abhijith0505/SimpDFS | 08a30ecf672a045a03bb9968c51d48ffe8a02542 | ["MIT"] | count: null | dates: null
content:
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
# TODO: Read from config file
BLOCK_STORE_PATH = "./blocks"
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ("/RPC2",)
def get_block_name(path, seq):
"""Return block id (name) for given file path and seq."""
# Replace / with - and append seq number
# No validation done on path or block
# Chance of collision, e.g. /a/b/c and /a/b-c both map to -a-b-c
# Also first character is always a - (because path starts with /). Meh.
return "{}-{}".format(path.replace("/", "-"), str(seq))
def put_block(path, seq, data):
"""Write given data into a file named id under BLOCK_STORE_PATH."""
block_name = get_block_name(path, seq)
print("Writing block " + block_name)
with open(BLOCK_STORE_PATH + "/" + block_name, "w") as f:
f.write(data)
# Should be returning success/failure
return 1
def get_block(path, seq):
"""Return contents of file named id under BLOCK_STORE_PATH."""
block_name = get_block_name(path, seq)
print("Fetching block " + block_name)
with open(BLOCK_STORE_PATH + "/" + block_name, "r") as f:
data = f.read()
return data
def main():
print("Starting DN")
# put_block('/my/dir/file', 6, 'hello file')
# print(get_block('/my/dir/file', 6))
with SimpleXMLRPCServer(("0.0.0.0", 1111), requestHandler=RequestHandler) as server:
server.register_introspection_functions()
server.register_function(get_block)
server.register_function(put_block)
print("Awaiting requests")
server.serve_forever()
print("Stopping DN")
if __name__ == "__main__":
main()

avg_line_length: 27.47619 | max_line_length: 88 | alphanum_fraction: 0.664934
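
The comments in `get_block_name` above point out that replacing `/` with `-` can make two different paths collide; a small check illustrating exactly that (the function is restated here so the snippet runs on its own):

```python
# Sketch only: demonstrate the block-name collision noted in dn.py's comments.
def get_block_name(path, seq):
    return "{}-{}".format(path.replace("/", "-"), str(seq))

# Two distinct paths produce the same block id, so their blocks would overwrite each other.
assert get_block_name("/a/b/c", 0) == "-a-b-c-0"
assert get_block_name("/a/b-c", 0) == "-a-b-c-0"
```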

hexsha: d6b6e2dac5cbecb42f38e89971beef4bbcd9148e | size: 982 | ext: py | lang: Python
max_stars: meteoservice/urls.py | berthakim/world-meteo-page | 6789ab8e571f6be64d250f0a60413fa5de5671d7 | ["MIT"] | count: null | dates: null
max_issues: meteoservice/urls.py | berthakim/world-meteo-page | 6789ab8e571f6be64d250f0a60413fa5de5671d7 | ["MIT"] | count: null | dates: null
max_forks: meteoservice/urls.py | berthakim/world-meteo-page | 6789ab8e571f6be64d250f0a60413fa5de5671d7 | ["MIT"] | count: null | dates: null
content:
"""map_service URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('weather.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

avg_line_length: 35.071429 | max_line_length: 80 | alphanum_fraction: 0.720978

hexsha: 6046204381846cc3374129ab37cb3544833f8011 | size: 680 | ext: py | lang: Python
max_stars: home.admin/config.scripts/lnd.unlock.py | johnzweng/raspiblitz | 8d6dad1b9895853d900f68b725144ec96f99da95 | ["MIT"] | count: 1 | 2020-05-01T22:40:16.000Z to 2020-05-01T22:40:16.000Z
max_issues: home.admin/config.scripts/lnd.unlock.py | johnzweng/raspiblitz | 8d6dad1b9895853d900f68b725144ec96f99da95 | ["MIT"] | count: 1 | 2020-01-17T15:43:23.000Z to 2020-01-17T16:40:01.000Z
max_forks: home.admin/config.scripts/lnd.unlock.py | johnzweng/raspiblitz | 8d6dad1b9895853d900f68b725144ec96f99da95 | ["MIT"] | count: 1 | 2021-03-04T05:44:34.000Z to 2021-03-04T05:44:34.000Z
content:
# parameter #1: password C to unlock wallet
import base64
import codecs
import json
import requests
import sys
pw = sys.argv[1]
url = 'https://localhost:8080/v1/unlockwallet'
cert_path = '/mnt/hdd/lnd/tls.cert'
try:
pw_b64 = base64.b64encode(pw).decode()
except TypeError: # for Python3+
pw_b64 = base64.b64encode(pw.encode()).decode('UTF-8')
data = {'wallet_password': pw_b64}
try:
r = requests.post(url, verify=cert_path, data=json.dumps(data))
except requests.exceptions.ConnectionError as err:
print(err)
print("\nAn Error occurred - is LND running?")
sys.exit(1)
if r.status_code == 404:
print("Already unlocked!")
else:
print(r.json())

avg_line_length: 22.666667 | max_line_length: 67 | alphanum_fraction: 0.7

hexsha: e359dde2717d7fd3e0840f8a303f0af548431382 | size: 4,603 | ext: py | lang: Python
max_stars: src/snowflake/connector/tool/dump_ocsp_response.py | jurecuhalev/snowflake-connector-python | c7b71e18111c16ffa8869855cd52c21e5b8e19d1 | ["Apache-2.0"] | count: 311 | 2017-01-06T03:02:10.000Z to 2022-03-28T12:39:10.000Z
max_issues: src/snowflake/connector/tool/dump_ocsp_response.py | jurecuhalev/snowflake-connector-python | c7b71e18111c16ffa8869855cd52c21e5b8e19d1 | ["Apache-2.0"] | count: 720 | 2017-01-12T19:05:15.000Z to 2022-03-31T12:24:36.000Z
max_forks: src/snowflake/connector/tool/dump_ocsp_response.py | jurecuhalev/snowflake-connector-python | c7b71e18111c16ffa8869855cd52c21e5b8e19d1 | ["Apache-2.0"] | count: 289 | 2017-02-02T00:01:41.000Z to 2022-03-19T06:01:23.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import time
from os import path
from time import gmtime, strftime
from asn1crypto import ocsp as asn1crypto_ocsp
from snowflake.connector.compat import urlsplit
from snowflake.connector.ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP
from snowflake.connector.ssl_wrap_socket import _openssl_connect
def main():
"""Internal Tool: OCSP response dumper."""
def help():
print("Dump OCSP Response for the URL. ")
print(
"""
Usage: {} <url> [<url> ...]
""".format(
path.basename(sys.argv[0])
)
)
sys.exit(2)
import sys
if len(sys.argv) < 2:
help()
urls = sys.argv[1:]
dump_ocsp_response(urls, output_filename=None)
def dump_good_status(current_time, single_response):
print("This Update: {}".format(single_response["this_update"].native))
print("Next Update: {}".format(single_response["next_update"].native))
this_update = (
single_response["this_update"].native.replace(tzinfo=None) - SFOCSP.ZERO_EPOCH
).total_seconds()
next_update = (
single_response["next_update"].native.replace(tzinfo=None) - SFOCSP.ZERO_EPOCH
).total_seconds()
tolerable_validity = SFOCSP._calculate_tolerable_validity(this_update, next_update)
print(
"Tolerable Update: {}".format(
strftime("%Y%m%d%H%M%SZ", gmtime(next_update + tolerable_validity))
)
)
if SFOCSP._is_validaity_range(current_time, this_update, next_update):
print("OK")
else:
print(SFOCSP._validity_error_message(current_time, this_update, next_update))
def dump_revoked_status(single_response):
revoked_info = single_response["cert_status"]
revocation_time = revoked_info.native["revocation_time"]
revocation_reason = revoked_info.native["revocation_reason"]
print(
"Revoked Time: {}".format(
revocation_time.strftime(SFOCSP.OUTPUT_TIMESTAMP_FORMAT)
)
)
print("Revoked Reason: {}".format(revocation_reason))
def dump_ocsp_response(urls, output_filename):
ocsp = SFOCSP()
for url in urls:
if not url.startswith("http"):
url = "https://" + url
parsed_url = urlsplit(url)
hostname = parsed_url.hostname
port = parsed_url.port or 443
connection = _openssl_connect(hostname, port)
cert_data = ocsp.extract_certificate_chain(connection)
current_time = int(time.time())
print("Target URL: {}".format(url))
print(
"Current Time: {}".format(strftime("%Y%m%d%H%M%SZ", gmtime(current_time)))
)
for issuer, subject in cert_data:
_, _ = ocsp.create_ocsp_request(issuer, subject)
_, _, _, cert_id, ocsp_response_der = ocsp.validate_by_direct_connection(
issuer, subject
)
ocsp_response = asn1crypto_ocsp.OCSPResponse.load(ocsp_response_der)
print("------------------------------------------------------------")
print("Subject Name: {}".format(subject.subject.native))
print("Issuer Name: {}".format(issuer.subject.native))
print("OCSP URI: {}".format(subject.ocsp_urls))
print("CRL URI: {}".format(subject.crl_distribution_points[0].native))
print("Issuer Name Hash: {}".format(subject.issuer.sha1))
print("Issuer Key Hash: {}".format(issuer.public_key.sha1))
print("Serial Number: {}".format(subject.serial_number))
print("Response Status: {}".format(ocsp_response["response_status"].native))
basic_ocsp_response = ocsp_response.basic_ocsp_response
tbs_response_data = basic_ocsp_response["tbs_response_data"]
print("Responder ID: {}".format(tbs_response_data["responder_id"].name))
current_time = int(time.time())
for single_response in tbs_response_data["responses"]:
cert_status = single_response["cert_status"].name
if cert_status == "good":
dump_good_status(current_time, single_response)
elif cert_status == "revoked":
dump_revoked_status(single_response)
else:
print("Unknown")
print("")
if output_filename:
SFOCSP.OCSP_CACHE.write_ocsp_response_cache_file(ocsp, output_filename)
return SFOCSP.OCSP_CACHE.CACHE
if __name__ == "__main__":
main()

avg_line_length: 36.531746 | max_line_length: 88 | alphanum_fraction: 0.6335

hexsha: 2e4d0ce16bd1dec25ccdf3b823e6ecf2fd6f5842 | size: 2,304 | ext: py | lang: Python
max_stars: test/functional/p2p_pos_fakestake.py | smartblockscoin/smartblockscoin_src | 13bcb5953998e8a54533ac0a7bbb05a21bfc74d9 | ["MIT"] | count: null | dates: null
max_issues: test/functional/p2p_pos_fakestake.py | smartblockscoin/smartblockscoin_src | 13bcb5953998e8a54533ac0a7bbb05a21bfc74d9 | ["MIT"] | count: null | dates: null
max_forks: test/functional/p2p_pos_fakestake.py | smartblockscoin/smartblockscoin_src | 13bcb5953998e8a54533ac0a7bbb05a21bfc74d9 | ["MIT"] | count: null | dates: null
content:
#!/usr/bin/env python3
# Copyright (c) 2019 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a PoS block where the coinstake input prevout is already spent.
'''
from time import sleep
from fake_stake.base_test import Smartblockscoin_FakeStakeTest
class PoSFakeStake(Smartblockscoin_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a PoS block where the coinstake input prevout is already spent."
self.init_test()
INITAL_MINED_BLOCKS = 150 # First mined blocks (rewards collected to spend)
MORE_MINED_BLOCKS = 100 # Blocks mined after spending
STAKE_AMPL_ROUNDS = 2 # Rounds of stake amplification
self.NUM_BLOCKS = 3 # Number of spammed blocks
# 1) Starting mining blocks
self.log.info("Mining %d blocks.." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
# 2) Collect the possible prevouts
self.log.info("Collecting all unspent coins which we generated from mining...")
# 3) Create 10 addresses - Do the stake amplification
self.log.info("Performing the stake amplification (%d rounds)..." % STAKE_AMPL_ROUNDS)
utxo_list = self.node.listunspent()
address_list = []
for i in range(10):
address_list.append(self.node.getnewaddress())
utxo_list = self.stake_amplification(utxo_list, STAKE_AMPL_ROUNDS, address_list)
self.log.info("Done. Utxo list has %d elements." % len(utxo_list))
sleep(2)
# 4) Start mining again so that spent prevouts get confirmed in a block.
self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
# 5) Create "Fake Stake" blocks and send them
self.log.info("Creating Fake stake blocks")
err_msgs = self.test_spam("Main", utxo_list)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
PoSFakeStake().main()

avg_line_length: 39.050847 | max_line_length: 115 | alphanum_fraction: 0.672743

hexsha: d423fcf35ecc8e350befa4f0602d6c0322c2194b | size: 79 | ext: py | lang: Python
max_stars: plugins/github/komand_github/actions/create_issue_comment/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | count: 46 | 2019-06-05T20:47:58.000Z to 2022-03-29T10:18:01.000Z
max_issues: plugins/github/komand_github/actions/create_issue_comment/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | count: 386 | 2019-06-07T20:20:39.000Z to 2022-03-30T17:35:01.000Z
max_forks: plugins/github/komand_github/actions/create_issue_comment/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | count: 43 | 2019-07-09T14:13:58.000Z to 2022-03-28T12:04:46.000Z
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import CreateIssueComment

avg_line_length: 26.333333 | max_line_length: 39 | alphanum_fraction: 0.797468

hexsha: 07799efe7e42778f0cf3cc5b099bdb17f1f1b30d | size: 4,360 | ext: py | lang: Python
max_stars: accounts/views.py | calvin620707/reservoir | f6d403a783cfb3b7e7091e7b1e57dee1d7f3cc46 | ["MIT"] | count: null | dates: null
max_issues: accounts/views.py | calvin620707/reservoir | f6d403a783cfb3b7e7091e7b1e57dee1d7f3cc46 | ["MIT"] | count: null | dates: null
max_forks: accounts/views.py | calvin620707/reservoir | f6d403a783cfb3b7e7091e7b1e57dee1d7f3cc46 | ["MIT"] | count: null | dates: null
content:
import logging
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic import ListView, DetailView, DeleteView
from accounts.forms import CreateNewProjectForm, UpdateProjectForm, MembershipFormSet
from accounts.models import Project, ProjectMembership
from accounts.util import refresh_project_memberships
logger = logging.getLogger()
class MyProjectsView(ListView):
model = Project
def get_queryset(self):
return self.request.user.project_set.all()
class MyNewProjectView(View):
def post(self, request):
form = CreateNewProjectForm(request.POST)
if form.is_valid():
proj = Project.objects.create(name=form.cleaned_data['name'])
refresh_project_memberships(proj, [request.user])
request.user.current_project = proj
request.user.save()
return HttpResponseRedirect(reverse('accounts:project-detail', kwargs={'pk': proj.id}))
return render(request, 'accounts/create_new_project.html', {'form': form})
def get(self, request):
form = CreateNewProjectForm()
return render(request, 'accounts/create_new_project.html', {'form': form})
def update_project_view(request, pk):
proj = get_object_or_404(Project, pk=pk)
invite_link = request.build_absolute_uri(
reverse('accounts:join-project', kwargs={'project_id': proj.id})
)
if request.method == 'POST':
project_form = UpdateProjectForm(request.POST, instance=proj, prefix='project')
memberships_formset = MembershipFormSet(request.POST, prefix='memberships')
if all([project_form.is_valid(), memberships_formset.is_valid()]):
project_form.save()
memberships_formset.save()
else:
project_form = UpdateProjectForm(instance=proj, prefix='project')
memberships_formset = MembershipFormSet(
queryset=ProjectMembership.objects.filter(project=proj),
prefix='memberships'
)
return render(request, 'accounts/update_project.html', {
'project_form': project_form,
'memberships_formset': memberships_formset,
'invite_link': invite_link,
'is_updated': True if request.method == 'POST' else False
})
class MyProjectDeleteView(DeleteView):
model = Project
success_url = reverse_lazy('accounts:my-projects')
class ProjectDetailView(DetailView):
model = Project
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['invite_link'] = self.request.build_absolute_uri(
reverse('accounts:join-project', kwargs={'project_id': context['object'].id})
)
return context
class MyCurrentProjectView(View):
def post(self, request):
"""Set user's current project"""
request.user.current_project = get_object_or_404(Project, id=request.POST['project_id'])
request.user.save()
messages.success(request, 'Your current project was changed to "{}"'.format(request.user.current_project.name))
return HttpResponseRedirect(reverse('sheets:add-costs'))
class JoinProjectView(View):
def get(self, request, project_id):
project = get_object_or_404(Project, id=project_id)
return render(request, 'accounts/join_project.html', context={'project': project})
def post(self, request, project_id):
"""Join a let current user join given project"""
project = get_object_or_404(Project, id=project_id)
members = list(project.members.all())
if request.user in members:
messages.add_message(request, messages.INFO, 'You already joined "{}" project.'.format(project.name))
return HttpResponseRedirect(reverse('sheets:add-costs'))
members.append(request.user)
refresh_project_memberships(project, members)
request.user.current_project = project
request.user.save()
messages.success(request, 'You joined "{}" project.'.format(project.name))
return HttpResponseRedirect(reverse('sheets:add-costs'))

avg_line_length: 36.638655 | max_line_length: 119 | alphanum_fraction: 0.696101

hexsha: 7f044af4ff6ce0dde13e961c761b52da8c5878e3 | size: 993 | ext: py | lang: Python
max_stars: test/test_statistics.py | kunakl07/AL-MLresearch | 5ab80169563e6cbe4de15aefa4bbfa09298795f9 | ["MIT"] | count: null | dates: null
max_issues: test/test_statistics.py | kunakl07/AL-MLresearch | 5ab80169563e6cbe4de15aefa4bbfa09298795f9 | ["MIT"] | count: null | dates: null
max_forks: test/test_statistics.py | kunakl07/AL-MLresearch | 5ab80169563e6cbe4de15aefa4bbfa09298795f9 | ["MIT"] | count: null | dates: null
content:
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from matplotlib import pyplot
import tensorflow as tf
import statistics
import unittest
import random
img_width, img_height = 288, 432
test_datagen = ImageDataGenerator(rescale=1. / 55)
test_data_generator = test_datagen.flow_from_directory(
"test",
target_size=(img_width, img_height),
batch_size=32,
shuffle=False)
true_classes = test_data_generator.classes
randomlist = []
for j in range(201):
randomlist.append(random.randint(0, 1))
model = tf.keras.models.load_model("preprocess_mag_scipy_Srkws.h5")
predictions = model.predict_proba(test_data_generator)
class TestPreprocess(unittest.TestCase):
def test_plot_roc_curve(self):
lr_auc = statistics.plot_roc_curve("preprocess_mag_scipy_Srkws.h5", "test", predictions, true_classes, randomlist)
self.assertEqual(lr_auc, 0.857029702970297)

avg_line_length: 28.371429 | max_line_length: 122 | alphanum_fraction: 0.777442

hexsha: daf03884e1ac982c1a8f1eea39109ce81fd6a379 | size: 1,373 | ext: py | lang: Python
max_stars: TrekBot2_WS/build/hector_map_tools/catkin_generated/generate_cached_setup.py | Rafcin/RescueRoboticsLHMV | d3dc63e6c16a040b16170f143556ef358018b7da | ["Unlicense"] | count: 1 | 2018-10-04T14:37:00.000Z to 2018-10-04T14:37:00.000Z
max_issues: TrekBot2_WS/build/hector_map_tools/catkin_generated/generate_cached_setup.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | ["Unlicense"] | count: null | dates: null
max_forks: TrekBot2_WS/build/hector_map_tools/catkin_generated/generate_cached_setup.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | ["Unlicense"] | count: null | dates: null
content:
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/xavier_ssd/TrekBot/TrekBot2_WS/devel;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/hector_map_tools/env.sh')
output_filename = '/xavier_ssd/TrekBot/TrekBot2_WS/build/hector_map_tools/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)

avg_line_length: 44.290323 | max_line_length: 108 | alphanum_fraction: 0.74654

hexsha: 2bb7090fe81735f77b92afaf74bb235fe84903f8 | size: 1,478 | ext: py | lang: Python
max_stars: settings.py | Vultik/KiCad-Diff | 8c8814c212f8d9beebc2e5731ff31586a03fa27e | ["MIT"] | count: null | dates: null
max_issues: settings.py | Vultik/KiCad-Diff | 8c8814c212f8d9beebc2e5731ff31586a03fa27e | ["MIT"] | count: null | dates: null
max_forks: settings.py | Vultik/KiCad-Diff | 8c8814c212f8d9beebc2e5731ff31586a03fa27e | ["MIT"] | count: null | dates: null
content:
import sys
from subprocess import PIPE, Popen
from typing import List, Tuple
args = ""
global verbose
global gitProg
global fossilProg
global svnProg
global diffProg
global grepProg
global plot_prog
global output_dir
global web_dir
verbose = 0
gitProg = "git"
fossilProg = "fossil"
svnProg = "svn"
diffProg = "diff"
grepProg = "grep"
plot_prog = "plotpcb"
web_dir = "web"
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def run_cmd(exec_path: str, cmd: List[str]) -> Tuple[str, str]:
if verbose > 1:
print("")
print(bcolors.WARNING + "Path:", exec_path + bcolors.ENDC)
print(bcolors.WARNING + " Cmd:", bcolors.OKBLUE + " ".join(cmd) + bcolors.ENDC)
p = Popen(
cmd,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True,
encoding="utf-8",
cwd=exec_path,
)
stdout, stderr = p.communicate()
ret = p.wait()
if verbose > 3:
print(bcolors.OKCYAN + stdout + bcolors.ENDC)
if verbose > 2:
print(bcolors.FAIL + stderr + bcolors.ENDC)
if verbose > 1:
if ret == 0:
print(bcolors.OKGREEN + "Code:", str(ret) + bcolors.ENDC)
else:
print(bcolors.FAIL + "Code:", str(ret) + bcolors.ENDC)
return stdout.strip("\n "), stderr

avg_line_length: 18.948718 | max_line_length: 87 | alphanum_fraction: 0.59134
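
A short usage sketch of `run_cmd` from the settings module above; the working directory and git command are arbitrary examples, and the module is assumed to be importable as `settings`:

```python
# Sketch only: run a command through run_cmd and read its captured output.
import settings

settings.verbose = 2  # have run_cmd print the path and command before executing

stdout, stderr = settings.run_cmd(".", [settings.gitProg, "status", "--short"])
print(stdout)
```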

hexsha: f55613ca66cbc6caee64f560857978450cd17712 | size: 1,437 | ext: py | lang: Python
max_stars: Exercises/Kattis/3_1_i_hate_the_number_nine.py | Gwarglemar/PythonExercises | 3261892dea4d51b320cde2ce8a47e67a67609d30 | ["MIT"] | count: 1 | 2019-05-04T04:49:17.000Z to 2019-05-04T04:49:17.000Z
max_issues: Exercises/Kattis/3_1_i_hate_the_number_nine.py | Gwarglemar/Python | 3261892dea4d51b320cde2ce8a47e67a67609d30 | ["MIT"] | count: null | dates: null
max_forks: Exercises/Kattis/3_1_i_hate_the_number_nine.py | Gwarglemar/Python | 3261892dea4d51b320cde2ce8a47e67a67609d30 | ["MIT"] | count: null | dates: null
content:
#for T test cases, each consisting of an integer D, how many numbers are there of D digits that DO NOT contain any 9's?
#sounds weird at first, but I think it's super simple; the difficulty of this primarily comes from the problem of efficiency, in that you have to compute results with large values for D in under 1 second.
#the first digit can have value 1-8, any digit after can have value 0-8
#from that we get a simple formula
##because of high output values, the question specifies mod 1,000,000,007
# output = (8*(9**(digits-1)))%1000000007
#the above is our formula, but it's too slow for large D; instead we adapt a standard fast modular exponentiation function, initializing the result to 8 so it computes 8 * 9**(D-1) mod p directly.
def custom_power(x, y, p):
res = 8 # Initialize result
# Update x if it is more
# than or equal to p
x = x % p
while (y > 0) :
# If y is odd, multiply
# x with result
if ((y & 1) == 1) :
res = (res * x) % p
# y must be even now
y = y >> 1 # y = y/2
x = (x * x) % p
return res
#first input: num test cases
num_cases = int(input())
#for each test case, get the num digits
for _ in range(num_cases):
digits = int(input())
output = custom_power(9,digits-1,1000000007)
print(output)

avg_line_length: 41.057143 | max_line_length: 233 | alphanum_fraction: 0.627001
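
The counting argument in the comments above (first digit 1-8, remaining digits 0-8, hence 8 * 9^(D-1) values) can be checked by brute force for small D, and `custom_power` can be compared against Python's built-in three-argument `pow`. A small sketch, assuming `custom_power` from the file above is in scope:

```python
# Sketch only: verify the formula and custom_power (defined in the file above) for small D.
M = 1000000007

def brute_force(d):
    lo = 1 if d == 1 else 10 ** (d - 1)
    return sum(1 for n in range(lo, 10 ** d) if '9' not in str(n))

for d in range(1, 5):
    assert brute_force(d) == 8 * 9 ** (d - 1)                  # 8, 72, 648, 5832
    assert custom_power(9, d - 1, M) == 8 * pow(9, d - 1, M) % M
```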

hexsha: f1f812275c8763641044393451d5247a69623303 | size: 121 | ext: py | lang: Python
max_stars: pycode3/elif.py | v-sukt/misc_code | ac5ea0a55a070c88c410d14511c25d332fc675d5 | ["Apache-2.0"] | count: null | dates: null
max_issues: pycode3/elif.py | v-sukt/misc_code | ac5ea0a55a070c88c410d14511c25d332fc675d5 | ["Apache-2.0"] | count: null | dates: null
max_forks: pycode3/elif.py | v-sukt/misc_code | ac5ea0a55a070c88c410d14511c25d332fc675d5 | ["Apache-2.0"] | count: null | dates: null
content:
day="Tuesday"
if day=="Monday":
print("Sunny")
elif day=="Tuesday":
print("Cloudy")
else:
print("Rainy!")

avg_line_length: 15.125 | max_line_length: 20 | alphanum_fraction: 0.578512

hexsha: 233f99a6c7825102ce3818e81f325bafff215abb | size: 6,904 | ext: py | lang: Python
max_stars: caravel/config.py | JoeBrody/Caravel | fc1e63761cc86a5673433b0e2efc0081325684d3 | ["Apache-2.0"] | count: null | dates: null
max_issues: caravel/config.py | JoeBrody/Caravel | fc1e63761cc86a5673433b0e2efc0081325684d3 | ["Apache-2.0"] | count: null | dates: null
max_forks: caravel/config.py | JoeBrody/Caravel | fc1e63761cc86a5673433b0e2efc0081325684d3 | ["Apache-2.0"] | count: null | dates: null
content:
"""The main config file for Caravel
All configuration in this file can be overridden by providing a caravel_config
in your PYTHONPATH as there is a ``from caravel_config import *``
at the end of this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(os.path.expanduser('~'), '.caravel')
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
# ---------------------------------------------------------
# Caravel specific config
# ---------------------------------------------------------
ROW_LIMIT = 50000
CARAVEL_WORKERS = 16
CARAVEL_WEBSERVER_ADDRESS = '0.0.0.0'
CARAVEL_WEBSERVER_PORT = 8088
CARAVEL_WEBSERVER_TIMEOUT = 60
CUSTOM_SECURITY_MANAGER = None
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h' # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(DATA_DIR, 'caravel.db')
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# Flask-WTF flag for CSRF
CSRF_ENABLED = True
# Whether to run the web server in debug mode or not
DEBUG = False
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = "Caravel"
# Uncomment to setup Setup an App icon
APP_ICON = "/static/assets/images/caravel_logo.png"
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'babel/translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag': 'us', 'name': 'English'},
# 'fr': {'flag': 'fr', 'name': 'French'},
# 'zh': {'flag': 'cn', 'name': 'Chinese'},
}
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = None
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
"""
1) http://docs.python-guide.org/en/latest/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = os.path.join(DATA_DIR, 'caravel.log')
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = ""
# Maximum number of rows returned in the SQL editor
SQL_MAX_ROW = 1000
# If defined, shows this text in an alert-warning box in the navbar
# one example use case may be "STAGING" to make it clear that this is
# not the production version of the site.
WARNING_MSG = None
# Default celery config is to use SQLA as a broker, in a production setting
# you'll want to use a proper broker as specified here:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html
"""
# Example:
class CeleryConfig(object):
BROKER_URL = 'sqla+sqlite:///celerydb.sqlite'
CELERY_IMPORTS = ('caravel.tasks', )
CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
CELERY_CONFIG = CeleryConfig
"""
CELERY_CONFIG = None
SQL_CELERY_DB_FILE_PATH = os.path.join(DATA_DIR, 'celerydb.sqlite')
SQL_CELERY_RESULTS_DB_FILE_PATH = os.path.join(DATA_DIR, 'celery_results.sqlite')
# The db id here results in selecting this one as a default in SQL Lab
DEFAULT_DB_ID = None
try:
from caravel_config import * # noqa
except ImportError:
pass
if not CACHE_DEFAULT_TIMEOUT:
CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')

avg_line_length: 31.962963 | max_line_length: 81 | alphanum_fraction: 0.637457
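
The header of `caravel/config.py` above states that any of these settings can be overridden by a `caravel_config` module on the `PYTHONPATH`, thanks to the `from caravel_config import *` at the end of the file. A minimal sketch of such an override file; all values below are illustrative only:

```python
# caravel_config.py -- sketch of a local override module picked up by caravel/config.py.
ROW_LIMIT = 10000
CARAVEL_WEBSERVER_PORT = 8089
SQLALCHEMY_DATABASE_URI = 'postgresql://caravel:caravel@localhost/caravel'  # placeholder DSN
SECRET_KEY = 'replace-with-a-long-random-string'
DEBUG = True
```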

hexsha: 8d94c972cf4c2bcb45385ec0527d1cda31cd8e64 | size: 178 | ext: py | lang: Python
max_stars: rename_firmware.py | Domochip/WirelessTeleInfo | de3a102a243ed62f59b8e26e92527a03253679a2 | ["CC-BY-3.0"] | count: 18 | 2019-07-03T15:46:47.000Z to 2021-11-24T09:35:07.000Z
max_issues: rename_firmware.py | Domochip/Wireless-TeleInfo | 73cb27d8035635d4fd43aecab1bea120f048a898 | ["CC-BY-3.0"] | count: 3 | 2019-07-29T11:58:44.000Z to 2022-03-04T21:31:25.000Z
max_forks: rename_firmware.py | Domochip/Wireless-TeleInfo | 73cb27d8035635d4fd43aecab1bea120f048a898 | ["CC-BY-3.0"] | count: 7 | 2019-12-28T18:55:45.000Z to 2021-09-06T14:29:13.000Z
content:
Import("env")
my_flags = env.ParseFlags(env['BUILD_FLAGS'])
defines = {k: v for (k, v) in my_flags.get("CPPDEFINES")}
# print defines
env.Replace(PROGNAME=defines.get("MODEL"))

avg_line_length: 25.428571 | max_line_length: 57 | alphanum_fraction: 0.707865

hexsha: ccb72b67893f092ece864160a4c2429514eb3bad | size: 3,870 | ext: py | lang: Python
max_stars: covid19_cali.py | wjma98/covid19-stats | c7ecd174a146fd8902db9f2fade58aaacc0ab309 | ["MIT"] | count: null | dates: null
max_issues: covid19_cali.py | wjma98/covid19-stats | c7ecd174a146fd8902db9f2fade58aaacc0ab309 | ["MIT"] | count: null | dates: null
max_forks: covid19_cali.py | wjma98/covid19-stats | c7ecd174a146fd8902db9f2fade58aaacc0ab309 | ["MIT"] | count: null | dates: null
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 14:42:01 2021
@author: William
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
import pandas as pd
import matplotlib.dates as mdates
import datetime as dt
# Parsing .csv data from NYT to find total deaths. starts on 1/25/20. state-level data
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'
path = 'us-states.csv'
###################################### State-level data
#Read data
df = pd.read_csv(url, error_bad_lines = False)
df = df.set_index('state')
california = df.loc['California']
california_deaths = df.loc['California', 'deaths']
california_cases = df.loc['California', 'cases']
#compute 1 day lag in order to get daily new cases and deaths
lag_california_deaths = california_deaths.shift(1)
lag_california_cases = california_cases.shift(1)
#convert series to numpy array
array_deaths = california_deaths.to_numpy()
array_cases = california_cases.to_numpy()
array_lag_deaths = lag_california_deaths.to_numpy()
array_lag_cases = lag_california_cases.to_numpy()
daily_deaths = array_deaths - array_lag_deaths
daily_cases = array_cases - array_lag_cases
w = np.isnan(daily_deaths)
daily_deaths[w] = 0
w = np.isnan(daily_cases)
daily_cases[w] = 0
#make a smooth-funct. of daily deaths and cases
x2 = np.arange(len(daily_cases))
log_daily_deaths = np.where(daily_deaths > 0, np.log(daily_deaths), 0)
log_daily_cases = np.where(daily_cases > 0, np.log(daily_cases), 0)
s2 = UnivariateSpline(x2, log_daily_deaths, k=3, s=50)
log_best_fit_daily_deaths = s2(x2)
best_fit_daily_deaths = np.exp(log_best_fit_daily_deaths)
s3 = UnivariateSpline(x2, log_daily_cases, k=3, s=25)
log_best_fit_daily_cases = s3(x2)
best_fit_daily_cases = np.exp(log_best_fit_daily_cases)
#compute rolling sums using best_fit
df2 = pd.DataFrame(data = best_fit_daily_cases)
best_fit_roll_12 = df2.rolling(12).sum()
array_best_fit_roll_12 = best_fit_roll_12.to_numpy()
#rolling sums using actual data.
df3 = pd.DataFrame(data = daily_cases)
actual_roll_12 = df3.rolling(12).sum()
array_actual_roll_12 = actual_roll_12.to_numpy()
array_best_fit_roll_12 = np.resize(array_best_fit_roll_12, (len(daily_cases),))
array_actual_roll_12 = np.resize(array_actual_roll_12, (len(daily_cases),))
R_e = (best_fit_daily_cases / array_best_fit_roll_12) * 12 #best fit
R_e2 =(daily_cases / array_actual_roll_12) * 12 #actual data
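#The two lines above approximate the effective reproduction number: new cases on a
#given day divided by the rolling 12-day sum of new cases, scaled by 12 (presumably
#treating ~12 days as the infectious window); R_e uses the spline fit, R_e2 the raw counts.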
#plotting stuff
figure, axes = plt.subplots(nrows=3, ncols=1, figsize = (13,8), sharex = True)
# =============================================================================
# for the purpose of curve-fitting
# plt.plot(x2, log_daily_deaths, label = 'log daily deaths')
# plt.plot(x2, log_best_fit_daily_deaths, label = 'best fit deaths in log')
# =============================================================================
ones = np.arange(len(x2))
ones.fill(1)
now = dt.datetime.now()
start = now - dt.timedelta(days=len(R_e))
days = mdates.drange(start,now,dt.timedelta(days=1))
axes[0,].scatter(days, daily_cases, color ='red', label = 'data')
axes[0,].plot(days, best_fit_daily_cases, label = 'best fit', color = 'blue')
axes[0,].set(ylabel = 'Daily Cases')
axes[1,].scatter(days, daily_deaths, color ='green', label = 'data')
axes[1,].plot(days, best_fit_daily_deaths, label = 'best fit', color = 'blue')
axes[1,].set(ylabel = 'Daily Deaths')
axes[2,].plot(days, R_e, color = 'orange')
axes[2,].plot(days, ones, '--', color = 'red')
axes[2,].set(ylabel = 'R_0')
axes[2,].set_yticks(np.arange(0.5, 3, 0.5))
plt.subplots_adjust(hspace=0.2)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=30))
plt.gcf().autofmt_xdate()
plt.suptitle("COVID-19 Stats in California")
axes[0,].legend()
axes[1,].legend()
plt.show()
| 32.25 | 86 | 0.710594 |

0228276a92c6a30f5b0e37fbcfa61081060a1750 | 2,052 | py | Python
docs/auto_aim.py | mbway/game-jam-2018 | 00ea01d0eb697ceddba009034351f859dba98ab5 | ["MIT"] | null | null | null
docs/auto_aim.py | mbway/game-jam-2018 | 00ea01d0eb697ceddba009034351f859dba98ab5 | ["MIT"] | null | null | null
docs/auto_aim.py | mbway/game-jam-2018 | 00ea01d0eb697ceddba009034351f859dba98ab5 | ["MIT"] | null | null | null
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi
def main():
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
x = np.linspace(-pi*0.75, pi*0.75, num=200)
start_influence = 0.1 # [0,1]
max_influence = 0.8 # [0,1]
start_diff = 1 # rad
def influence(power):
width = -np.log(start_influence/max_influence)/(start_diff**power)
return max_influence * np.exp(-width*(x**power))
ax1.axhline(y=max_influence, c='r', linestyle=':', alpha=0.5, label='max_influence')
ax1.axhline(y=start_influence, c='b', linestyle=':', alpha=0.5, label='start_influence')
ax1.axvline(x=start_diff, c='grey', linestyle='--', alpha=0.5, label=r'$\pm$start_diff')
ax1.axvline(x=-start_diff, c='grey', linestyle='--', alpha=0.5)
ax1.plot(x, influence(2), label='influence (power=2)')
ax1.plot(x, influence(4), label='influence (power=4)')
ax1.plot(x, influence(6), label='influence (power=6)')
ax1.set_ylim((-0.01, 1))
ax1.legend()
ax1.set_ylabel('auto aim influence')
ax1.set_xlabel('angle difference (rads)')
ax1.set_title('Auto Aim Influence')
x = np.linspace(0, 1, num=200)
max_start_diff_multiply = 4
def start_diff_mul(xs):
return (max_start_diff_multiply-1)*np.exp(-8*xs)+1
ax2.plot(x, start_diff_mul(x), label='multiplier')
x = np.linspace(-0.2, 0, num=100)
ax2.plot(x, start_diff_mul(x), c='grey', linestyle=':')
ax2.axvline(x=0, c='k')
ax2.axhline(y=max_start_diff_multiply, c='grey', linestyle='--', label='max_start_diff_multiply')
ax2.axhline(y=1, c='grey', linestyle='--', label='1.0')
ax2.set_ylim((0, max_start_diff_multiply+1))
ax2.legend()
ax2.set_ylabel('multiplier')
ax2.set_xlabel('subtended angle (rads)')
ax2.set_title('Auto Aim start_diff multiplier')
fig.tight_layout()
fig.savefig('auto_aim.pdf', bbox_inches='tight')
fig.savefig('auto_aim.png', dpi=100, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
main()
| 34.779661 | 101 | 0.648635 |

c8c0b92b08e56d896cadb4bc53d2c88b77b07ac4 | 730 | py | Python
complex_venv/lib/python3.7/site-packages/shexer/io/graph/yielder/multi_tsv_nt_triples_yielder.py | lubianat/complex_bot | e0ddabcc0487c52b14fb94950c5a812f0bdb2283 | ["MIT"] | 1 | 2021-10-06T00:21:10.000Z | 2021-10-06T00:21:10.000Z
complex_venv/lib/python3.7/site-packages/shexer/io/graph/yielder/multi_tsv_nt_triples_yielder.py | lubianat/complex_bot | e0ddabcc0487c52b14fb94950c5a812f0bdb2283 | ["MIT"] | 14 | 2021-01-15T21:51:38.000Z | 2021-11-10T10:08:22.000Z
complex_venv/lib/python3.7/site-packages/shexer/io/graph/yielder/multi_tsv_nt_triples_yielder.py | lubianat/complex_bot | e0ddabcc0487c52b14fb94950c5a812f0bdb2283 | ["MIT"] | 1 | 2021-01-18T10:32:56.000Z | 2021-01-18T10:32:56.000Z
from shexer.io.graph.yielder.tsv_nt_triples_yielder import TsvNtTriplesYielder
from shexer.io.graph.yielder.multifile_base_triples_yielder import MultifileBaseTripleYielder
class MultiTsvNtTriplesYielder(MultifileBaseTripleYielder):
def __init__(self, list_of_files, allow_untyped_numbers=False):
super(MultiTsvNtTriplesYielder, self).__init__(list_of_files=list_of_files,
allow_untyped_numbers=allow_untyped_numbers)
def _constructor_file_yielder(self, a_source_file, parse_namespaces=False):
return TsvNtTriplesYielder(source_file=a_source_file,
allow_untyped_numbers=self._allow_untyped_numbers)
| 52.142857 | 100 | 0.735616 |

c9b41a18d6999c394bf2e2c0547ad118721dde2e | 5,064 | py | Python
paddlenlp/ops/faster_transformer/sample/plato_export_model_sample.py | d294270681/PaddleNLP | 76385a79ce835049aae3975f882baf8bcd1c65fc | ["Apache-2.0"] | null | null | null
paddlenlp/ops/faster_transformer/sample/plato_export_model_sample.py | d294270681/PaddleNLP | 76385a79ce835049aae3975f882baf8bcd1c65fc | ["Apache-2.0"] | null | null | null
paddlenlp/ops/faster_transformer/sample/plato_export_model_sample.py | d294270681/PaddleNLP | 76385a79ce835049aae3975f882baf8bcd1c65fc | ["Apache-2.0"] | null | null | null
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import numpy as np
from attrdict import AttrDict
import argparse
import time
import paddle
import yaml
from pprint import pprint
from paddlenlp.ops import FasterGPT
from paddlenlp.transformers import UnifiedTransformerLMHeadModel, UnifiedTransformerTokenizer
from paddlenlp.ops import FasterUnifiedTransformer
from paddlenlp.utils.log import logger
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
default="plato-xl",
type=str,
help="The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. "
)
parser.add_argument(
"--inference_model_dir",
default="./infer_model/",
type=str,
help="Path to save inference model of gpt. ")
parser.add_argument(
"--topk",
default=4,
type=int,
help="The number of candidate to procedure top_k sampling. ")
parser.add_argument(
"--topp",
default=1.0,
type=float,
help="The probability threshold to procedure top_p sampling. ")
parser.add_argument(
"--max_out_len", default=64, type=int, help="Maximum output length. ")
parser.add_argument(
"--min_out_len", default=1, type=int, help="Minimum output length. ")
parser.add_argument(
"--num_return_sequence",
default=1,
type=int,
help="The number of returned sequence. ")
parser.add_argument(
"--temperature",
default=1.0,
type=float,
help="The temperature to set. ")
parser.add_argument(
"--num_return_sequences",
default=1,
type=int,
help="The number of returned sequences. ")
parser.add_argument(
"--use_fp16_decoding",
action="store_true",
help="Whether to use fp16 decoding to predict. ")
parser.add_argument(
"--decoding_strategy",
default="sampling",
choices=["sampling", "beam_search"],
type=str,
help="The main strategy to decode. ")
parser.add_argument(
"--num_beams",
default=4,
type=int,
help="The number of candidate to procedure beam search. ")
parser.add_argument(
"--diversity_rate",
default=0.0,
type=float,
help="The diversity rate to procedure beam search. ")
args = parser.parse_args()
return args
def do_predict(args):
place = "gpu"
place = paddle.set_device(place)
model_name = 'plato-xl'
model = UnifiedTransformerLMHeadModel.from_pretrained(model_name)
tokenizer = UnifiedTransformerTokenizer.from_pretrained(model_name)
plato = FasterUnifiedTransformer(
model=model, use_fp16_decoding=args.use_fp16_decoding)
# Set evaluate mode
plato.eval()
# Convert dygraph model to static graph model
plato = paddle.jit.to_static(
plato,
input_spec=[
# input_ids
paddle.static.InputSpec(
shape=[None, None], dtype="int32"),
# token_type_ids
paddle.static.InputSpec(
shape=[None, None], dtype="int32"),
# attention_mask
paddle.static.InputSpec(
shape=[None, 1, None, None], dtype="float32"),
# seq_len
paddle.static.InputSpec(
shape=[None], dtype="int32"),
# role_ids
paddle.static.InputSpec(
shape=[None, None], dtype="int32"),
# position_ids
paddle.static.InputSpec(
shape=[None, None], dtype="int32"),
args.max_out_len,
args.min_out_len,
args.topk,
args.topp,
args.decoding_strategy,
tokenizer.cls_token_id, # cls/bos
tokenizer.sep_token_id, # sep/eos
tokenizer.pad_token_id, # pad
args.num_beams, # num_beams. Used for beam_search.
args.diversity_rate, # diversity rate. Used for beam search.
args.temperature,
args.num_return_sequences,
])
# Save converted static graph model
paddle.jit.save(plato, os.path.join(args.inference_model_dir, "plato"))
logger.info("PLATO has been saved to {}".format(args.inference_model_dir))
if __name__ == "__main__":
args = parse_args()
pprint(args)
do_predict(args)
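# Example invocation (a sketch built from the argparse defaults above, not an
# official command line from the PaddleNLP docs):
#
#   python plato_export_model_sample.py --inference_model_dir ./infer_model/ \
#       --topk 4 --topp 1.0 --max_out_len 64 --use_fp16_decoding
#
# This exports the FasterUnifiedTransformer-wrapped plato-xl model as a static
# graph under ./infer_model/ for later inference.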
| 31.65 | 122 | 0.627962 |

a8530918efd50d913e9c772cbec2116431927d98 | 26,674 | py | Python
src/robusta/integrations/kubernetes/autogenerated/events.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | ["MIT"] | 273 | 2021-12-28T20:48:48.000Z | 2022-03-31T16:03:13.000Z
src/robusta/integrations/kubernetes/autogenerated/events.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | ["MIT"] | 103 | 2022-01-10T11:45:47.000Z | 2022-03-31T16:31:11.000Z
src/robusta/integrations/kubernetes/autogenerated/events.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | ["MIT"] | 35 | 2021-12-30T15:30:14.000Z | 2022-03-28T11:43:57.000Z
# This file was autogenerated. Do not edit.
import logging
import traceback
from dataclasses import dataclass
from abc import abstractmethod
from hikaru.model import Pod,ReplicaSet,DaemonSet,Deployment,StatefulSet,Service,Event,HorizontalPodAutoscaler,Node,ClusterRole,ClusterRoleBinding,Job,Namespace,ServiceAccount,PersistentVolume
from hikaru.utils import Response
from pydantic import BaseModel
from typing import Union, Optional, List
from ..base_event import K8sBaseChangeEvent
from ....core.model.events import ExecutionBaseEvent, ExecutionEventBaseParams
from ..custom_models import RobustaPod,RobustaDeployment,RobustaJob
from hikaru.model.rel_1_16.v1 import ClusterRole as v1ClusterRole
from hikaru.model.rel_1_16.v1 import ClusterRoleBinding as v1ClusterRoleBinding
from hikaru.model.rel_1_16.v1 import DaemonSet as v1DaemonSet
from hikaru.model.rel_1_16.v1 import Deployment as v1Deployment
from hikaru.model.rel_1_16.v1 import Event as v1Event
from hikaru.model.rel_1_16.v1 import HorizontalPodAutoscaler as v1HorizontalPodAutoscaler
from hikaru.model.rel_1_16.v1 import Job as v1Job
from hikaru.model.rel_1_16.v1 import Namespace as v1Namespace
from hikaru.model.rel_1_16.v1 import Node as v1Node
from hikaru.model.rel_1_16.v1 import PersistentVolume as v1PersistentVolume
from hikaru.model.rel_1_16.v1 import Pod as v1Pod
from hikaru.model.rel_1_16.v1 import ReplicaSet as v1ReplicaSet
from hikaru.model.rel_1_16.v1 import Service as v1Service
from hikaru.model.rel_1_16.v1 import ServiceAccount as v1ServiceAccount
from hikaru.model.rel_1_16.v1 import StatefulSet as v1StatefulSet
from hikaru.model.rel_1_16.v2beta1 import ClusterRole as v2beta1ClusterRole
from hikaru.model.rel_1_16.v2beta1 import ClusterRoleBinding as v2beta1ClusterRoleBinding
from hikaru.model.rel_1_16.v2beta1 import DaemonSet as v2beta1DaemonSet
from hikaru.model.rel_1_16.v2beta1 import Deployment as v2beta1Deployment
from hikaru.model.rel_1_16.v2beta1 import Event as v2beta1Event
from hikaru.model.rel_1_16.v2beta1 import HorizontalPodAutoscaler as v2beta1HorizontalPodAutoscaler
from hikaru.model.rel_1_16.v2beta1 import Job as v2beta1Job
from hikaru.model.rel_1_16.v2beta1 import Namespace as v2beta1Namespace
from hikaru.model.rel_1_16.v2beta1 import Node as v2beta1Node
from hikaru.model.rel_1_16.v2beta1 import PersistentVolume as v2beta1PersistentVolume
from hikaru.model.rel_1_16.v2beta1 import Pod as v2beta1Pod
from hikaru.model.rel_1_16.v2beta1 import ReplicaSet as v2beta1ReplicaSet
from hikaru.model.rel_1_16.v2beta1 import Service as v2beta1Service
from hikaru.model.rel_1_16.v2beta1 import ServiceAccount as v2beta1ServiceAccount
from hikaru.model.rel_1_16.v2beta1 import StatefulSet as v2beta1StatefulSet
from hikaru.model.rel_1_16.v2beta2 import ClusterRole as v2beta2ClusterRole
from hikaru.model.rel_1_16.v2beta2 import ClusterRoleBinding as v2beta2ClusterRoleBinding
from hikaru.model.rel_1_16.v2beta2 import DaemonSet as v2beta2DaemonSet
from hikaru.model.rel_1_16.v2beta2 import Deployment as v2beta2Deployment
from hikaru.model.rel_1_16.v2beta2 import Event as v2beta2Event
from hikaru.model.rel_1_16.v2beta2 import HorizontalPodAutoscaler as v2beta2HorizontalPodAutoscaler
from hikaru.model.rel_1_16.v2beta2 import Job as v2beta2Job
from hikaru.model.rel_1_16.v2beta2 import Namespace as v2beta2Namespace
from hikaru.model.rel_1_16.v2beta2 import Node as v2beta2Node
from hikaru.model.rel_1_16.v2beta2 import PersistentVolume as v2beta2PersistentVolume
from hikaru.model.rel_1_16.v2beta2 import Pod as v2beta2Pod
from hikaru.model.rel_1_16.v2beta2 import ReplicaSet as v2beta2ReplicaSet
from hikaru.model.rel_1_16.v2beta2 import Service as v2beta2Service
from hikaru.model.rel_1_16.v2beta2 import ServiceAccount as v2beta2ServiceAccount
from hikaru.model.rel_1_16.v2beta2 import StatefulSet as v2beta2StatefulSet
LOADERS_MAPPINGS = {
'pod': (True, RobustaPod.readNamespacedPod),
'replicaset': (True, ReplicaSet.readNamespacedReplicaSet),
'daemonset': (True, DaemonSet.readNamespacedDaemonSet),
'deployment': (True, RobustaDeployment.readNamespacedDeployment),
'statefulset': (True, StatefulSet.readNamespacedStatefulSet),
'service': (True, Service.readNamespacedService),
'event': (True, Event.readNamespacedEvent),
'horizontalpodautoscaler': (True, HorizontalPodAutoscaler.readNamespacedHorizontalPodAutoscaler),
'node': (False, Node.readNode),
'clusterrole': (False, ClusterRole.readClusterRole),
'clusterrolebinding': (False, ClusterRoleBinding.readClusterRoleBinding),
'job': (True, RobustaJob.readNamespacedJob),
'namespace': (False, Namespace.readNamespace),
'serviceaccount': (True, ServiceAccount.readNamespacedServiceAccount),
'persistentvolume': (False, PersistentVolume.readPersistentVolume),
}
class ResourceLoader:
@staticmethod
def read_resource(kind: str, name: str, namespace: str = None) -> Response:
resource_mapper = LOADERS_MAPPINGS[kind.lower()]
if not resource_mapper:
raise Exception("resource loader not found")
if resource_mapper[0]: # namespaced resource
return resource_mapper[1](name=name, namespace=namespace)
else:
return resource_mapper[1](name=name)
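# Illustrative call (derived from LOADERS_MAPPINGS above; the resource names are
# placeholders):
#   ResourceLoader.read_resource(kind="pod", name="my-pod", namespace="default")
# returns a hikaru Response whose .obj is the loaded resource, while
# non-namespaced kinds such as "node" or "clusterrole" are read by name only.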
class ResourceAttributes(ExecutionEventBaseParams):
kind: str
name: str
namespace: Optional[str] = None
@dataclass
class KubernetesResourceEvent(ExecutionBaseEvent):
_obj: Optional[Union[RobustaPod,ReplicaSet,DaemonSet,RobustaDeployment,StatefulSet,Service,Event,HorizontalPodAutoscaler,Node,ClusterRole,ClusterRoleBinding,RobustaJob,Namespace,ServiceAccount,PersistentVolume]] = None
def __init__(self, obj: Union[RobustaPod,ReplicaSet,DaemonSet,RobustaDeployment,StatefulSet,Service,Event,HorizontalPodAutoscaler,Node,ClusterRole,ClusterRoleBinding,RobustaJob,Namespace,ServiceAccount,PersistentVolume], named_sinks: List[str]):
super().__init__(named_sinks=named_sinks)
self._obj = obj
def get_resource(self) -> Optional[Union[RobustaPod,ReplicaSet,DaemonSet,RobustaDeployment,StatefulSet,Service,Event,HorizontalPodAutoscaler,Node,ClusterRole,ClusterRoleBinding,RobustaJob,Namespace,ServiceAccount,PersistentVolume]]:
return self._obj
@staticmethod
def from_params(params: ResourceAttributes) -> Optional["KubernetesResourceEvent"]:
try:
obj = ResourceLoader.read_resource(
kind=params.kind,
name=params.name,
namespace=params.namespace
).obj
except Exception:
logging.error(f"Could not load resource {params}", exc_info=True)
return None
return KubernetesResourceEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class KubernetesAnyChangeEvent(K8sBaseChangeEvent):
obj: Optional[Union[RobustaDeployment,RobustaJob,RobustaPod,v1ClusterRole,v1ClusterRoleBinding,v1DaemonSet,v1Event,v1HorizontalPodAutoscaler,v1Namespace,v1Node,v1PersistentVolume,v1ReplicaSet,v1Service,v1ServiceAccount,v1StatefulSet,v2beta1ClusterRole,v2beta1ClusterRoleBinding,v2beta1DaemonSet,v2beta1Event,v2beta1HorizontalPodAutoscaler,v2beta1Namespace,v2beta1Node,v2beta1PersistentVolume,v2beta1ReplicaSet,v2beta1Service,v2beta1ServiceAccount,v2beta1StatefulSet,v2beta2ClusterRole,v2beta2ClusterRoleBinding,v2beta2DaemonSet,v2beta2Event,v2beta2HorizontalPodAutoscaler,v2beta2Namespace,v2beta2Node,v2beta2PersistentVolume,v2beta2ReplicaSet,v2beta2Service,v2beta2ServiceAccount,v2beta2StatefulSet]] = None
old_obj: Optional[Union[RobustaDeployment,RobustaJob,RobustaPod,v1ClusterRole,v1ClusterRoleBinding,v1DaemonSet,v1Event,v1HorizontalPodAutoscaler,v1Namespace,v1Node,v1PersistentVolume,v1ReplicaSet,v1Service,v1ServiceAccount,v1StatefulSet,v2beta1ClusterRole,v2beta1ClusterRoleBinding,v2beta1DaemonSet,v2beta1Event,v2beta1HorizontalPodAutoscaler,v2beta1Namespace,v2beta1Node,v2beta1PersistentVolume,v2beta1ReplicaSet,v2beta1Service,v2beta1ServiceAccount,v2beta1StatefulSet,v2beta2ClusterRole,v2beta2ClusterRoleBinding,v2beta2DaemonSet,v2beta2Event,v2beta2HorizontalPodAutoscaler,v2beta2Namespace,v2beta2Node,v2beta2PersistentVolume,v2beta2ReplicaSet,v2beta2Service,v2beta2ServiceAccount,v2beta2StatefulSet]] = None
def get_resource(self) -> Optional[Union[RobustaDeployment,RobustaJob,RobustaPod,v1ClusterRole,v1ClusterRoleBinding,v1DaemonSet,v1Event,v1HorizontalPodAutoscaler,v1Namespace,v1Node,v1PersistentVolume,v1ReplicaSet,v1Service,v1ServiceAccount,v1StatefulSet,v2beta1ClusterRole,v2beta1ClusterRoleBinding,v2beta1DaemonSet,v2beta1Event,v2beta1HorizontalPodAutoscaler,v2beta1Namespace,v2beta1Node,v2beta1PersistentVolume,v2beta1ReplicaSet,v2beta1Service,v2beta1ServiceAccount,v2beta1StatefulSet,v2beta2ClusterRole,v2beta2ClusterRoleBinding,v2beta2DaemonSet,v2beta2Event,v2beta2HorizontalPodAutoscaler,v2beta2Namespace,v2beta2Node,v2beta2PersistentVolume,v2beta2ReplicaSet,v2beta2Service,v2beta2ServiceAccount,v2beta2StatefulSet]]:
return self.obj
class PodAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class PodEvent(KubernetesResourceEvent):
def __init__(self, obj: RobustaPod, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_pod(self) -> Optional[RobustaPod]:
return self._obj
@staticmethod
def from_params(params: PodAttributes) -> Optional["PodEvent"]:
try:
obj = RobustaPod.readNamespacedPod(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load Pod {params}", exc_info=True)
return None
return PodEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class PodChangeEvent(PodEvent, KubernetesAnyChangeEvent):
obj: Optional[RobustaPod] = None
old_obj: Optional[RobustaPod] = None
def get_pod(self) -> Optional[RobustaPod]:
return self.obj
class ReplicaSetAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class ReplicaSetEvent(KubernetesResourceEvent):
def __init__(self, obj: ReplicaSet, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_replicaset(self) -> Optional[ReplicaSet]:
return self._obj
@staticmethod
def from_params(params: ReplicaSetAttributes) -> Optional["ReplicaSetEvent"]:
try:
obj = ReplicaSet.readNamespacedReplicaSet(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load ReplicaSet {params}", exc_info=True)
return None
return ReplicaSetEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class ReplicaSetChangeEvent(ReplicaSetEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1ReplicaSet,v2beta1ReplicaSet,v2beta2ReplicaSet]] = None
old_obj: Optional[Union[v1ReplicaSet,v2beta1ReplicaSet,v2beta2ReplicaSet]] = None
def get_replicaset(self) -> Optional[Union[v1ReplicaSet,v2beta1ReplicaSet,v2beta2ReplicaSet]]:
return self.obj
class DaemonSetAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class DaemonSetEvent(KubernetesResourceEvent):
def __init__(self, obj: DaemonSet, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_daemonset(self) -> Optional[DaemonSet]:
return self._obj
@staticmethod
def from_params(params: DaemonSetAttributes) -> Optional["DaemonSetEvent"]:
try:
obj = DaemonSet.readNamespacedDaemonSet(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load DaemonSet {params}", exc_info=True)
return None
return DaemonSetEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class DaemonSetChangeEvent(DaemonSetEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1DaemonSet,v2beta1DaemonSet,v2beta2DaemonSet]] = None
old_obj: Optional[Union[v1DaemonSet,v2beta1DaemonSet,v2beta2DaemonSet]] = None
def get_daemonset(self) -> Optional[Union[v1DaemonSet,v2beta1DaemonSet,v2beta2DaemonSet]]:
return self.obj
class DeploymentAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class DeploymentEvent(KubernetesResourceEvent):
def __init__(self, obj: RobustaDeployment, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_deployment(self) -> Optional[RobustaDeployment]:
return self._obj
@staticmethod
def from_params(params: DeploymentAttributes) -> Optional["DeploymentEvent"]:
try:
obj = RobustaDeployment.readNamespacedDeployment(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load Deployment {params}", exc_info=True)
return None
return DeploymentEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class DeploymentChangeEvent(DeploymentEvent, KubernetesAnyChangeEvent):
obj: Optional[RobustaDeployment] = None
old_obj: Optional[RobustaDeployment] = None
def get_deployment(self) -> Optional[RobustaDeployment]:
return self.obj
class StatefulSetAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class StatefulSetEvent(KubernetesResourceEvent):
def __init__(self, obj: StatefulSet, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_statefulset(self) -> Optional[StatefulSet]:
return self._obj
@staticmethod
def from_params(params: StatefulSetAttributes) -> Optional["StatefulSetEvent"]:
try:
obj = StatefulSet.readNamespacedStatefulSet(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load StatefulSet {params}", exc_info=True)
return None
return StatefulSetEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class StatefulSetChangeEvent(StatefulSetEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1StatefulSet,v2beta1StatefulSet,v2beta2StatefulSet]] = None
old_obj: Optional[Union[v1StatefulSet,v2beta1StatefulSet,v2beta2StatefulSet]] = None
def get_statefulset(self) -> Optional[Union[v1StatefulSet,v2beta1StatefulSet,v2beta2StatefulSet]]:
return self.obj
class ServiceAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class ServiceEvent(KubernetesResourceEvent):
def __init__(self, obj: Service, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_service(self) -> Optional[Service]:
return self._obj
@staticmethod
def from_params(params: ServiceAttributes) -> Optional["ServiceEvent"]:
try:
obj = Service.readNamespacedService(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load Service {params}", exc_info=True)
return None
return ServiceEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class ServiceChangeEvent(ServiceEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1Service,v2beta1Service,v2beta2Service]] = None
old_obj: Optional[Union[v1Service,v2beta1Service,v2beta2Service]] = None
def get_service(self) -> Optional[Union[v1Service,v2beta1Service,v2beta2Service]]:
return self.obj
class EventAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class EventEvent(KubernetesResourceEvent):
def __init__(self, obj: Event, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_event(self) -> Optional[Event]:
return self._obj
@staticmethod
def from_params(params: EventAttributes) -> Optional["EventEvent"]:
try:
obj = Event.readNamespacedEvent(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load Event {params}", exc_info=True)
return None
return EventEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class EventChangeEvent(EventEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1Event,v2beta1Event,v2beta2Event]] = None
old_obj: Optional[Union[v1Event,v2beta1Event,v2beta2Event]] = None
def get_event(self) -> Optional[Union[v1Event,v2beta1Event,v2beta2Event]]:
return self.obj
class HorizontalPodAutoscalerAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class HorizontalPodAutoscalerEvent(KubernetesResourceEvent):
def __init__(self, obj: HorizontalPodAutoscaler, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_horizontalpodautoscaler(self) -> Optional[HorizontalPodAutoscaler]:
return self._obj
@staticmethod
def from_params(params: HorizontalPodAutoscalerAttributes) -> Optional["HorizontalPodAutoscalerEvent"]:
try:
obj = HorizontalPodAutoscaler.readNamespacedHorizontalPodAutoscaler(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load HorizontalPodAutoscaler {params}", exc_info=True)
return None
return HorizontalPodAutoscalerEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class HorizontalPodAutoscalerChangeEvent(HorizontalPodAutoscalerEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1HorizontalPodAutoscaler,v2beta1HorizontalPodAutoscaler,v2beta2HorizontalPodAutoscaler]] = None
old_obj: Optional[Union[v1HorizontalPodAutoscaler,v2beta1HorizontalPodAutoscaler,v2beta2HorizontalPodAutoscaler]] = None
def get_horizontalpodautoscaler(self) -> Optional[Union[v1HorizontalPodAutoscaler,v2beta1HorizontalPodAutoscaler,v2beta2HorizontalPodAutoscaler]]:
return self.obj
class NodeAttributes(ExecutionEventBaseParams):
name: str
@dataclass
class NodeEvent(KubernetesResourceEvent):
def __init__(self, obj: Node, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_node(self) -> Optional[Node]:
return self._obj
@staticmethod
def from_params(params: NodeAttributes) -> Optional["NodeEvent"]:
try:
obj = Node.readNode(name=params.name).obj
except Exception:
logging.error(f"Could not load Node {params}", exc_info=True)
return None
return NodeEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class NodeChangeEvent(NodeEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1Node,v2beta1Node,v2beta2Node]] = None
old_obj: Optional[Union[v1Node,v2beta1Node,v2beta2Node]] = None
def get_node(self) -> Optional[Union[v1Node,v2beta1Node,v2beta2Node]]:
return self.obj
class ClusterRoleAttributes(ExecutionEventBaseParams):
name: str
@dataclass
class ClusterRoleEvent(KubernetesResourceEvent):
def __init__(self, obj: ClusterRole, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_clusterrole(self) -> Optional[ClusterRole]:
return self._obj
@staticmethod
def from_params(params: ClusterRoleAttributes) -> Optional["ClusterRoleEvent"]:
try:
obj = ClusterRole.readClusterRole(name=params.name).obj
except Exception:
logging.error(f"Could not load ClusterRole {params}", exc_info=True)
return None
return ClusterRoleEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class ClusterRoleChangeEvent(ClusterRoleEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1ClusterRole,v2beta1ClusterRole,v2beta2ClusterRole]] = None
old_obj: Optional[Union[v1ClusterRole,v2beta1ClusterRole,v2beta2ClusterRole]] = None
def get_clusterrole(self) -> Optional[Union[v1ClusterRole,v2beta1ClusterRole,v2beta2ClusterRole]]:
return self.obj
class ClusterRoleBindingAttributes(ExecutionEventBaseParams):
name: str
@dataclass
class ClusterRoleBindingEvent(KubernetesResourceEvent):
def __init__(self, obj: ClusterRoleBinding, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_clusterrolebinding(self) -> Optional[ClusterRoleBinding]:
return self._obj
@staticmethod
def from_params(params: ClusterRoleBindingAttributes) -> Optional["ClusterRoleBindingEvent"]:
try:
obj = ClusterRoleBinding.readClusterRoleBinding(name=params.name).obj
except Exception:
logging.error(f"Could not load ClusterRoleBinding {params}", exc_info=True)
return None
return ClusterRoleBindingEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class ClusterRoleBindingChangeEvent(ClusterRoleBindingEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1ClusterRoleBinding,v2beta1ClusterRoleBinding,v2beta2ClusterRoleBinding]] = None
old_obj: Optional[Union[v1ClusterRoleBinding,v2beta1ClusterRoleBinding,v2beta2ClusterRoleBinding]] = None
def get_clusterrolebinding(self) -> Optional[Union[v1ClusterRoleBinding,v2beta1ClusterRoleBinding,v2beta2ClusterRoleBinding]]:
return self.obj
class JobAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class JobEvent(KubernetesResourceEvent):
def __init__(self, obj: RobustaJob, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_job(self) -> Optional[RobustaJob]:
return self._obj
@staticmethod
def from_params(params: JobAttributes) -> Optional["JobEvent"]:
try:
obj = RobustaJob.readNamespacedJob(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load Job {params}", exc_info=True)
return None
return JobEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class JobChangeEvent(JobEvent, KubernetesAnyChangeEvent):
obj: Optional[RobustaJob] = None
old_obj: Optional[RobustaJob] = None
def get_job(self) -> Optional[RobustaJob]:
return self.obj
class NamespaceAttributes(ExecutionEventBaseParams):
name: str
@dataclass
class NamespaceEvent(KubernetesResourceEvent):
def __init__(self, obj: Namespace, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_namespace(self) -> Optional[Namespace]:
return self._obj
@staticmethod
def from_params(params: NamespaceAttributes) -> Optional["NamespaceEvent"]:
try:
obj = Namespace.readNamespace(name=params.name).obj
except Exception:
logging.error(f"Could not load Namespace {params}", exc_info=True)
return None
return NamespaceEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class NamespaceChangeEvent(NamespaceEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1Namespace,v2beta1Namespace,v2beta2Namespace]] = None
old_obj: Optional[Union[v1Namespace,v2beta1Namespace,v2beta2Namespace]] = None
def get_namespace(self) -> Optional[Union[v1Namespace,v2beta1Namespace,v2beta2Namespace]]:
return self.obj
class ServiceAccountAttributes(ExecutionEventBaseParams):
name: str
namespace: str
@dataclass
class ServiceAccountEvent(KubernetesResourceEvent):
def __init__(self, obj: ServiceAccount, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_serviceaccount(self) -> Optional[ServiceAccount]:
return self._obj
@staticmethod
def from_params(params: ServiceAccountAttributes) -> Optional["ServiceAccountEvent"]:
try:
obj = ServiceAccount.readNamespacedServiceAccount(name=params.name, namespace=params.namespace).obj
except Exception:
logging.error(f"Could not load ServiceAccount {params}", exc_info=True)
return None
return ServiceAccountEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class ServiceAccountChangeEvent(ServiceAccountEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1ServiceAccount,v2beta1ServiceAccount,v2beta2ServiceAccount]] = None
old_obj: Optional[Union[v1ServiceAccount,v2beta1ServiceAccount,v2beta2ServiceAccount]] = None
def get_serviceaccount(self) -> Optional[Union[v1ServiceAccount,v2beta1ServiceAccount,v2beta2ServiceAccount]]:
return self.obj
class PersistentVolumeAttributes(ExecutionEventBaseParams):
name: str
@dataclass
class PersistentVolumeEvent(KubernetesResourceEvent):
def __init__(self, obj: PersistentVolume, named_sinks: List[str]):
super().__init__(obj=obj, named_sinks=named_sinks)
def get_persistentvolume(self) -> Optional[PersistentVolume]:
return self._obj
@staticmethod
def from_params(params: PersistentVolumeAttributes) -> Optional["PersistentVolumeEvent"]:
try:
obj = PersistentVolume.readPersistentVolume(name=params.name).obj
except Exception:
logging.error(f"Could not load PersistentVolume {params}", exc_info=True)
return None
return PersistentVolumeEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class PersistentVolumeChangeEvent(PersistentVolumeEvent, KubernetesAnyChangeEvent):
obj: Optional[Union[v1PersistentVolume,v2beta1PersistentVolume,v2beta2PersistentVolume]] = None
old_obj: Optional[Union[v1PersistentVolume,v2beta1PersistentVolume,v2beta2PersistentVolume]] = None
def get_persistentvolume(self) -> Optional[Union[v1PersistentVolume,v2beta1PersistentVolume,v2beta2PersistentVolume]]:
return self.obj
KIND_TO_EVENT_CLASS = {
'pod': PodChangeEvent,
'replicaset': ReplicaSetChangeEvent,
'daemonset': DaemonSetChangeEvent,
'deployment': DeploymentChangeEvent,
'statefulset': StatefulSetChangeEvent,
'service': ServiceChangeEvent,
'event': EventChangeEvent,
'horizontalpodautoscaler': HorizontalPodAutoscalerChangeEvent,
'node': NodeChangeEvent,
'clusterrole': ClusterRoleChangeEvent,
'clusterrolebinding': ClusterRoleBindingChangeEvent,
'job': JobChangeEvent,
'namespace': NamespaceChangeEvent,
'serviceaccount': ServiceAccountChangeEvent,
'persistentvolume': PersistentVolumeChangeEvent
}
| 42.272583 | 726 | 0.768464 |

d5c00b12f31d47c39358bb04cef918a333106eba | 2,727 | py | Python
src/phantomx_gazebo/phantomx.py | kkonen/phantomx_gazebo | 19f5d5735e15aa3cc1bbd2c051d6f7024862752e | ["BSD-2-Clause-FreeBSD"] | 30 | 2015-04-01T16:55:56.000Z | 2022-01-13T00:02:36.000Z
src/phantomx_gazebo/phantomx.py | kkonen/phantomx_gazebo | 19f5d5735e15aa3cc1bbd2c051d6f7024862752e | ["BSD-2-Clause-FreeBSD"] | 2 | 2015-07-30T12:47:51.000Z | 2019-03-09T21:27:25.000Z
src/phantomx_gazebo/phantomx.py | kkonen/phantomx_gazebo | 19f5d5735e15aa3cc1bbd2c051d6f7024862752e | ["BSD-2-Clause-FreeBSD"] | 21 | 2015-07-30T09:36:41.000Z | 2021-12-20T14:59:13.000Z
import rospy
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
class PhantomX:
"""Client ROS class for manipulating PhantomX in Gazebo"""
def __init__(self, ns='/phantomx/'):
self.ns = ns
self.joints = None
self.angles = None
self._sub_joints = rospy.Subscriber(
ns + 'joint_states', JointState, self._cb_joints, queue_size=1)
rospy.loginfo('Waiting for joints to be populated...')
while not rospy.is_shutdown():
if self.joints is not None:
break
rospy.sleep(0.1)
rospy.loginfo('Waiting for joints to be populated...')
rospy.loginfo('Joints populated')
rospy.loginfo('Creating joint command publishers')
self._pub_joints = {}
for j in self.joints:
p = rospy.Publisher(
ns + j + '_position_controller/command', Float64, queue_size=1)
self._pub_joints[j] = p
rospy.sleep(1)
self._pub_cmd_vel = rospy.Publisher(ns + 'cmd_vel', Twist, queue_size=1)
def set_walk_velocity(self, x, y, t):
msg = Twist()
msg.linear.x = x
msg.linear.y = y
msg.angular.z = t
self._pub_cmd_vel.publish(msg)
def _cb_joints(self, msg):
if self.joints is None:
self.joints = msg.name
self.angles = msg.position
def get_angles(self):
if self.joints is None:
return None
if self.angles is None:
return None
return dict(zip(self.joints, self.angles))
def set_angles(self, angles):
for j, v in angles.items():
if j not in self.joints:
                rospy.logerr('Invalid joint name "' + j + '"')
continue
self._pub_joints[j].publish(v)
def set_angles_slow(self, stop_angles, delay=2):
start_angles = self.get_angles()
start = time.time()
stop = start + delay
r = rospy.Rate(100)
while not rospy.is_shutdown():
t = time.time()
if t > stop:
break
ratio = (t - start) / delay
angles = interpolate(stop_angles, start_angles, ratio)
self.set_angles(angles)
r.sleep()
def interpolate(anglesa, anglesb, coefa):
z = {}
joints = anglesa.keys()
for j in joints:
z[j] = anglesa[j] * coefa + anglesb[j] * (1 - coefa)
return z
def get_distance(anglesa, anglesb):
d = 0
joints = anglesa.keys()
if len(joints) == 0:
return 0
for j in joints:
d += abs(anglesb[j] - anglesa[j])
d /= len(joints)
return d
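# Minimal usage sketch (assumes a running phantomx_gazebo simulation publishing
# /phantomx/joint_states; not part of the original module):
#
#   import rospy
#   from phantomx_gazebo.phantomx import PhantomX
#
#   rospy.init_node('phantomx_demo')
#   robot = PhantomX()                       # blocks until joint_states arrive
#   robot.set_walk_velocity(0.5, 0.0, 0.0)   # walk forward
#   rospy.sleep(2)
#   robot.set_walk_velocity(0.0, 0.0, 0.0)   # stop
#   robot.set_angles_slow({j: 0.0 for j in robot.joints}, delay=2)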
| 28.705263 | 80 | 0.569124 |

e0d2885f1e95069a1e7859d88d3edf8cca939904 | 542 | py | Python
test_cluster_response_time.py | qikiqi/docker-filebeat | ad7e75b8553b00be32a65bc54878f23c82728290 | ["MIT"] | null | null | null
test_cluster_response_time.py | qikiqi/docker-filebeat | ad7e75b8553b00be32a65bc54878f23c82728290 | ["MIT"] | null | null | null
test_cluster_response_time.py | qikiqi/docker-filebeat | ad7e75b8553b00be32a65bc54878f23c82728290 | ["MIT"] | null | null | null
import requests
import pprint as pp
import sys
times = int(sys.argv[2])
for i in range(times):
headers = {
'Content-Type': 'application/json',
}
data = '{"profile": true,"query" : {"match" : { "ts_date" : "20180315" }}}'
response = requests.post('http://elastic:changeme@{}:9200/filebeat/_search?human=true'.format(sys.argv[1]), headers=headers, data=data)
resp_time = response.json()['profile']['shards'][0]['searches'][0]['query'][0]['time']
print("Iteration {} response time: {}".format(i, resp_time))
| 30.111111 | 139 | 0.632841 |

ec714c22711073ec04c038d3d96c587f0113613b | 888 | py | Python
toolset/utils/audit.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | ["BSD-3-Clause"] | 5,300 | 2015-01-02T08:04:20.000Z | 2022-03-31T10:08:33.000Z
toolset/utils/audit.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | ["BSD-3-Clause"] | 3,075 | 2015-01-01T05:11:45.000Z | 2022-03-31T23:56:33.000Z
toolset/utils/audit.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | ["BSD-3-Clause"] | 2,151 | 2015-01-02T14:16:09.000Z | 2022-03-30T00:15:26.000Z
from toolset.utils.output_helper import log
from colorama import Fore
class Audit:
'''
Audits frameworks for inconsistencies
'''
def __init__(self, benchmarker):
self.benchmarker = benchmarker
def start_audit(self):
for lang in self.benchmarker.metadata.gather_languages():
for test_dir in self.benchmarker.metadata.gather_language_tests(
lang):
self.audit_test_dir(test_dir)
def audit_test_dir(self, test_dir):
warnings = 0
log('Auditing %s:' % test_dir, color=Fore.BLUE)
if not self.benchmarker.metadata.has_file(test_dir, 'README.md'):
log('README.md file is missing')
warnings += 1
if warnings:
log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
else:
log('No problems to report', color=Fore.GREEN)
| 28.645161 | 76 | 0.618243 |

27780476e04d4f37214826072debd6e66845bce3 | 2,675 | py | Python
src/robotide/lib/robot/running/arguments/argumentspec.py | adrianyorke/RIDE | b7db7fb376276add0cb1d7b0ee9cf89500d0d26d | ["ECL-2.0", "Apache-2.0"] | 775 | 2015-01-12T06:54:09.000Z | 2022-03-25T05:18:05.000Z
src/robotide/lib/robot/running/arguments/argumentspec.py | adrianyorke/RIDE | b7db7fb376276add0cb1d7b0ee9cf89500d0d26d | ["ECL-2.0", "Apache-2.0"] | 2,191 | 2015-05-19T16:49:09.000Z | 2022-03-28T21:38:34.000Z
src/robotide/lib/robot/running/arguments/argumentspec.py | adrianyorke/RIDE | b7db7fb376276add0cb1d7b0ee9cf89500d0d26d | ["ECL-2.0", "Apache-2.0"] | 382 | 2015-01-24T08:41:44.000Z | 2022-03-13T10:14:20.000Z
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robotide.lib.robot.utils import setter
from .argumentconverter import ArgumentConverter
from .argumentmapper import ArgumentMapper
from .argumentresolver import ArgumentResolver
from .typevalidator import TypeValidator
class ArgumentSpec(object):
def __init__(self, name=None, type='Keyword', positional=None,
varargs=None, kwonlyargs=None, kwargs=None, defaults=None,
types=None, supports_named=True):
self.name = name
self.type = type
self.positional = positional or []
self.varargs = varargs
self.kwonlyargs = kwonlyargs or []
self.kwargs = kwargs
self.defaults = defaults or {}
self.types = types
self.supports_named = supports_named
@setter
def types(self, types):
return TypeValidator(self).validate(types)
@property
def minargs(self):
required = [arg for arg in self.positional if arg not in self.defaults]
return len(required)
@property
def maxargs(self):
return len(self.positional) if not self.varargs else sys.maxsize
@property
def argument_names(self):
return (self.positional + ([self.varargs] if self.varargs else []) +
self.kwonlyargs + ([self.kwargs] if self.kwargs else []))
def resolve(self, arguments, variables=None, resolve_named=True,
resolve_variables_until=None, dict_to_kwargs=False):
resolver = ArgumentResolver(self, resolve_named,
resolve_variables_until, dict_to_kwargs)
positional, named = resolver.resolve(arguments, variables)
if self.types or self.defaults:
converter = ArgumentConverter(self, dry_run=not variables)
positional, named = converter.convert(positional, named)
return positional, named
def map(self, positional, named, replace_defaults=True):
mapper = ArgumentMapper(self)
return mapper.map(positional, named, replace_defaults)
| 37.152778 | 79 | 0.687103 |

545b89cb60838d6339ab1852e4e7eb46c1590184 | 751 | py | Python
pubsubat/urls.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | ["MIT"] | null | null | null
pubsubat/urls.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | ["MIT"] | null | null | null
pubsubat/urls.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | ["MIT"] | null | null | null
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
from graphql_playground.views import GraphQLPlaygroundView
from pubsub.views import index
urlpatterns = [
path('admin/', admin.site.urls),
path('summernote/', include('django_summernote.urls')),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
path('playground/', GraphQLPlaygroundView.as_view(endpoint="/graphql/")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [re_path(r'', index)]
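# In summary (descriptive note): Django admin at /admin/, django-summernote under
# /summernote/, the GraphQL API at /graphql/ (CSRF-exempt, GraphiQL enabled), a
# GraphQL Playground UI at /playground/, plus a catch-all re_path that routes
# remaining URLs to the pubsub index view.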
| 32.652174 | 80 | 0.778961 |

85b5fa38d032e98644a4822963580067d3151834 | 213 | py | Python
bitdotio/api/__init__.py | bitdotioinc/python-bitdotio | acdc58e34c047a4fda9cee557bf3851f6cfec71e | ["MIT"] | 6 | 2021-03-03T21:56:28.000Z | 2022-02-16T09:03:20.000Z
bitdotio/api/__init__.py | bitdotioinc/python-bitdotio | acdc58e34c047a4fda9cee557bf3851f6cfec71e | ["MIT"] | 4 | 2021-06-14T17:58:12.000Z | 2021-09-23T17:27:20.000Z
bitdotio/api/__init__.py | bitdotioinc/python-bitdotio | acdc58e34c047a4fda9cee557bf3851f6cfec71e | ["MIT"] | null | null | null
# do not import all apis into this module because that uses a lot of memory and stack frames
# if you need the ability to import all apis from one package, import them with
# from bitdotio.apis import ApiBitdotio
| 53.25 | 92 | 0.788732 |

27578770d6c55e6e0e95254bfa3776b3444a6e59 | 3,885 | py | Python
tests/test_plugin_utils.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | ["BSD-2-Clause"] | 1 | 2020-06-19T08:34:23.000Z | 2020-06-19T08:34:23.000Z
tests/test_plugin_utils.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | ["BSD-2-Clause"] | 1 | 2021-01-16T14:13:39.000Z | 2022-02-09T10:43:44.000Z
tests/test_plugin_utils.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | ["BSD-2-Clause"] | 1 | 2020-07-11T22:09:07.000Z | 2020-07-11T22:09:07.000Z
from __future__ import unicode_literals
import sys
import unittest
from streamlink.plugin.api.utils import itertags
def unsupported_versions_1979():
"""Unsupported python versions for itertags
3.7.0 - 3.7.2 and 3.8.0a1
- https://github.com/streamlink/streamlink/issues/1979
- https://bugs.python.org/issue34294
"""
v = sys.version_info
if (v.major == 3) and (
# 3.7.0 - 3.7.2
(v.minor == 7 and v.micro <= 2)
# 3.8.0a1
or (v.minor == 8 and v.micro == 0 and v.releaselevel == 'alpha' and v.serial <= 1)
):
return True
else:
return False
class TestPluginUtil(unittest.TestCase):
test_html = """
<!doctype html>
<html lang="en" class="no-js">
<title>Title</title>
<meta property="og:type" content= "website" />
<meta property="og:url" content="http://test.se/"/>
<meta property="og:site_name" content="Test" />
<script src="https://test.se/test.js"></script>
<link rel="stylesheet" type="text/css" href="https://test.se/test.css">
<script>Tester.ready(function () {
alert("Hello, world!"); });</script>
<p>
<a
href="http://test.se/foo">bar</a>
</p>
</html>
""" # noqa: W291
def test_itertags_single_text(self):
title = list(itertags(self.test_html, "title"))
self.assertTrue(len(title), 1)
self.assertEqual(title[0].tag, "title")
self.assertEqual(title[0].text, "Title")
self.assertEqual(title[0].attributes, {})
def test_itertags_attrs_text(self):
script = list(itertags(self.test_html, "script"))
self.assertTrue(len(script), 2)
self.assertEqual(script[0].tag, "script")
self.assertEqual(script[0].text, "")
self.assertEqual(script[0].attributes, {"src": "https://test.se/test.js"})
self.assertEqual(script[1].tag, "script")
self.assertEqual(script[1].text.strip(), """Tester.ready(function () {\nalert("Hello, world!"); });""")
self.assertEqual(script[1].attributes, {})
@unittest.skipIf(unsupported_versions_1979(),
"python3.7 issue, see bpo-34294")
def test_itertags_multi_attrs(self):
metas = list(itertags(self.test_html, "meta"))
self.assertTrue(len(metas), 3)
self.assertTrue(all(meta.tag == "meta" for meta in metas))
self.assertEqual(metas[0].text, None)
self.assertEqual(metas[1].text, None)
self.assertEqual(metas[2].text, None)
self.assertEqual(metas[0].attributes, {"property": "og:type", "content": "website"})
self.assertEqual(metas[1].attributes, {"property": "og:url", "content": "http://test.se/"})
self.assertEqual(metas[2].attributes, {"property": "og:site_name", "content": "Test"})
def test_multi_line_a(self):
anchor = list(itertags(self.test_html, "a"))
self.assertTrue(len(anchor), 1)
self.assertEqual(anchor[0].tag, "a")
self.assertEqual(anchor[0].text, "bar")
self.assertEqual(anchor[0].attributes, {"href": "http://test.se/foo"})
@unittest.skipIf(unsupported_versions_1979(),
"python3.7 issue, see bpo-34294")
def test_no_end_tag(self):
links = list(itertags(self.test_html, "link"))
self.assertTrue(len(links), 1)
self.assertEqual(links[0].tag, "link")
self.assertEqual(links[0].text, None)
self.assertEqual(links[0].attributes, {"rel": "stylesheet",
"type": "text/css",
"href": "https://test.se/test.css"})
def test_tag_inner_tag(self):
links = list(itertags(self.test_html, "p"))
self.assertTrue(len(links), 1)
self.assertEqual(links[0].tag, "p")
self.assertEqual(links[0].text.strip(), '<a \nhref="http://test.se/foo">bar</a>')
self.assertEqual(links[0].attributes, {})
| 38.088235 | 111 | 0.606435 |

fb32e014506e69ab0680016bfe499f6bb686f32c | 272 | py | Python
ch02/perceptron-nand.py | nobukatsu/deep-learning-from-scratch | 34e4b3e90aa2a3c34369653e75ae17a5a092ca94 | ["MIT"] | null | null | null
ch02/perceptron-nand.py | nobukatsu/deep-learning-from-scratch | 34e4b3e90aa2a3c34369653e75ae17a5a092ca94 | ["MIT"] | null | null | null
ch02/perceptron-nand.py | nobukatsu/deep-learning-from-scratch | 34e4b3e90aa2a3c34369653e75ae17a5a092ca94 | ["MIT"] | null | null | null
import numpy as np
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
b = 0.7
tmp = np.sum(w*x) + b
if tmp <= 0:
return 0
elif tmp > 0:
return 1
print(NAND(0,0))
print(NAND(1,0))
print(NAND(0,1))
print(NAND(1,1))
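# Expected output: 1, 1, 1, 0 (the NAND truth table): only the (1,1) input drives
# the weighted sum to -0.5 - 0.5 + 0.7 = -0.3, which is at or below the 0 threshold.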
| 16 | 30 | 0.5 |

e6e28ca269021cacbcf2ade0db64a484215f2339 | 338 | py | Python
src/ufdl/json/core/jobs/_ValueTypePair.py | waikato-ufdl/ufdl-json-messages | 408901bdf79aa9ae7cff1af165deee83e62f6088 | ["Apache-2.0"] | null | null | null
src/ufdl/json/core/jobs/_ValueTypePair.py | waikato-ufdl/ufdl-json-messages | 408901bdf79aa9ae7cff1af165deee83e62f6088 | ["Apache-2.0"] | null | null | null
src/ufdl/json/core/jobs/_ValueTypePair.py | waikato-ufdl/ufdl-json-messages | 408901bdf79aa9ae7cff1af165deee83e62f6088 | ["Apache-2.0"] | null | null | null
from wai.json.object import StrictJSONObject
from wai.json.object.property import StringProperty
class ValueTypePair(StrictJSONObject['ValueTypePair']):
"""
A pair of a value and its type.
"""
# The value passed to the input
value: str = StringProperty()
# The type of the value
type: str = StringProperty()
| 26 | 55 | 0.704142 |

f9ed5011ffef37009b331790110e40710d0507c9 | 5,241 | py | Python
conda_env/cli/main_create.py | cswartzvi/conda | b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137 | ["BSD-3-Clause"] | null | null | null
conda_env/cli/main_create.py | cswartzvi/conda | b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137 | ["BSD-3-Clause"] | null | null | null
conda_env/cli/main_create.py | cswartzvi/conda | b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137 | ["BSD-3-Clause"] | null | null | null
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import print_function
from argparse import RawDescriptionHelpFormatter
import json
import os
import sys
import textwrap
from conda.cli import install as cli_install
from conda.cli.conda_argparse import add_parser_default_packages, add_parser_json, \
add_parser_prefix, add_parser_networking, add_parser_experimental_solver
from conda.core.prefix_data import PrefixData
from conda.exceptions import SpecNotFound
from conda.gateways.disk.delete import rm_rf
from conda.misc import touch_nonadmin
from .common import get_prefix, print_result, get_filename
from .. import specs
from ..installers.base import InvalidInstaller, get_installer
description = """
Create an environment based on an environment file
"""
example = """
examples:
conda env create
conda env create -n name
conda env create vader/deathstar
conda env create -f=/path/to/environment.yml
conda env create -f=/path/to/requirements.txt -n deathstar
conda env create -f=/path/to/requirements.txt -p /home/user/software/deathstar
"""
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
'create',
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
p.add_argument(
'-f', '--file',
action='store',
help='environment definition file (default: environment.yml)',
default='environment.yml',
)
# Add name and prefix args
add_parser_prefix(p)
# Add networking args
add_parser_networking(p)
p.add_argument(
'remote_definition',
help='remote environment definition / IPython notebook',
action='store',
default=None,
nargs='?'
)
p.add_argument(
'--force',
help=('force creation of environment (removing a previously existing '
'environment of the same name).'),
action='store_true',
default=False,
)
p.add_argument(
'-d', '--dry-run',
help='Only display what would have been done.',
action='store_true',
default=False
)
add_parser_default_packages(p)
add_parser_json(p)
add_parser_experimental_solver(p)
p.set_defaults(func='.main_create.execute')
def execute(args, parser):
from conda.base.context import context
name = args.remote_definition or args.name
try:
spec = specs.detect(name=name, filename=get_filename(args.file), directory=os.getcwd())
env = spec.environment
# FIXME conda code currently requires args to have a name or prefix
# don't overwrite name if it's given. gh-254
if args.prefix is None and args.name is None:
args.name = env.name
except SpecNotFound:
raise
prefix = get_prefix(args, search=False)
if args.force and prefix != context.root_prefix and os.path.exists(prefix):
rm_rf(prefix)
cli_install.check_prefix(prefix, json=args.json)
# TODO, add capability
# common.ensure_override_channels_requires_channel(args)
# channel_urls = args.channel or ()
result = {"conda": None, "pip": None}
args_packages = context.create_default_packages if not args.no_default_packages else []
if args.dry_run:
installer_type = 'conda'
installer = get_installer(installer_type)
pkg_specs = env.dependencies.get(installer_type, [])
pkg_specs.extend(args_packages)
solved_env = installer.dry_run(pkg_specs, args, env)
if args.json:
print(json.dumps(solved_env.to_dict(), indent=2))
else:
print(solved_env.to_yaml(), end='')
else:
if args_packages:
installer_type = "conda"
installer = get_installer(installer_type)
result[installer_type] = installer.install(prefix, args_packages, args, env)
if len(env.dependencies.items()) == 0:
installer_type = "conda"
pkg_specs = []
installer = get_installer(installer_type)
result[installer_type] = installer.install(prefix, pkg_specs, args, env)
else:
for installer_type, pkg_specs in env.dependencies.items():
try:
installer = get_installer(installer_type)
result[installer_type] = installer.install(prefix, pkg_specs, args, env)
except InvalidInstaller:
sys.stderr.write(textwrap.dedent("""
Unable to install package for {0}.
Please double check and ensure your dependencies file has
the correct spelling. You might also try installing the
conda-env-{0} package to see if provides the required
installer.
""").lstrip().format(installer_type)
)
return -1
if env.variables:
pd = PrefixData(prefix)
pd.set_environment_env_vars(env.variables)
touch_nonadmin(prefix)
print_result(args, prefix, result)
| 32.351852 | 95 | 0.64606 |

799242c9b74ad450c42183aa61805b0f4f3eb789 | 40,293 | py | Python
test/functional/p2p_compactblocks.py | alexander3636/btcbam-core-1 | 2565b3b110d049e8f188b57ea42e84466f307f35 | ["MIT"] | 6 | 2021-08-02T21:35:10.000Z | 2022-01-18T05:41:49.000Z
test/functional/p2p_compactblocks.py | alexander3636/btcbam-core-1 | 2565b3b110d049e8f188b57ea42e84466f307f35 | ["MIT"] | null | null | null
test/functional/p2p_compactblocks.py | alexander3636/btcbam-core-1 | 2565b3b110d049e8f188b57ea42e84466f307f35 | ["MIT"] | 4 | 2021-08-14T13:12:43.000Z | 2021-11-26T21:19:35.000Z
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
import random
from decimal import Decimal
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until, softfork_active, satoshi_round
from test_framework.bitcoinbamconfig import COINBASE_MATURITY
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self, cmpct_version):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
self.cmpct_version = cmpct_version
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, e.g. an invalid block."""
self.send_message(message)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
block = self.build_block_on_tip(self.nodes[0])
self.segwit_node.send_and_ping(msg_no_witness_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generatetoaddress(COINBASE_MATURITY, self.nodes[0].getnewaddress(address_type="bech32"))
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.segwit_node.send_and_ping(msg_no_witness_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, test_node, old_node=None):
preferred_version = test_node.cmpct_version
node = self.nodes[0]
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version + 1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
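# (Illustrative note, not part of the original test: BIP 152 prefilled
# transaction indexes are differentially encoded, so with only one
# transaction in the block any prefilled index other than 0 points past
# the end of the block. That is why the node rejects the cmpctblock sent
# above and disconnects the peer.)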
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, test_node, use_witness_address=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.getnewaddress(address_type='bech32')
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send - Decimal(0.2)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.2)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert segwit_tx_generated # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert entry.tx.wit.is_null()
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
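# Illustrative sketch, not part of the original test: per BIP 152 the
# siphash keys are the first two little-endian 64-bit integers of
# sha256(serialized block header || little-endian nonce), and each shortid
# is the low 6 bytes of SipHash-2-4 over the txid (version 1) or wtxid
# (version 2). calculate_shortid(k0, k1, tx_hash) used above is assumed to
# reduce to:
#
#     siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff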
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, test_node, segwit=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_blocktxn()
else:
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
with_witness = (version == 2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_no_witness_blocktxn()
if with_witness:
msg_bt = msg_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_no_witness_blocktxn()
if version == 2:
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version == 2:
test_node.send_and_ping(msg_block(block))
else:
test_node.send_and_ping(msg_no_witness_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert tx.wit.is_null()
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, test_node, use_segwit=True):
node = self.nodes[0]
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer):
node = self.nodes[0]
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = peer.cmpct_version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
assert softfork_active(self.nodes[0], "segwit")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node, old_node=self.old_node)
self.test_sendcmpct(self.additional_segwit_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.old_node)
self.test_compactblock_construction(self.segwit_node)
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.segwit_node)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.segwit_node)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.segwit_node)
self.test_getblocktxn_handler(self.old_node)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.segwit_node)
self.test_compactblocks_not_at_tip(self.old_node)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.old_node)
self.request_cb_announcements(self.segwit_node)
self.test_end_to_end_block_relay([self.segwit_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)
self.test_invalid_tx_in_compactblock(self.old_node)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
| 46.635417 | 500 | 0.672276 |
7bc85d9f7f27f7dc9c69edc5ef90853576f69591 | 10,271 | py | Python | datumaro/plugins/icdar_format/extractor.py | certiware/posemaro | 3f9bed71dd4a1053ea27bed1a85f2ff01fdcf800 | ["MIT"] | null | null | null | datumaro/plugins/icdar_format/extractor.py | certiware/posemaro | 3f9bed71dd4a1053ea27bed1a85f2ff01fdcf800 | ["MIT"] | null | null | null | datumaro/plugins/icdar_format/extractor.py | certiware/posemaro | 3f9bed71dd4a1053ea27bed1a85f2ff01fdcf800 | ["MIT"] | 1 | 2021-06-20T05:29:48.000Z | 2021-06-20T05:29:48.000Z |
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from glob import iglob
import os.path as osp
import numpy as np
from datumaro.components.extractor import (
Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,
SourceExtractor,
)
from datumaro.util.image import find_images
from datumaro.util.mask_tools import lazy_mask
from .format import IcdarPath, IcdarTask
class _IcdarExtractor(SourceExtractor):
def __init__(self, path, task, subset=None):
self._path = path
self._task = task
if task is IcdarTask.word_recognition:
if not osp.isfile(path):
raise FileNotFoundError(
"Can't read annotation file '%s'" % path)
if not subset:
subset = osp.basename(osp.dirname(path))
super().__init__(subset=subset)
self._dataset_dir = osp.dirname(osp.dirname(path))
self._items = list(self._load_recognition_items().values())
elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:
if not osp.isdir(path):
raise NotADirectoryError(
"Can't open folder with annotation files '%s'" % path)
if not subset:
subset = osp.basename(path)
super().__init__(subset=subset)
self._dataset_dir = osp.dirname(path)
if task is IcdarTask.text_localization:
self._items = list(self._load_localization_items().values())
else:
self._items = list(self._load_segmentation_items().values())
def _load_recognition_items(self):
items = {}
with open(self._path, encoding='utf-8') as f:
for line in f:
line = line.strip()
objects = line.split(', ')
if len(objects) == 2:
image = objects[0]
objects = objects[1].split('\"')
if 1 < len(objects):
if len(objects) % 2:
captions = [objects[2 * i + 1]
for i in range(int(len(objects) / 2))]
else:
raise Exception("Line %s: unexpected number "
"of quotes in filename" % line)
else:
captions = objects[0].split()
else:
image = objects[0][:-1]
captions = []
item_id = osp.splitext(image)[0]
image_path = osp.join(osp.dirname(self._path),
IcdarPath.IMAGES_DIR, image)
if item_id not in items:
items[item_id] = DatasetItem(item_id, subset=self._subset,
image=image_path)
annotations = items[item_id].annotations
for caption in captions:
annotations.append(Caption(caption))
return items
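# Illustrative example, not part of the original extractor: a typical
# ICDAR word-recognition annotation line handled above looks like
#     word_1.png, "Tiredness"
# i.e. an image file name, a comma, and the transcription in quotes.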
def _load_localization_items(self):
items = {}
image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
if osp.isdir(image_dir):
images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
for p in find_images(image_dir, recursive=True) }
else:
images = {}
for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
item_id = osp.splitext(osp.relpath(path, self._path))[0]
if osp.basename(item_id).startswith('gt_'):
item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])
item_id = item_id.replace('\\', '/')
if item_id not in items:
items[item_id] = DatasetItem(item_id, subset=self._subset,
image=images.get(item_id))
annotations = items[item_id].annotations
with open(path, encoding='utf-8') as f:
for line in f:
line = line.strip()
objects = line.split('\"')
if 1 < len(objects):
if len(objects) == 3:
text = objects[1]
else:
raise Exception("Line %s: unexpected number "
"of quotes in filename" % line)
else:
text = ''
objects = objects[0].split()
if len(objects) == 1:
objects = objects[0].split(',')
if 8 <= len(objects):
points = [float(p) for p in objects[:8]]
attributes = {}
if 0 < len(text):
attributes['text'] = text
elif len(objects) == 9:
text = objects[8]
attributes['text'] = text
annotations.append(
Polygon(points, attributes=attributes))
elif 4 <= len(objects):
x = float(objects[0])
y = float(objects[1])
w = float(objects[2]) - x
h = float(objects[3]) - y
attributes = {}
if 0 < len(text):
attributes['text'] = text
elif len(objects) == 5:
text = objects[4]
attributes['text'] = text
annotations.append(
Bbox(x, y, w, h, attributes=attributes))
return items
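# Illustrative example, not part of the original extractor: a typical
# ICDAR text-localization annotation line handled above is either a
# comma-separated quadrilateral
#     377,117,463,117,465,130,378,130,"Genaxis Theatre"
# or a space-separated box
#     158 128 411 181 "Footpath"
# where the trailing quoted string, if present, becomes the 'text'
# attribute.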
def _load_segmentation_items(self):
items = {}
image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
if osp.isdir(image_dir):
images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
for p in find_images(image_dir, recursive=True) }
else:
images = {}
for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
item_id = osp.splitext(osp.relpath(path, self._path))[0]
item_id = item_id.replace('\\', '/')
if item_id.endswith('_GT'):
item_id = item_id[:-3]
if item_id not in items:
items[item_id] = DatasetItem(item_id, subset=self._subset,
image=images.get(item_id))
annotations = items[item_id].annotations
colors = [(255, 255, 255)]
chars = ['']
centers = [0]
groups = [0]
group = 1
number_in_group = 0
with open(path, encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == '':
if number_in_group == 1:
groups[len(groups) - 1] = 0
else:
group += 1
number_in_group = 0
continue
objects = line.split()
if objects[0][0] == '#':
objects[0] = objects[0][1:]
objects[9] = '\" \"'
objects.pop()
if len(objects) != 10:
raise Exception("Line %s contains the wrong number "
"of arguments, e.g. '241 73 144 1 4 0 3 1 4 \"h\"" % line)
centers.append(objects[3] + ' ' + objects[4])
groups.append(group)
colors.append(tuple(int(o) for o in objects[:3]))
char = objects[9]
if char[0] == '\"' and char[-1] == '\"':
char = char[1:-1]
chars.append(char)
number_in_group += 1
if number_in_group == 1:
groups[len(groups) - 1] = 0
mask_categories = MaskCategories(
{i: colors[i] for i in range(len(colors))})
inverse_cls_colormap = mask_categories.inverse_colormap
gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
if osp.isfile(gt_path):
# load mask through cache
mask = lazy_mask(gt_path, inverse_cls_colormap)
mask = mask()
classes = np.unique(mask)
for label_id in classes:
if label_id == 0:
continue
i = int(label_id)
annotations.append(Mask(group=groups[i],
image=self._lazy_extract_mask(mask, label_id),
attributes={ 'index': i - 1,
'color': ' '.join(str(p) for p in colors[i]),
'text': chars[i], 'center': centers[i] }
))
return items
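# Illustrative note, not part of the original extractor: in the GT text
# lines parsed above, the first three fields are the character's mask
# colour (R G B), fields 4-5 its centre coordinates, and the final quoted
# field the character itself; the remaining fields are not used by this
# extractor.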
@staticmethod
def _lazy_extract_mask(mask, c):
return lambda: mask == c
class IcdarWordRecognitionExtractor(_IcdarExtractor):
def __init__(self, path, **kwargs):
kwargs['task'] = IcdarTask.word_recognition
super().__init__(path, **kwargs)
class IcdarTextLocalizationExtractor(_IcdarExtractor):
def __init__(self, path, **kwargs):
kwargs['task'] = IcdarTask.text_localization
super().__init__(path, **kwargs)
class IcdarTextSegmentationExtractor(_IcdarExtractor):
def __init__(self, path, **kwargs):
kwargs['task'] = IcdarTask.text_segmentation
super().__init__(path, **kwargs)
class IcdarWordRecognitionImporter(Importer):
@classmethod
def find_sources(cls, path):
return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')
class IcdarTextLocalizationImporter(Importer):
@classmethod
def find_sources(cls, path):
return cls._find_sources_recursive(path, '', 'icdar_text_localization')
class IcdarTextSegmentationImporter(Importer):
@classmethod
def find_sources(cls, path):
return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')
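# Illustrative usage sketch, not part of the original module: these extractors
# are normally reached through Datumaro's dataset API, e.g.
#
#     from datumaro.components.dataset import Dataset
#     dataset = Dataset.import_from('path/to/icdar', 'icdar_word_recognition')
#
# where the format name is assumed to match the one registered for
# IcdarWordRecognitionImporter in this plugin.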
| 38.040741 | 86 | 0.489242 |
d4a8a2d9facc33082b8449beec0957f3ceb1b91c | 25,100 | py | Python | electrum_ltc/base_wizard.py | vivekteega/electrum-flo | 8278132246b2dc9a253ec94b97de746771ef01be | ["MIT"] | null | null | null | electrum_ltc/base_wizard.py | vivekteega/electrum-flo | 8278132246b2dc9a253ec94b97de746771ef01be | ["MIT"] | null | null | null | electrum_ltc/base_wizard.py | vivekteega/electrum-flo | 8278132246b2dc9a253ec94b97de746771ef01be | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import traceback
from functools import partial
from . import bitcoin
from . import keystore
from .keystore import bip44_derivation, purpose48_derivation
from .wallet import Imported_Wallet, Standard_Wallet, Multisig_Wallet, wallet_types, Wallet
from .storage import STO_EV_USER_PW, STO_EV_XPUB_PW, get_derivation_used_for_hw_device_encryption
from .i18n import _
from .util import UserCancelled, InvalidPassword, WalletFileException
# hardware device setup purpose
HWD_SETUP_NEW_WALLET, HWD_SETUP_DECRYPT_WALLET = range(0, 2)
class ScriptTypeNotSupported(Exception): pass
class GoBack(Exception): pass
class BaseWizard(object):
def __init__(self, config, plugins, storage):
super(BaseWizard, self).__init__()
self.config = config
self.plugins = plugins
self.storage = storage
self.wallet = None
self.stack = []
self.plugin = None
self.keystores = []
self.is_kivy = config.get('gui') == 'kivy'
self.seed_type = None
def set_icon(self, icon):
pass
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
f(self, *args)
elif hasattr(self, action):
f = getattr(self, action)
f(*args)
else:
raise Exception("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Create") + ' ' + name
message = '\n'.join([
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Import Litecoin addresses or private keys")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def upgrade_storage(self):
exc = None
def on_finished():
if exc is None:
self.wallet = Wallet(self.storage)
self.terminate()
else:
raise exc
def do_upgrade():
nonlocal exc
try:
self.storage.upgrade()
except Exception as e:
exc = e
self.waiting_dialog(do_upgrade, _('Upgrading wallet format...'), on_finished=on_finished)
def load_2fa(self):
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.load_2fa()
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses_or_keys'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
i = len(self.keystores)
title = _('Add cosigner') + ' (%d of %d)'%(i+1, self.n) if self.wallet_type=='multisig' else _('Keystore')
if self.wallet_type =='standard' or i==0:
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_from_seed', _('I already have a seed')),
('restore_from_key', _('Use a master key')),
]
if not self.is_kivy:
choices.append(('choose_hw_device', _('Use a hardware device')))
else:
message = _('Add a cosigner to your multi-sig wallet')
choices = [
('restore_from_key', _('Enter cosigner key')),
('restore_from_seed', _('Enter cosigner seed')),
]
if not self.is_kivy:
choices.append(('choose_hw_device', _('Cosign with hardware device')))
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def import_addresses_or_keys(self):
v = lambda x: keystore.is_address_list(x) or keystore.is_private_key_list(x)
title = _("Import Litecoin Addresses")
message = _("Enter a list of Litecoin addresses (this will create a watching-only wallet), or a list of private keys.")
self.add_xpub_dialog(title=title, message=message, run_next=self.on_import,
is_valid=v, allow_multi=True, show_wif_help=True)
def on_import(self, text):
# create a temporary wallet and exploit that modifications
# will be reflected on self.storage
if keystore.is_address_list(text):
w = Imported_Wallet(self.storage)
for x in text.split():
w.import_address(x)
elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
self.storage.put('keystore', k.dump())
w = Imported_Wallet(self.storage)
for x in keystore.get_private_keys(text):
w.import_private_key(x, None)
self.keystores.append(w.keystore)
else:
return self.terminate()
return self.run('create_wallet')
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_master_key
title = _("Create keystore from a master key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub/ypub/zpub)."),
_("To create a spending wallet, please enter a master private key (xprv/yprv/zprv).")
])
self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
else:
i = len(self.keystores) + 1
self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=keystore.is_bip32_key)
def on_restore_from_key(self, text):
k = keystore.from_master_key(text)
self.on_keystore(k)
def choose_hw_device(self, purpose=HWD_SETUP_NEW_WALLET):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device(purpose))
return
# scan devices
devices = []
devmgr = self.plugins.device_manager
try:
scanned_devices = devmgr.scan_devices()
except BaseException as e:
devmgr.print_error('error scanning devices: {}'.format(e))
debug_msg = ' {}:\n {}'.format(_('Error scanning devices'), e)
else:
debug_msg = ''
for name, description, plugin in support:
try:
# FIXME: side-effect: unpaired_device_info sets client.handler
u = devmgr.unpaired_device_infos(None, plugin, devices=scanned_devices)
except BaseException as e:
devmgr.print_error('error getting device infos for {}: {}'.format(name, e))
debug_msg += ' {}:\n {}\n'.format(plugin.name, e)
continue
devices += list(map(lambda x: (name, x), u))
if not debug_msg:
debug_msg = ' {}'.format(_('No exceptions encountered.'))
if not devices:
msg = ''.join([
_('No hardware device detected.') + '\n',
_('To trigger a rescan, press \'Next\'.') + '\n\n',
_('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", and do "Remove device". Then, plug your device again.') + ' ',
_('On Linux, you might have to add a new permission to your udev rules.') + '\n\n',
_('Debug message') + '\n',
debug_msg
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device(purpose))
return
# select device
self.devices = devices
choices = []
for name, info in devices:
state = _("initialized") if info.initialized else _("wiped")
label = info.label or _("An unnamed {}").format(name)
descr = "%s [%s, %s]" % (label, name, state)
choices.append(((name, info), descr))
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next= lambda *args: self.on_device(*args, purpose=purpose))
def on_device(self, name, device_info, *, purpose):
self.plugin = self.plugins.get_plugin(name)
try:
self.plugin.setup_device(device_info, self, purpose)
except OSError as e:
self.show_error(_('We encountered an error while connecting to your device:')
+ '\n' + str(e) + '\n'
+ _('To try to fix this, we will now re-pair with your device.') + '\n'
+ _('Please try again.'))
devmgr = self.plugins.device_manager
devmgr.unpair_id(device_info.device.id_)
self.choose_hw_device(purpose)
return
except (UserCancelled, GoBack):
self.choose_hw_device(purpose)
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
self.choose_hw_device(purpose)
return
if purpose == HWD_SETUP_NEW_WALLET:
def f(derivation, script_type):
self.run('on_hw_derivation', name, device_info, derivation, script_type)
self.derivation_and_script_type_dialog(f)
elif purpose == HWD_SETUP_DECRYPT_WALLET:
derivation = get_derivation_used_for_hw_device_encryption()
xpub = self.plugin.get_xpub(device_info.device.id_, derivation, 'standard', self)
password = keystore.Xpub.get_pubkey_from_xpub(xpub, ())
try:
self.storage.decrypt(password)
except InvalidPassword:
# try to clear session so that user can type another passphrase
devmgr = self.plugins.device_manager
client = devmgr.client_by_id(device_info.device.id_)
if hasattr(client, 'clear_session'): # FIXME not all hw wallet plugins have this
client.clear_session()
raise
else:
raise Exception('unknown purpose: %s' % purpose)
def derivation_and_script_type_dialog(self, f):
message1 = _('Choose the type of addresses in your wallet.')
message2 = '\n'.join([
_('You can override the suggested derivation path.'),
_('If you are not sure what this is, leave this field unchanged.')
])
if self.wallet_type == 'multisig':
# There is no general standard for HD multisig.
# For legacy, this is partially compatible with BIP45; assumes index=0
# For segwit, a custom path is used, as there is no standard at all.
choices = [
('standard', 'legacy multisig (p2sh)', "m/45'/0"),
('p2wsh-p2sh', 'p2sh-segwit multisig (p2wsh-p2sh)', purpose48_derivation(0, xtype='p2wsh-p2sh')),
('p2wsh', 'native segwit multisig (p2wsh)', purpose48_derivation(0, xtype='p2wsh')),
]
else:
choices = [
('standard', 'legacy (p2pkh)', bip44_derivation(0, bip43_purpose=44)),
('p2wpkh-p2sh', 'p2sh-segwit (p2wpkh-p2sh)', bip44_derivation(0, bip43_purpose=49)),
('p2wpkh', 'native segwit (p2wpkh)', bip44_derivation(0, bip43_purpose=84)),
]
while True:
try:
self.choice_and_line_dialog(
run_next=f, title=_('Script type and Derivation path'), message1=message1,
message2=message2, choices=choices, test_text=bitcoin.is_bip32_derivation)
return
except ScriptTypeNotSupported as e:
self.show_error(e)
# let the user choose again
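# Illustrative note, not part of the original class: with the helpers
# imported above, bip44_derivation(0, bip43_purpose=44) is assumed to
# yield a path of the form m/44'/coin_type'/0', and
# purpose48_derivation(0, xtype='p2wsh') a path of the form
# m/48'/coin_type'/0'/script_type'; the concrete coin_type and
# script_type values come from the keystore module and are not verified
# here.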
def on_hw_derivation(self, name, device_info, derivation, xtype):
from .keystore import hardware_keystore
try:
xpub = self.plugin.get_xpub(device_info.device.id_, derivation, xtype, self)
except ScriptTypeNotSupported:
raise # this is handled in derivation_dialog
except BaseException as e:
self.show_error(e)
return
d = {
'type': 'hardware',
'hw_type': name,
'derivation': derivation,
'xpub': xpub,
'label': device_info.label,
}
k = hardware_keystore(d)
self.on_keystore(k)
def passphrase_dialog(self, run_next, is_restoring=False):
title = _('Seed extension')
message = '\n'.join([
_('You may extend your seed with custom words.'),
_('Your seed extension must be saved together with your seed.'),
])
warning = '\n'.join([
_('Note that this is NOT your encryption password.'),
_('If you do not know what this is, leave this field empty.'),
])
warn_issue4566 = is_restoring and self.seed_type == 'bip39'
self.line_dialog(title=title, message=message, warning=warning,
default='', test=lambda x:True, run_next=run_next,
warn_issue4566=warn_issue4566)
def restore_from_seed(self):
self.opt_bip39 = True
self.opt_ext = True
is_cosigning_seed = lambda x: bitcoin.seed_type(x) in ['standard', 'segwit']
test = bitcoin.is_seed if self.wallet_type == 'standard' else is_cosigning_seed
self.restore_seed_dialog(run_next=self.on_restore_seed, test=test)
def on_restore_seed(self, seed, is_bip39, is_ext):
self.seed_type = 'bip39' if is_bip39 else bitcoin.seed_type(seed)
if self.seed_type == 'bip39':
f = lambda passphrase: self.on_restore_bip39(seed, passphrase)
self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
elif self.seed_type in ['standard', 'segwit']:
f = lambda passphrase: self.run('create_keystore', seed, passphrase)
self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
elif self.seed_type == 'old':
self.run('create_keystore', seed, '')
elif self.seed_type == '2fa':
self.load_2fa()
self.run('on_restore_seed', seed, is_ext)
else:
raise Exception('Unknown seed type', self.seed_type)
def on_restore_bip39(self, seed, passphrase):
def f(derivation, script_type):
self.run('on_bip43', seed, passphrase, derivation, script_type)
self.derivation_and_script_type_dialog(f)
def create_keystore(self, seed, passphrase):
k = keystore.from_seed(seed, passphrase, self.wallet_type == 'multisig')
self.on_keystore(k)
def on_bip43(self, seed, passphrase, derivation, script_type):
k = keystore.from_bip39_seed(seed, passphrase, derivation, xtype=script_type)
self.on_keystore(k)
def on_keystore(self, k):
has_xpub = isinstance(k, keystore.Xpub)
if has_xpub:
from .bitcoin import xpub_type
t1 = xpub_type(k.xpub)
if self.wallet_type == 'standard':
if has_xpub and t1 not in ['standard', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_error(_('Wrong key type') + ' %s'%t1)
self.run('choose_keystore')
return
self.keystores.append(k)
self.run('create_wallet')
elif self.wallet_type == 'multisig':
assert has_xpub
if t1 not in ['standard', 'p2wsh', 'p2wsh-p2sh']:
self.show_error(_('Wrong key type') + ' %s'%t1)
self.run('choose_keystore')
return
if k.xpub in map(lambda x: x.xpub, self.keystores):
self.show_error(_('Error: duplicate master public key'))
self.run('choose_keystore')
return
if len(self.keystores)>0:
t2 = xpub_type(self.keystores[0].xpub)
if t1 != t2:
self.show_error(_('Cannot add this cosigner:') + '\n' + "Their key type is '%s', we are '%s'"%(t1, t2))
self.run('choose_keystore')
return
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
self.run('create_wallet')
def create_wallet(self):
encrypt_keystore = any(k.may_have_password() for k in self.keystores)
# note: the following condition ("if") is duplicated logic from
# wallet.get_available_storage_encryption_version()
if self.wallet_type == 'standard' and isinstance(self.keystores[0], keystore.Hardware_KeyStore):
# offer encrypting with a pw derived from the hw device
k = self.keystores[0]
try:
k.handler = self.plugin.create_handler(self)
password = k.get_password_for_storage_encryption()
except UserCancelled:
devmgr = self.plugins.device_manager
devmgr.unpair_xpub(k.xpub)
self.choose_hw_device()
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
self.request_storage_encryption(
run_next=lambda encrypt_storage: self.on_password(
password,
encrypt_storage=encrypt_storage,
storage_enc_version=STO_EV_XPUB_PW,
encrypt_keystore=False))
else:
# prompt the user to set an arbitrary password
self.request_password(
run_next=lambda password, encrypt_storage: self.on_password(
password,
encrypt_storage=encrypt_storage,
storage_enc_version=STO_EV_USER_PW,
encrypt_keystore=encrypt_keystore),
force_disable_encrypt_cb=not encrypt_keystore)
def on_password(self, password, *, encrypt_storage,
storage_enc_version=STO_EV_USER_PW, encrypt_keystore):
self.storage.set_keystore_encryption(bool(password) and encrypt_keystore)
if encrypt_storage:
self.storage.set_password(password, enc_version=storage_enc_version)
for k in self.keystores:
if k.may_have_password():
k.update_password(None, password)
if self.wallet_type == 'standard':
self.storage.put('seed_type', self.seed_type)
keys = self.keystores[0].dump()
self.storage.put('keystore', keys)
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'imported':
if len(self.keystores) > 0:
keys = self.keystores[0].dump()
self.storage.put('keystore', keys)
self.wallet = Imported_Wallet(self.storage)
self.wallet.storage.write()
self.terminate()
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def choose_seed_type(self):
title = _('Choose Seed type')
message = ' '.join([
_("The type of addresses used by your wallet will depend on your seed."),
_("Segwit wallets use bech32 addresses, defined in BIP173."),
_("Please note that websites and other wallets may not support these addresses yet."),
_("Thus, you might want to keep using a non-segwit wallet in order to be able to receive litecoins during the transition period.")
])
choices = [
('create_standard_seed', _('Standard')),
('create_segwit_seed', _('Segwit')),
]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def create_segwit_seed(self): self.create_seed('segwit')
def create_standard_seed(self): self.create_seed('standard')
def create_seed(self, seed_type):
from . import mnemonic
self.seed_type = seed_type
seed = mnemonic.Mnemonic('en').make_seed(self.seed_type)
self.opt_bip39 = False
f = lambda x: self.request_passphrase(seed, x)
self.show_seed_dialog(run_next=f, seed_text=seed)
def request_passphrase(self, seed, opt_passphrase):
if opt_passphrase:
f = lambda x: self.confirm_seed(seed, x)
self.passphrase_dialog(run_next=f)
else:
self.run('confirm_seed', seed, '')
def confirm_seed(self, seed, passphrase):
f = lambda x: self.confirm_passphrase(seed, passphrase)
self.confirm_seed_dialog(run_next=f, test=lambda x: x==seed)
def confirm_passphrase(self, seed, passphrase):
f = lambda x: self.run('create_keystore', seed, x)
if passphrase:
title = _('Confirm Seed Extension')
message = '\n'.join([
_('Your seed extension must be saved together with your seed.'),
_('Please type it here.'),
])
self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x==passphrase)
else:
f('')
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait...")
self.waiting_dialog(task, msg)
| 43.957968
| 174
| 0.594542
|
8ce8f6d4728baf847887814122e864445c89a24c
| 3,208
|
py
|
Python
|
extractm5.py
|
zohan180/gem5_tca
|
97102622927dbcce4cf4e43268780eadbbc1e31e
|
[
"BSD-3-Clause"
] | null | null | null |
extractm5.py
|
zohan180/gem5_tca
|
97102622927dbcce4cf4e43268780eadbbc1e31e
|
[
"BSD-3-Clause"
] | null | null | null |
extractm5.py
|
zohan180/gem5_tca
|
97102622927dbcce4cf4e43268780eadbbc1e31e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
walk_dir = sys.argv[1]
print('walk_dir = ' + walk_dir)
stringlist=[]
#stringlist.append("system.cpu.iq.fu_full::FloatMult")
stringlist.append("Multiplied 123 submatrices")
# stringlist.append("system.cpu.rob.rob_Access_Usage::mean")
# stringlist.append("system.cpu.rename.serializeStallCycles")
# stringlist.append("system.cpu.ipc_total")
# stringlist.append("system.cpu.dcache.ReadReq_misses::cpu.data")
#stringlist.append("system.cpu.committedInst")
# stringlist.append("system.cpu.fetch.icacheStallCycles")
# stringlist.append("system.cpu.iq.issued_per_cycle::mean")
# stringlist.append("system.cpu.icache.overall_miss_latency::total")
#string1="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string2="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string3="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string4="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string5="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string6="sdfsdflkasjdflkajsdf;laksjdfa;kdsf"
#string1="sim_insts"
#string2="sim_ticks"
#string3="system.cpu.commit.branchMispredicts"
#string4="system.cpu.dcache.ReadReq_misses::cpu.data"
#string5="system.cpu.iq.FU_type_0::FloatMult"
#string6="system.cpu.iq.FU_type_0::FloatAdd"
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
#print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
list_file_path = os.path.join(walk_dir, 'm5out_stats.csv')
open(list_file_path, 'w').close()
beenhere=[]
statscollected=[]
for root, subdirs, files in os.walk(walk_dir):
#print('--\nroot = ' + root)
#print('list_file_path = ' + list_file_path)
with open(list_file_path, 'a') as list_file:
#list_file.truncate(0)
#for subdir in subdirs:
#print('\t- subdirectory ' + subdir)
for filename in files:
file_path = os.path.join(root, filename)
# print (str(file_path))
if filename == "condor_out":
print("found condor_out in " + root)
                if root not in beenhere:
                    # record this results directory once and start a new CSV row for it
                    beenhere.append(root)
                    #list_file.write("\n" + root[root.find("/", root.find("/") + 1)+8:] + ",")
                    list_file.write("\n" + root[root.find("/", 4):] + ",")
#print('\t- file %s (full path: %s)' % (filename, file_path))
                    with open(file_path, 'r') as f:  # text mode so the str.find() calls below work on Python 3
#print("YAY! I'M PRINTING! to " + list_file.name)
for line in f:
for stringelm in stringlist:
# #list_file.write(('The file %s contains:\n' % filename).encode('utf-8'))
if (line.find(stringelm) != -1):
# list_file.write(line.split()[1] + ",")
                                    for s in line.split():
                                        if s.isdigit():
                                            list_file.write(str(s) + ",")
# if stringelm not in statscollected:
# statscollected.append(stringelm) #add element in found order
print(line)
with open(list_file_path, 'a') as f:
for line in statscollected:
f.write(line+",")
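# Hedged usage sketch (not part of the original script): the walk directory is
# taken from the first command-line argument, and results are appended to
# m5out_stats.csv inside that directory; every subdirectory containing a
# "condor_out" file contributes one CSV row. A hypothetical invocation:
#
#   python extractm5.py ./gem5_runs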
| 36.873563
| 138
| 0.65586
|
07de0baf8d9090ba259060f3e58b38fdd4b16883
| 1,245
|
py
|
Python
|
test/it/fakebintray.py
|
uk-gov-mirror/hmrc.service-manager
|
6d6fcc4a1d25d094e7d0c798fc4219cf20a6f234
|
[
"Apache-2.0"
] | 52
|
2015-02-04T21:09:11.000Z
|
2022-02-23T15:48:06.000Z
|
test/it/fakebintray.py
|
uk-gov-mirror/hmrc.service-manager
|
6d6fcc4a1d25d094e7d0c798fc4219cf20a6f234
|
[
"Apache-2.0"
] | 57
|
2015-01-08T10:17:16.000Z
|
2021-11-18T14:56:51.000Z
|
test/it/fakebintray.py
|
uk-gov-mirror/hmrc.service-manager
|
6d6fcc4a1d25d094e7d0c798fc4219cf20a6f234
|
[
"Apache-2.0"
] | 50
|
2015-02-02T16:12:13.000Z
|
2021-11-13T09:19:32.000Z
|
#!/usr/bin/python
from bottle import route, run, request
from bottle import static_file
MAVEN_METADATA = """<?xml version="1.0" encoding="UTF-8"?>
<metadata>
<groupId>uk.gov.hmrc</groupId>
<artifactId>help-frontend_2.11</artifactId>
<version>1.26.0-3-gd7ed03c</version>
<versioning>
<latest>1.26.0-3-gd7ed03c</latest>
<release>1.26.0-3-gd7ed03c</release>
<versions>
<version>1.26.0-1-gd0dba7c</version>
<version>1.26.0-2-gd213a4f</version>
<version>1.26.0-3-gd7ed03c</version>
</versions>
<lastUpdated>20150804143826</lastUpdated>
</versioning>
</metadata>"""
@route("/ping")
def ping():
return "pong"
@route("/hmrc/release-candidates/uk/gov/hmrc/playtest_2.11/maven-metadata.xml")
def maven_metadata():
return MAVEN_METADATA
@route("/hmrc/release-candidates/uk/gov/hmrc/playtest_2.11/1.26.0-3-gd7ed03c/playtest_2.11-1.26.0-3-gd7ed03c.tgz")
def server_static_tgz():
return static_file("bintray/playtest.tgz", root="./static/")
@route("/hmrc/release-candidates/uk/gov/hmrc/playtest_2.11/1.26.0-3-gd7ed03c/playtest_2.11-1.26.0-3-gd7ed03c.tgz.md5")
def server_static_md5():
return static_file("bintray/playtest.tgz.md5", root="./static/")
run(host="localhost", port=8061)
| 27.666667
| 118
| 0.704418
|
b60a588279aa6608775edf8b7311008f37d193e8
| 3,231
|
py
|
Python
|
extensions/rules/tar_file_string.py
|
aldeka/oppia
|
aead304c95a282c9ca8035bc25c4794864d07578
|
[
"Apache-2.0"
] | 3
|
2015-01-10T23:45:23.000Z
|
2015-02-17T10:46:08.000Z
|
extensions/rules/tar_file_string.py
|
aldeka/oppia
|
aead304c95a282c9ca8035bc25c4794864d07578
|
[
"Apache-2.0"
] | null | null | null |
extensions/rules/tar_file_string.py
|
aldeka/oppia
|
aead304c95a282c9ca8035bc25c4794864d07578
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for TarFileString."""
__author__ = 'Tarashish Mishra'
from extensions.rules import base
class ChecksWrapperDirName(base.TarFileStringRule):
description = 'does not have a wrapper directory named {{x|UnicodeString}}'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
for member in members:
if '/' not in member.name:
return member.isdir() and member.name != self.x
return True
class ChecksWrapperDirPresence(base.TarFileStringRule):
description = 'does not have a wrapper directory.'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
for member in members:
if '/' not in member.name:
return not member.isdir()
return True
class HasAppleDoubleFile(base.TarFileStringRule):
description = 'contains an Apple Double file.'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
for member in members:
if '/._' in member.name:
return True
return False
class HasUnexpectedFile(base.TarFileStringRule):
description = 'contains a file not present in {{expected_files|List}}.'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
for member in members:
if member.name not in self.expected_files:
return True
return False
class MissingExpectedFile(base.TarFileStringRule):
description = 'omits one or more files in {{expected_files|List}}.'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
member_list = []
for member in members:
if member.name in self.expected_files:
member_list.append(member.name)
return bool(
set(self.expected_files) - set(member_list)
)
class HasUnexpectedContent(base.TarFileStringRule):
    description = 'contains a file in {{file_list|List}} whose contents do not match the expected contents.'
is_generic = True
def _evaluate(self, subject):
members = subject.getmembers()
for member in members:
if member.isfile():
filename = member.name.split('/')[-1]
if filename in self.file_list:
subj_contents = subject.extractfile(member).read()
expected_contents = self.fs.get(filename).decode('utf-8')
if subj_contents != expected_contents:
return True
return False
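# Hedged illustration (not part of the Oppia rule set): building a small in-memory
# tar archive with the standard tarfile module and applying the same top-level
# wrapper-directory check that ChecksWrapperDirPresence._evaluate performs above.
# How the framework actually constructs the TarFile subject is an assumption here.
#
# import io
# import tarfile
#
# buf = io.BytesIO()
# with tarfile.open(fileobj=buf, mode='w') as tar:
#     dir_info = tarfile.TarInfo('wrapper')
#     dir_info.type = tarfile.DIRTYPE
#     tar.addfile(dir_info)
#     data = b'hello'
#     file_info = tarfile.TarInfo('wrapper/readme.txt')
#     file_info.size = len(data)
#     tar.addfile(file_info, io.BytesIO(data))
# buf.seek(0)
# subject = tarfile.open(fileobj=buf, mode='r')
# top_level = [m for m in subject.getmembers() if '/' not in m.name]
# print(all(m.isdir() for m in top_level))  # True: the only top-level member is a directory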
| 31.676471
| 79
| 0.648406
|
19341549f97f506e1ec83003e62998a371bf7e10
| 1,621
|
py
|
Python
|
examples/rnp_gen.py
|
pernici/hobj
|
ccd3e49dd3c7eaf656de9b3f8f041c687f824be6
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T18:33:27.000Z
|
2020-04-18T18:33:27.000Z
|
examples/rnp_gen.py
|
pernici/hobj
|
ccd3e49dd3c7eaf656de9b3f8f041c687f824be6
|
[
"BSD-3-Clause"
] | null | null | null |
examples/rnp_gen.py
|
pernici/hobj
|
ccd3e49dd3c7eaf656de9b3f8f041c687f824be6
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.path.insert(0,'../src')
from hobj import Hobj
from densearith import dup_add
from compatibility import itervalues
def get_sum(p, K):
"""
    Sum all polynomials stored in ``p`` and return the result ``nv`` (rectangle, non-periodic case).
"""
nv = []
for v in itervalues(p):
nv = dup_add(nv, v, K)
return nv
def nv_r_nx_ny_np_rec(ny, K):
"""
generator of ``(nx, ny, nv)``, where `ny` is the input argument;
``nv`` is the matching generating polynomial of the grid ``(nx, ny)``
with open boundary conditions.
"""
# initial graph
N = 10000
p = {0: [K.one]}
hb = Hobj()
for i, j in [(i,i+1) for i in range(ny - 1)]:
p = hb.iadd_object(p, 1, [i, j], [], K)
for i in range(ny):
p = hb.iadd_object(p, 1, [i, i + ny], [i], K)
for i, j in [(k, k+1) for k in range(ny, 2*ny - 1)]:
p = hb.iadd_object(p, 1, [i, j], [], K)
n = 2*ny
for ii in range(N + 1):
# strip
for k in range(n - ny, n):
p = hb.iadd_object(p, 1, [k, k + ny], [k], K)
for i, j in [(k, k+1) for k in range(n, n + ny - 1)]:
p = hb.iadd_object(p, 1, [i, j], [], K)
n += ny
# closure
nv = get_sum(p, K)
yield n//ny, ny, nv
def test1():
import sys
from domains import ZZ as K
from time import time
try:
ny = int(sys.argv[1])
    except (IndexError, ValueError):
        print('usage: rnp_gen.py ny')
sys.exit()
it = nv_r_nx_ny_np_rec(ny, K)
for n1, n2, nv in it:
print('n1=%d n2=%d nv=%s' %(n1, n2, nv))
if n1 >= ny:
break
if __name__ == '__main__':
print('rnp_gen:')
test1()
| 23.157143
| 73
| 0.502776
|
bc12113e874219b18176a585f6b260ba41a9da80
| 924
|
py
|
Python
|
scripts/plot_primitives.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 4
|
2017-03-04T06:18:21.000Z
|
2019-01-04T08:03:41.000Z
|
scripts/plot_primitives.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 87
|
2015-01-30T03:50:35.000Z
|
2017-02-20T18:55:42.000Z
|
scripts/plot_primitives.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 10
|
2015-07-29T13:13:05.000Z
|
2019-02-13T22:11:24.000Z
|
#!/usr/bin/env python
import argparse, yaml
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot the primitives in a primitives file")
parser.add_argument('--filename', type=str, default='base_planner_parameters.yaml')
args = parser.parse_args()
with open(args.filename, 'r') as f:
        doc = yaml.safe_load(f)  # safe_load: plain yaml.load() without a Loader is rejected by newer PyYAML
actions = doc['actions']
for angle in actions:
ang = angle['angle']
primitives = angle['primitives']
xvals = []
yvals = []
tvals = []
for primitive in primitives:
poses = primitive['poses']
for coord in poses:
xvals.append(coord[0])
yvals.append(coord[1])
tvals.append(coord[2])
plt.plot(xvals, yvals, '.b')
title_str = 'Angle %d' % ang
plt.title(title_str)
plt.show()
| 28
| 92
| 0.577922
|
48e1224189be745b9783db0fe288fc49aaaa514d
| 7,490
|
py
|
Python
|
onlineassessmentsystem/submissions/views.py
|
jwalit21/SDP_Online_Assessment_System
|
a778a0e0ae264fe74037a5f0b210d205ebc18d98
|
[
"MIT"
] | 1
|
2021-03-05T12:28:36.000Z
|
2021-03-05T12:28:36.000Z
|
onlineassessmentsystem/submissions/views.py
|
jwalit21/SDP_Online_Assessment_System
|
a778a0e0ae264fe74037a5f0b210d205ebc18d98
|
[
"MIT"
] | null | null | null |
onlineassessmentsystem/submissions/views.py
|
jwalit21/SDP_Online_Assessment_System
|
a778a0e0ae264fe74037a5f0b210d205ebc18d98
|
[
"MIT"
] | null | null | null |
import datetime
import json
import os
from urllib.parse import urlencode
import pytz
import requests
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import JsonResponse
from django.shortcuts import render
from django.utils import timezone
from classroom.models import ClassroomStudents
from problem.models import TestCase, Problem
from users.models import User
from .models import Submission
utc = pytz.UTC
loginRedirectMessage = urlencode({'msg': 'Please Login'})
'''
Role-based authorization check for a Problem: verifies that the requesting user (student or faculty) may access the given problem's lab or contest.
'''
def customRoleBasedProblemAuthorization(request, problem, isItLab):
user = request.user
    # Deny access if the student is not enrolled in the problem's classroom,
    # or the faculty user does not own that classroom
if user.isStudent:
try:
if isItLab:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=problem.lab.classroom)
else:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=problem.contest.classroom)
except ObjectDoesNotExist:
return False
else:
if ((isItLab and problem.lab.classroom.user != user) or (
not isItLab and problem.contest.classroom.user != user)):
return False
return True
'''Function to Compile code using API'''
def compileCode(code, stdIn):
data = {
'script': code,
'language': 'c',
'versionIndex': '4',
'clientId': settings.API_CLIENT_ID,
'clientSecret': settings.API_CLIENT_SECRET,
'stdin': stdIn
}
url = 'https://api.jdoodle.com/v1/execute'
r = requests.post(url, data=json.dumps(data), headers={"content-type": "application/json"})
r = r.json()
if 'error' in r:
return r['error']
else:
return r['output']
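# Hedged usage sketch (not part of the original module): calling compileCode()
# directly with a tiny C program and its standard input. The program text and
# input are illustrative only; API_CLIENT_ID and API_CLIENT_SECRET must already
# be configured in Django settings for the JDoodle call to succeed.
#
# example_source = '#include <stdio.h>\nint main() { int x; scanf("%d", &x); printf("%d", x * 2); return 0; }'
# example_output = compileCode(example_source, '21')
# print(example_output)  # expected to contain "42" if the remote execution succeeds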
'''Function to generate file upload Path'''
def getSubmissionFilePath(request, user, problem):
fileNumber = Submission.objects.filter(user=user, problem=problem).count() + 1
return "submissions/" + user.username + "/" + str(problem.problemId) + "/code_" + str(fileNumber) + ".c"
'''Function to compare code's output with expected output of test case'''
def compareOutput(codeOutput, testcaseOutput):
if len(codeOutput) != len(testcaseOutput):
return False
for i in range(len(codeOutput)):
if codeOutput[i] != testcaseOutput[i]:
return False
return True
@login_required(login_url='/users/login?' + loginRedirectMessage)
def runCode(request):
code = request.GET.get('code')
stdin = request.GET.get('stdin')
output = compileCode(code, stdin)
return JsonResponse({"output": output})
'''Function to submit code'''
@login_required(login_url='/users/login?' + loginRedirectMessage)
def submitCode(request, update=False, submission=None):
code = request.GET.get('code')
problemId = request.GET.get('problemId')
problem = Problem.objects.get(problemId=problemId)
filePath = getSubmissionFilePath(request, request.user, problem)
uploadDirectory = settings.MEDIA_ROOT
try:
file = open(os.path.join(uploadDirectory, filePath), 'w')
except FileNotFoundError:
os.makedirs(os.path.dirname(os.path.join(uploadDirectory, filePath)))
file = open(os.path.join(uploadDirectory, filePath), 'w')
file.write(code)
file.close()
testCases = TestCase.objects.filter(problem=problem)
testCasesPassed = 0
totalTestCases = len(testCases)
for testCase in testCases:
fpInput = open(os.path.join(settings.BASE_DIR, testCase.inputFile.url[1:]), "r")
stdin = fpInput.read()
fpInput.close()
output = compileCode(code, stdin)
fpOutput = open(os.path.join(settings.BASE_DIR, testCase.outputFile.url[1:]), "r")
if compareOutput(output, fpOutput.read()):
testCasesPassed += 1
fpOutput.close()
score = int(testCasesPassed / totalTestCases * problem.points)
if update:
submission.score = score
submission.save()
return
submission = Submission(problem_id=problemId, status=True, submissionTime=datetime.date.today(), user=request.user,
score=score,
filePath=filePath)
submission.save()
return JsonResponse({"passed": testCasesPassed, 'total': totalTestCases, 'score': score})
'''Function To display List of submissions'''
@login_required(login_url='/users/login?' + loginRedirectMessage)
def list(request):
problemId = request.GET.get('problemId')
page = request.GET.get('page', 1)
if problemId is None:
return render(request, '404.html')
try:
problem = Problem.objects.get(problemId=problemId)
except ObjectDoesNotExist:
return render(request, '404.html')
if not customRoleBasedProblemAuthorization(request, problem, not problem.doesBelongToContest):
return render(request, 'accessDenied.html')
username = request.GET.get("username")
isOver = False
user = request.user
if problem.doesBelongToContest:
if timezone.now() >= problem.contest.endTime:
isOver = True
else:
if timezone.now() >= problem.lab.deadline:
isOver = True
if username is not None:
try:
user = User.objects.get(username=username)
submissionsList = Submission.objects.filter(problem=problem, user=user)
except ObjectDoesNotExist:
submissionsList = []
else:
submissionsList = Submission.objects.filter(problem=problem)
paginator = Paginator(submissionsList, 10)
try:
submissions = paginator.page(page)
except PageNotAnInteger:
submissions = paginator.page(1)
except EmptyPage:
submissions = paginator.page(paginator.num_pages)
return render(request, 'submissions/list.html',
{'problem': problem, 'submissions': submissions, "username": username, 'isOver': isOver,
'user': user})
@login_required(login_url='/users/login?' + loginRedirectMessage)
def view(request):
submissionId = request.GET.get('submissionId')
if submissionId is None:
return render(request, '404.html')
try:
submission = Submission.objects.get(submissionId=submissionId)
except ObjectDoesNotExist:
return render(request, '404.html')
if not customRoleBasedProblemAuthorization(request, submission.problem, not submission.problem.doesBelongToContest):
return render(request, 'accessDenied.html')
# Submission can be viewed by other participants only after contest is over
if submission.problem.doesBelongToContest:
if timezone.now() < submission.problem.contest.endTime and submission.user != request.user:
return render(request, 'accessDenied.html')
else:
if timezone.now() < submission.problem.lab.deadline and submission.user != request.user:
return render(request, 'accessDenied.html')
uploadDirectory = settings.MEDIA_ROOT
file = open(os.path.join(uploadDirectory, submission.filePath), "r")
code = file.read()
file.close()
return render(request, 'submissions/view.html', {'submission': submission, 'code': code})
| 33.587444
| 120
| 0.68024
|
db2d13ed1d2b15e81291da4ffc3868b9f6040fd9
| 17,609
|
py
|
Python
|
objectModel/Python/tests/cdm/projection/test_projection_add_supporting_attribute.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 884
|
2019-05-10T02:09:10.000Z
|
2022-03-31T14:02:00.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_add_supporting_attribute.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 171
|
2019-06-10T11:34:37.000Z
|
2022-03-31T22:50:12.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_add_supporting_attribute.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 340
|
2019-05-07T18:00:16.000Z
|
2022-03-31T12:00:15.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from typing import TYPE_CHECKING
from cdm.enums import CdmObjectType
from cdm.objectmodel import CdmCorpusDefinition, CdmEntityDefinition
from cdm.utilities import ResolveOptions, AttributeResolutionDirectiveSet
from tests.common import async_test
from tests.utilities.projection_test_utils import ProjectionTestUtils
if TYPE_CHECKING:
from cdm.objectmodel import CdmAttributeItem, CdmCollection
class ProjectionAddSupportingAttributeTest(unittest.TestCase):
"""A test class for testing the AddSupportingAttribute operation in a projection and in a resolution guidance"""
# All possible combinations of the different resolution directives
res_opts_combinations = [
[],
['referenceOnly'],
['normalized'],
['structured'],
['referenceOnly', 'normalized'],
['referenceOnly', 'structured'],
['referenceOnly', 'virtual'],
['normalized', 'structured'],
['normalized', 'structured', 'virtual'],
['referenceOnly', 'normalized', 'structured'],
['referenceOnly', 'normalized', 'structured', 'virtual']
]
# The path between test_data_path and test_name.
tests_subpath = os.path.join('Cdm', 'Projection', 'ProjectionAddSupportingAttributeTest')
@async_test
async def test_combine_ops_proj(self):
"""AddSupportingAttribute with replaceAsForeignKey operation in the same projection"""
test_name = 'test_combine_ops_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, []) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
# Supporting attribute: 'PersonInfo_display', rename 'address' to 'homeAddress'
self.assertEqual(7, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('homeAddress', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
self.assertEqual('address', resolved_entity.attributes[5].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[6].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[6], 'email')
@async_test
async def test_conditional_proj(self):
"""Test AddAttributeGroup operation with a 'referenceOnly' and 'virtual' condition"""
test_name = 'test_conditional_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
# Condition not met, don't include supporting attribute
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('address', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
resolved_entity2 = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly', 'virtual']) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
# Condition met, include the supporting attribute
self.assertEqual(6, len(resolved_entity2.attributes))
self.assertEqual('name', resolved_entity2.attributes[0].name)
self.assertEqual('age', resolved_entity2.attributes[1].name)
self.assertEqual('address', resolved_entity2.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity2.attributes[3].name)
self.assertEqual('email', resolved_entity2.attributes[4].name)
self.assertEqual('PersonInfo_display', resolved_entity2.attributes[5].name)
self.validate_in_support_of_attribute(resolved_entity2.attributes[5], 'email')
@async_test
async def test_entity_attribute(self):
"""Test resolving an entity attribute using resolution guidance"""
test_name = 'test_entity_attribute'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
self.assertEqual(2, len(resolved_entity.attributes))
self.assertEqual('id', resolved_entity.attributes[0].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[1].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[1], 'id', False)
# Resolve without directives
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
self.assertEqual(6, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('address', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[5].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[5], 'email', False)
@async_test
async def test_entity_attribute_proj(self):
"""Test resolving an entity attribute with add supporting attribute operation"""
test_name = 'test_entity_attribute_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
self.assertEqual(6, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('address', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[5].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[5], 'email')
@async_test
async def test_extends_entity(self):
"""addSupportingAttribute on an entity definition using resolution guidance"""
test_name = 'test_extends_entity'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
self.maxDiff = None
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, []) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
# Supporting attribute: 'PersonInfo_display' (using extendsEntityResolutionGuidance)
self.assertEqual(6, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('address', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[5].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[5], 'email', False)
@async_test
async def test_extends_entity_proj(self):
"""addSupportingAttribute on an entity definition"""
test_name = 'test_extends_entity_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, []) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
# Supporting attribute: 'PersonInfo_display' (using extendsEntityResolutionGuidance)
self.assertEqual(6, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
self.assertEqual('age', resolved_entity.attributes[1].name)
self.assertEqual('address', resolved_entity.attributes[2].name)
self.assertEqual('phoneNumber', resolved_entity.attributes[3].name)
self.assertEqual('email', resolved_entity.attributes[4].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[5].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[5], 'email')
@async_test
async def test_nested_proj(self):
"""Nested replaceAsForeignKey with addSupporingAttribute"""
test_name = 'test_nested_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email']
self.assertEqual(2, len(resolved_entity.attributes))
self.assertEqual('personId', resolved_entity.attributes[0].name)
self.assertEqual('PersonInfo_display', resolved_entity.attributes[1].name)
self.validate_in_support_of_attribute(resolved_entity.attributes[1], 'personId')
@async_test
async def test_nested_type_attribute_proj(self):
"""Test resolving a type attribute with a nested add supporting attribute operation"""
test_name = 'test_nested_t_a_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ["PersonInfo"]
self.assertEqual(2, len(resolved_entity.attributes))
self.assertEqual('name', resolved_entity.attributes[0].name)
supporting_attribute = resolved_entity.attributes[1] # type: CdmTypeAttributeDefinition
self.assertEqual('name_display', supporting_attribute.name)
self.validate_in_support_of_attribute(supporting_attribute, 'name', False)
@async_test
async def test_type_attribute(self):
"""Test resolving a type attribute using resolution guidance"""
test_name = 'test_type_attribute'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['structured']) # type: CdmEntityDefinition
# Original set of attributes: ["PersonInfo"]
self.assertEqual(2, len(resolved_entity.attributes))
self.assertEqual('PersonInfo', resolved_entity.attributes[0].name)
supporting_attribute = resolved_entity.attributes[1] # type: CdmTypeAttributeDefinition
self.assertEqual('PersonInfo_display', supporting_attribute.name)
self.validate_in_support_of_attribute(supporting_attribute, 'PersonInfo', False)
@async_test
async def test_type_attribute_proj(self):
"""Test resolving a type attribute with an add supporting attribute operation"""
test_name = 'test_type_attribute_proj'
entity_name = 'NewPerson'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly']) # type: CdmEntityDefinition
# Original set of attributes: ["PersonInfo"]
self.assertEqual(2, len(resolved_entity.attributes))
self.assertEqual('PersonInfo', resolved_entity.attributes[0].name)
supporting_attribute = resolved_entity.attributes[1] # type: CdmTypeAttributeDefinition
self.assertEqual('PersonInfo_display', supporting_attribute.name)
self.validate_in_support_of_attribute(supporting_attribute, 'PersonInfo', False)
def validate_in_support_of_attribute(self, supporting_attribute: 'CdmAttributeItem', from_attribute: str, check_virtual_trait: bool = True):
"""Validates that the supporting attribute has the 'is.addedInSupportOf' and 'is.virtual.attribute' traits"""
in_support_of_trait = supporting_attribute.applied_traits.item('is.addedInSupportOf') # type: Optional[CdmTraitReference]
self.assertIsNotNone(in_support_of_trait)
self.assertEqual(1, len(in_support_of_trait.arguments))
self.assertEqual(from_attribute, in_support_of_trait.arguments[0].value)
if check_virtual_trait:
self.assertIsNotNone(supporting_attribute.applied_traits.item('is.virtual.attribute'), 'Missing is.virtual.attribute traits')
| 60.098976
| 147
| 0.734965
|
84cb8e0a9b6506e913fd8bd37fab9f2d7f8319fe
| 251
|
py
|
Python
|
Networking/Packets/Outgoing/ShowAllyShootPacket.py
|
dot1991/pyrelay
|
2f0a6e55c54938b1a75afea257d77f5e6a2d51c3
|
[
"MIT"
] | null | null | null |
Networking/Packets/Outgoing/ShowAllyShootPacket.py
|
dot1991/pyrelay
|
2f0a6e55c54938b1a75afea257d77f5e6a2d51c3
|
[
"MIT"
] | null | null | null |
Networking/Packets/Outgoing/ShowAllyShootPacket.py
|
dot1991/pyrelay
|
2f0a6e55c54938b1a75afea257d77f5e6a2d51c3
|
[
"MIT"
] | null | null | null |
class ShowAllyShootPacket:
def __init__(self):
self.type = "SHOWALLYSHOOT"
self.toggle = 0
def write(self, writer):
writer.writeInt32(self.toggle)
def read(self, reader):
        self.toggle = reader.readInt32()
| 22.818182
| 41
| 0.629482
|
e4fd0674ee69114a6cd2bdcda409f1d6750b9364
| 3,090
|
py
|
Python
|
app/db_methods.py
|
Stanley-Okwii/send-it-api
|
c86654e828e64b5f39db4ed5fad1e8889c14c6a2
|
[
"Apache-2.0"
] | null | null | null |
app/db_methods.py
|
Stanley-Okwii/send-it-api
|
c86654e828e64b5f39db4ed5fad1e8889c14c6a2
|
[
"Apache-2.0"
] | 1
|
2018-11-11T11:35:43.000Z
|
2018-11-11T11:35:43.000Z
|
app/db_methods.py
|
Stanley-Okwii/send-it-api
|
c86654e828e64b5f39db4ed5fad1e8889c14c6a2
|
[
"Apache-2.0"
] | 1
|
2018-11-11T11:32:06.000Z
|
2018-11-11T11:32:06.000Z
|
from app.models import DataModel
from datetime import datetime
from pytz import timezone
db_connect = DataModel()
cursor = db_connect.cursor
dictcur = db_connect.dict_cursor
def register_new_user(data):
"""registers a new user"""
query = "INSERT INTO users(username, email, password, role) \
VALUES('{0}', '{1}', '{2}', '{3}')".format(
data['username'],
data['email'],
data['password'],
data['role']
)
cursor.execute(query)
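# Hedged design note (not part of the original module): the queries in this file
# interpolate values with str.format()/%-formatting. A sketch of the same insert
# using DB-API parameter binding is shown below, assuming the cursor comes from a
# driver that accepts %s placeholders (e.g. psycopg2); this is an assumption, not
# a statement about the actual DataModel implementation.
#
# query = ("INSERT INTO users(username, email, password, role) "
#          "VALUES(%s, %s, %s, %s)")
# cursor.execute(query, (data['username'], data['email'],
#                        data['password'], data['role']))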
def update_user_account(data, email):
"""update user information"""
query = "UPDATE users SET username='{0}', password='{1}' WHERE email='{2}'".format(
data['username'],
data['password'],
email
)
cursor.execute(query)
def update_user_role_to_admin(data):
"""update user information"""
query = "UPDATE users SET role='{0}' WHERE email='{1}'".format(
data['role'],
data['email']
)
cursor.execute(query)
def delete_user_account(email):
"""delete a user account"""
backup_parcels = "INSERT INTO parcel_order_archive(order_id, parcel, weight, \
price, receiver, destination, current_location, pickup_location, \
email, created_at, status) \
SELECT order_id, parcel, weight, price, receiver, destination, \
current_location, pickup_location, email, created_at, status \
FROM parcel_order WHERE email='%s'; \
DELETE FROM users WHERE email='%s';" % (email, email)
cursor.execute(backup_parcels)
def get_all_users():
"""get all user accounts"""
query = "SELECT * FROM users"
dictcur.execute(query)
users = dictcur.fetchall()
return users
def create_parcel_order(data):
"""creates a new parcel delivery order"""
uganda_time = timezone('Africa/Kampala')
created_at = datetime.now(uganda_time).strftime("%Y-%m-%d %I:%M:%S %p")
query = "INSERT INTO parcel_order(parcel, weight, \
price, receiver, destination, current_location, \
pickup_location, email, created_at, status) \
VALUES('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s')" % (
data['parcel'],
data['weight'],
data['price'],
data['receiver'],
data['destination'],
data['pickup_location'],
data['current_location'],
data['email'],
created_at,
'pending'
)
cursor.execute(query)
def update_parcel_order(data):
"""updates an existing parcel delivery order"""
query = "UPDATE parcel_order SET current_location='%s', pickup_location='%s', \
status='%s', destination='%s' WHERE order_id='%i'" % (
data['current_location'],
data['pickup_location'],
data['status'],
data['destination'],
data['order_id']
)
cursor.execute(query)
def get_all_parcel_orders(database_table):
query = "SELECT * FROM %s" % (database_table)
dictcur.execute(query)
parcel_orders = dictcur.fetchall()
return parcel_orders
| 30
| 87
| 0.600647
|
d62e1351ebac5c8ee472e1d1b280f4c6faa778f2
| 4,062
|
py
|
Python
|
testcases/pytorch/fast_neural_style.py
|
shinh/chainer-compiler
|
2a5e0bfd3f1abd7258a4cbffcfab79bc1d28f9e9
|
[
"MIT"
] | 116
|
2019-01-25T03:54:44.000Z
|
2022-03-08T00:11:14.000Z
|
testcases/pytorch/fast_neural_style.py
|
shinh/chainer-compiler
|
2a5e0bfd3f1abd7258a4cbffcfab79bc1d28f9e9
|
[
"MIT"
] | 431
|
2019-01-25T10:18:44.000Z
|
2020-06-17T05:28:55.000Z
|
testcases/pytorch/fast_neural_style.py
|
momohatt/chainer-compiler
|
26782cd29a5becf8e2badf268b47d98b3a6aea1d
|
[
"MIT"
] | 26
|
2019-01-25T07:21:09.000Z
|
2021-11-26T04:24:35.000Z
|
# Original: https://github.com/pytorch/examples/blob/490243127c02a5ea3348fa4981ecd7e9bcf6144c/fast_neural_style/neural_style/transformer_net.py
import torch
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
# Example input
def gen_TransformerNet_model():
model = TransformerNet()
x = torch.rand(5, 3, 16, 16)
return model, (x,)
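# Hedged usage sketch (not part of the original test case file): instantiating the
# example model and running one forward pass. The shapes follow from the layers
# above: the two stride-2 convolutions are undone by the two x2 upsampling layers,
# so the stylised output keeps the input's spatial size.
#
# model, (x,) = gen_TransformerNet_model()
# with torch.no_grad():
#     y = model(x)
# print(y.shape)  # torch.Size([5, 3, 16, 16])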
| 37.266055
| 143
| 0.65288
|
6e39f7294a2d2c59632272eb6e6cd8c97c93fc6d
| 87
|
py
|
Python
|
project/twittr/apps.py
|
evrom/django-example-for-class
|
c53b9e0ffe01014efa1229255887fc23b810d265
|
[
"MIT"
] | 1
|
2017-10-27T14:22:27.000Z
|
2017-10-27T14:22:27.000Z
|
project/twittr/apps.py
|
evrom/django-example-for-class
|
c53b9e0ffe01014efa1229255887fc23b810d265
|
[
"MIT"
] | null | null | null |
project/twittr/apps.py
|
evrom/django-example-for-class
|
c53b9e0ffe01014efa1229255887fc23b810d265
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TwittrConfig(AppConfig):
name = 'twittr'
| 14.5
| 33
| 0.747126
|
8eff933ff7c025841ca792783960e7e73d780d6a
| 14,780
|
py
|
Python
|
tests/test_mail.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 22
|
2015-04-17T15:58:30.000Z
|
2021-04-19T08:26:32.000Z
|
tests/test_mail.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 329
|
2015-04-14T23:34:31.000Z
|
2022-01-21T03:02:46.000Z
|
tests/test_mail.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 18
|
2015-04-17T13:57:56.000Z
|
2018-05-30T14:20:59.000Z
|
from dataclasses import dataclass, field
from typing import List
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sipa.mail import send_contact_mail, send_complex_mail, \
send_official_contact_mail, send_usersuite_contact_mail, \
compose_subject, compose_body, send_mail
class MailSendingTestBase(TestCase):
"""Test Base for functions using `send_mail`
This test base provides a mock for :py:meth:`~sipa.mail.send_mail`
    so any function that builds on it can be tested by watching
    what ``self.send_mail_mock`` got called with.
    This class provides its own setup routine, which either takes
    ``self.args`` and passes it as keyword arguments to
    ``self.mail_function`` (be careful to define it as a
    staticmethod). If that is not enough (because something needs to
    be patched or similar), override ``self._call_mail_function``.
    A minimal illustrative subclass is sketched (commented out) right
    after this class definition.
"""
mail_function: staticmethod
def setUp(self):
super().setUp()
self.send_mail_mock = MagicMock(return_value=True)
self.success = self._call_mail_function()
def _call_mail_function(self):
try:
func = self.mail_function
except AttributeError as exc:
raise AttributeError("You must either provide `mail_function` "
"or override `_call_mail_function`!") from exc
with patch('sipa.mail.send_mail', self.send_mail_mock):
return func(**self.args)
@property
def args(self):
return {}
def assert_arg_in_call_arg(self, arg, call_arg):
self.assertIn(self.args[arg], self.send_mail_mock.call_args[1][call_arg])
def assert_arg_equals_call_arg(self, arg, call_arg):
self.assertEqual(self.args[arg], self.send_mail_mock.call_args[1][call_arg])
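# Hedged illustration (not part of the original test suite): a minimal subclass
# showing how MailSendingTestBase is meant to be used -- `mail_function` wrapped
# in staticmethod() and `args` supplied as a property, mirroring the real test
# cases further down. The argument values are hypothetical.
#
# class ExampleMailTestCase(MailSendingTestBase):
#     mail_function = staticmethod(send_complex_mail)
#
#     @property
#     def args(self):
#         return {'author': 'foo@bar.baz', 'recipient': 'support@agd.sn',
#                 'subject': 'test', 'message': 'hello'}
#
#     def test_success_passed(self):
#         self.assertTrue(self.success)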
class ComposeSubjectTestCase(TestCase):
def test_tag_and_category(self):
composed = compose_subject("Subject!", tag="foo", category="bar")
self.assertEqual(composed, "[foo] bar: Subject!")
def test_tag_missing(self):
composed = compose_subject("Subject!", category="bar")
self.assertEqual(composed, "bar: Subject!")
def test_category_missing(self):
composed = compose_subject("Subject!", tag="foo")
self.assertEqual(composed, "[foo] Subject!")
def test_both_missing(self):
composed = compose_subject("subject")
self.assertEqual(composed, "subject")
class ComposeBodyTestCase(TestCase):
def setUp(self):
super().setUp()
self.message = "Lorem ipsum Dolor sit amet.\ngaudeamus igitur!"
def test_without_dict_is_identity(self):
self.assertEqual(compose_body(self.message), self.message)
def test_correct_header_with_full_dict(self):
info = {'Name': "Foo Bar", 'Social status': "Knows Python"}
composed = compose_body(self.message, header=info)
for key, val in info.items():
self.assertIn(f"{key}: {val}", composed)
self.assertIn(self.message, composed)
class SMTPTestBase(TestCase):
"""Base class providing mocks for current_app and SMTP().send_mail()"""
def setUp(self):
self.app_mock = MagicMock()
self.smtp_mock = MagicMock()
self.app_mock.config = self._get_app_config()
def _get_app_config(self):
return {
'MAILSERVER_HOST': 'some-mailserver.agdsn.network',
'MAILSERVER_PORT': 25,
'MAILSERVER_SSL': None,
'MAILSERVER_SSL_VERIFY': False,
'MAILSERVER_SSL_CA_DATA': None,
'MAILSERVER_SSL_CA_FILE': None,
'MAILSERVER_USER': None,
'MAILSERVER_PASSWORD': None,
'CONTACT_SENDER_MAIL': 'noreply@agdsn.de',
}
def _patch_smtp(self):
if self.app_mock.config['MAILSERVER_SSL'] == 'ssl':
return patch('sipa.mail.smtplib.SMTP_SSL', self.smtp_mock)
else:
return patch('sipa.mail.smtplib.SMTP', self.smtp_mock)
class SendMailTestBase(SMTPTestBase):
def setUp(self):
super().setUp()
def dont_wrap_message(msg):
return msg
self.wrap_mock = MagicMock(side_effect=dont_wrap_message)
self.args = {
'author': "foo@bar.baz",
'recipient': "support@agd.sn",
'subject': "Internet broken",
'message': "Fix it!!!",
}
with self._patch_smtp(), \
patch('sipa.mail.current_app', self.app_mock), \
patch('sipa.mail.wrap_message', self.wrap_mock), \
self.assertLogs('sipa.mail', level='INFO') as log:
self.success = send_mail(**self.args)
self.log = log
@dataclass
class SendmailSig:
"""Signature of SMTP().sendmail()"""
from_addr: str
to_addrs: List[str]
msg: str
mail_options: List = field(default_factory=lambda: [])
rcpt_options: List = field(default_factory=lambda: [])
call_args = self.smtp_mock().sendmail.call_args
self.observed_call_args = SendmailSig(*call_args[0], **call_args[1])
class SendMailCommonTests(object):
def test_wrap_message_called(self):
self.assertEqual(self.wrap_mock.call_count, 1)
self.assertEqual(self.wrap_mock.call_args[0], (self.args['message'],))
def test_smtp_close_called(self):
self.assertTrue(self.smtp_mock().close.called)
def test_sendmail_envelope_sender(self):
self.assertEqual(self.observed_call_args.from_addr,
self.app_mock.config['CONTACT_SENDER_MAIL'],
"Wrong envelope sender set!")
def test_sendmail_from_header(self):
self.assertIn(f"From: {self.args['author']}\n",
self.observed_call_args.msg,
"Wrong From: header!")
def test_sendmail_otrs_header(self):
self.assertIn(f"X-OTRS-CustomerId: {self.args['author']}\n",
self.observed_call_args.msg,
"X-OTRS-CustumerId incorrect!")
def test_sendmail_reply_to(self):
self.assertIn(f"Reply-To: {self.args['author']}\n",
self.observed_call_args.msg,
"Wrong Reply-To: header!")
def test_sendmail_recipient_passed(self):
recipient = self.observed_call_args.to_addrs
self.assertEqual(recipient, self.args['recipient'])
message = self.observed_call_args.msg
self.assertIn(f"To: {recipient}", message)
def test_sendmail_subject_passed(self):
message = self.observed_call_args.msg
self.assertIn(f"Subject: {self.args['subject']}", message)
def test_returned_true(self):
self.assertEqual(self.success, True)
def test_info_logged(self):
log_message = self.log.output.pop()
self.assertIn("Successfully sent mail", log_message)
# nothing else there
self.assertFalse(self.log.output)
class SendMailNoAuthTestCase(SendMailTestBase, SendMailCommonTests):
def test_smtp_login_not_called(self):
self.assertFalse(self.smtp_mock().login.called)
class SendMailAuthTestCase(SendMailTestBase, SendMailCommonTests):
def test_smtp_login_called(self):
self.assertTrue(self.smtp_mock().login.called)
def _get_app_config(self):
return {
**super(SendMailTestBase, self)._get_app_config(),
'MAILSERVER_USER': 'test',
'MAILSERVER_PASSWORD': 'secure',
}
class SendMailTestSslCase(SendMailTestBase, SendMailCommonTests):
def _get_app_config(self):
return {
**super(SendMailTestBase, self)._get_app_config(),
'MAILSERVER_PORT': 465,
'MAILSERVER_SSL': 'ssl',
}
class SendMailTestStarttlsCase(SendMailTestBase, SendMailCommonTests):
def test_smtp_starttls_called(self):
self.assertTrue(self.smtp_mock().starttls.called)
def _get_app_config(self):
return {
**super(SendMailTestBase, self)._get_app_config(),
'MAILSERVER_PORT': 587,
'MAILSERVER_SSL': 'starttls',
}
class SendMailFailingTestCase(SMTPTestBase):
def setUp(self):
super().setUp()
def bad_sendmail(*_, **__):
raise IOError()
self.smtp_mock().sendmail.side_effect = bad_sendmail
with self._patch_smtp(), \
patch('sipa.mail.current_app', self.app_mock) as app, \
self.assertLogs('sipa.mail', level='ERROR') as log:
self.success = send_mail('', '', '', '')
self.log = log
    def test_send_mail_logs_on_failure(self):
log_message = self.log.output.pop()
self.assertIn("Unable to connect", log_message)
# nothing else there
self.assertFalse(self.log.output)
def test_failing_returns_false(self):
self.assertFalse(self.success)
class ComplexMailContentTestCase(MailSendingTestBase):
mail_function = staticmethod(send_complex_mail)
@property
def args(self):
return {
'author': "foo@bar.baz",
'subject': "test",
'message': "Dies ist eine Testnachricht.",
'tag': "Testtag",
'category': 'Kategorie mit einer nichtleeren Menge an Morphismen',
'header': {'foo': "Bar", 'alkohol': "Na Klar!"},
}
def test_success_passed(self):
self.assertTrue(self.success)
def test_keyword_args_used(self):
self.assertFalse(self.send_mail_mock.call_args[0])
def test_subject_complete_passed(self):
subject_passed = self.send_mail_mock.call_args[1]['subject']
self.assertIn(self.args['subject'], subject_passed)
self.assertIn(self.args['tag'], subject_passed)
self.assertIn(self.args['category'], subject_passed)
def test_message_complete_passed(self):
message_passed = self.send_mail_mock.call_args[1]['message']
self.assertIn(self.args['message'], message_passed)
for key, value in self.args['header'].items():
self.assertIn(key, message_passed)
self.assertIn(value, message_passed)
class ComplexMailArgumentsTestCase(TestCase):
def test_fails_on_missing_argument(self):
"""Test send_complex_mail needs all of the required arguments"""
required_args = ['author', 'recipient', 'subject', 'message']
for blacklist_arg in required_args:
kwargs = {arg: MagicMock() for arg in required_args
if arg != blacklist_arg}
with self.assertRaises(TypeError):
send_complex_mail(**kwargs)
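# Illustrative call sketch (an assumption, not part of the suite): putting the
# required arguments asserted above together with the optional fields used by
# ComplexMailContentTestCase, send_complex_mail is expected to be invoked
# roughly like this (the recipient address is a made-up placeholder):
#
#     send_complex_mail(
#         author="foo@bar.baz",
#         recipient="support@example.org",
#         subject="test",
#         message="Dies ist eine Testnachricht.",
#         tag="Testtag",
#         category="Some category",
#         header={"foo": "Bar"},
#     )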
class OfficialContactMailTestCase(MailSendingTestBase):
mail_function = staticmethod(send_official_contact_mail)
@property
def args(self):
return {
'author': "foo@bar.baz",
'subject': "test",
'message': "Suchen sie einen Partner?",
'name': "Paul Dirac",
}
def test_success_passed(self):
self.assertTrue(self.success)
def test_sender_mail_passed(self):
self.assert_arg_equals_call_arg('author', 'author')
def test_subject_complete(self):
self.assert_arg_in_call_arg('subject', 'subject')
def test_message_body_complete(self):
self.assert_arg_in_call_arg('message', 'message')
self.assert_arg_in_call_arg('name', 'message')
class ContactMailTestCase(MailSendingTestBase):
def setUp(self):
self.backends_mock = MagicMock()
self.dorm_display_name = "Testdormitory"
self.backends_mock.get_dormitory('test').display_name = self.dorm_display_name
self.dorm_mail = "support@foo.bar"
self.backends_mock.get_dormitory('test').datasource.support_mail = self.dorm_mail
# the setup of the parent class comes later because this
# prepares the `mail_function` call
super().setUp()
@property
def args(self):
return {
'author': "foo@bar.baz",
'subject': "test",
'name': "Paul Dirac",
'message': "Nö",
'dormitory_name': 'test',
}
def _call_mail_function(self):
with patch('sipa.mail.send_mail', self.send_mail_mock), \
patch('sipa.mail.backends', self.backends_mock):
return send_contact_mail(**self.args)
def test_success_passed(self):
self.assertTrue(self.success)
def test_message_complete(self):
self.assert_arg_in_call_arg('message', 'message')
self.assert_arg_in_call_arg('name', 'message')
self.assertIn(self.dorm_display_name, self.send_mail_mock.call_args[1]['message'])
def test_subject_complete(self):
self.assert_arg_in_call_arg('subject', 'subject')
def test_sender_mail_passed(self):
self.assert_arg_equals_call_arg('author', 'author')
def test_recipient_passed(self):
recipient = self.send_mail_mock.call_args[1]['recipient']
self.assertEqual(recipient, self.dorm_mail)
class UsersuiteContactMailTestCase(MailSendingTestBase):
def setUp(self):
mock = MagicMock()
mock.login.value = 'test_login'
mock.datasource.mail_server = "agdsn.de"
mock.datasource.support_mail = "support@agd.sn"
self.user_mock = mock
# the setup of the parent class comes later because this
# prepares the `mail_function` call
super().setUp()
mail_function = staticmethod(send_usersuite_contact_mail)
@property
def args(self):
return {
'subject': "test",
'message': "Nö",
'category': "Spaßkategorie",
'user': self.user_mock,
}
def test_success_passed(self):
self.assertTrue(self.success)
def test_sender_composed_correctly(self):
sender = self.send_mail_mock.call_args[1]['author']
self.assertTrue(sender.endswith(self.user_mock.datasource.mail_server),
msg="Sender does not end with mail_server")
self.assertTrue(sender.startswith(self.user_mock.login.value),
msg="Sender does not start with login")
def test_recipient_passed(self):
expected_recipient = self.user_mock.datasource.support_mail
self.assertEqual(expected_recipient,
self.send_mail_mock.call_args[1]['recipient'])
    def test_subject_complete(self):
self.assert_arg_in_call_arg('subject', 'subject')
self.assert_arg_in_call_arg('category', 'subject')
def test_message_complete(self):
self.assert_arg_in_call_arg('message', 'message')
self.assertIn(self.user_mock.login.value, self.send_mail_mock.call_args[1]['message'])
| 34.292343
| 94
| 0.643166
|
bf60095e2d59cb2f39a7f6d41f03ef2643cd4f72
| 282
|
py
|
Python
|
tests/test_skeleton.py
|
aniket-ak/Battery-SOH-Forecasting
|
da9bf14556e2f617f4f79701605bfc1846dcac20
|
[
"MIT"
] | 2
|
2021-11-23T08:47:25.000Z
|
2022-02-23T07:53:20.000Z
|
tests/test_skeleton.py
|
aniket-ak/Battery-SOH-Forecasting
|
da9bf14556e2f617f4f79701605bfc1846dcac20
|
[
"MIT"
] | null | null | null |
tests/test_skeleton.py
|
aniket-ak/Battery-SOH-Forecasting
|
da9bf14556e2f617f4f79701605bfc1846dcac20
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from gpr.skeleton import fib
__author__ = "Aniket"
__copyright__ = "Aniket"
__license__ = "mit"
def test_fib():
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
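# A minimal fib sketch consistent with the assertions above (an assumption: the
# real gpr.skeleton.fib may differ, this only mirrors the tested behaviour,
# including the AssertionError for non-positive input):
#
#     def fib(n):
#         assert n > 0
#         a, b = 1, 1
#         for _ in range(n - 1):
#             a, b = b, a + b
#         return a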
| 15.666667
| 39
| 0.620567
|
db0ffc1c8ac44f4fb8d9522cdbb603cd83a516da
| 7,814
|
py
|
Python
|
assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
YorksonChang/CS231n-Assignments
|
8113f69a3b9f7b26640d454c48aa05f653e71bd6
|
[
"MIT"
] | null | null | null |
assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
YorksonChang/CS231n-Assignments
|
8113f69a3b9f7b26640d454c48aa05f653e71bd6
|
[
"MIT"
] | null | null | null |
assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
YorksonChang/CS231n-Assignments
|
8113f69a3b9f7b26640d454c48aa05f653e71bd6
|
[
"MIT"
] | null | null | null |
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
distance_l2 = np.sqrt(np.sum(np.square(self.X_train[j] - X[i])))
dists[i][j] = distance_l2
pass
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
      # All distances from test point i to every training point at once.
      dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i]), axis=1))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
    # (x - y)^2 = x.x - 2*x.y + y.y for every test/train pair, via broadcasting.
    test_sq = np.sum(np.square(X), axis=1, keepdims=True)    # (num_test, 1)
    train_sq = np.sum(np.square(self.X_train), axis=1)        # (num_train,)
    dists = np.sqrt(test_sq - 2.0 * X.dot(self.X_train.T) + train_sq)
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in xrange(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
      # Labels of the k nearest training points (smallest distances first).
      closest_y = self.y_train[np.argsort(dists[i])[:k]]
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
      # Majority vote; np.bincount + argmax resolves ties toward the smaller
      # label (assumes non-negative integer class labels).
      y_pred[i] = np.argmax(np.bincount(closest_y))
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
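# Hypothetical usage sketch (variable names and shapes are assumptions, not part
# of the assignment): memorize the training set, then classify test rows with
# the fully vectorized distance computation and k=5 voting.
#
#     classifier = KNearestNeighbor()
#     classifier.train(X_train, y_train)               # X_train: (num_train, D)
#     y_test_pred = classifier.predict(X_test, k=5, num_loops=0)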
| 44.651429
| 80
| 0.455337
|
27d4d543afebedb09c7dbc8c3463fa8b5c67298b
| 32,709
|
py
|
Python
|
rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | 1
|
2020-11-20T03:10:18.000Z
|
2020-11-20T03:10:18.000Z
|
rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | 30
|
2020-11-27T23:12:12.000Z
|
2021-04-25T15:37:42.000Z
|
rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | null | null | null |
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import array
import errno
import imp
import io
import multiprocessing
import queue
import socket
import struct
import sys
import threading
import time
from serial import Serial, SerialException, SerialTimeoutException
import roslib
import rospy
from std_msgs.msg import Time
from rosserial_msgs.msg import TopicInfo, Log
from rosserial_msgs.srv import RequestParamRequest, RequestParamResponse
import diagnostic_msgs.msg
ERROR_MISMATCHED_PROTOCOL = "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client"
ERROR_NO_SYNC = "no sync with device"
ERROR_PACKET_FAILED = "Packet Failed : Failed to read msg data"
def load_pkg_module(package, directory):
#check if its in the python path
path = sys.path
try:
imp.find_module(package)
except ImportError:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except ImportError:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
def load_service(package,service):
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
srv = getattr(s, service)
mreq = getattr(s, service+"Request")
mres = getattr(s, service+"Response")
return srv,mreq,mres
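# Example of the dynamic import helpers above (assumes std_msgs / std_srvs are
# available on the ROS package path; any installed message or service works the
# same way):
#
#     String = load_message('std_msgs', 'String')
#     SetBool, SetBoolRequest, SetBoolResponse = load_service('std_srvs', 'SetBool')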
class Publisher:
"""
Publisher forwards messages from the serial device to ROS.
"""
def __init__(self, topic_info):
""" Create a new publisher. """
self.topic = topic_info.topic_name
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.publisher = rospy.Publisher(self.topic, self.message, queue_size=10)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def handlePacket(self, data):
""" Forward message to ROS network. """
m = self.message()
m.deserialize(data)
self.publisher.publish(m)
class Subscriber:
"""
Subscriber forwards messages from ROS to the serial device.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.id = topic_info.topic_id
self.parent = parent
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def callback(self, msg):
""" Forward message to serial device. """
data_buffer = io.BytesIO()
msg.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
def unregister(self):
rospy.loginfo("Removing subscriber: %s", self.topic)
self.subscriber.unregister()
class ServiceServer:
"""
ServiceServer responds to requests from ROS.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
self.service = rospy.Service(self.topic, srv, self.callback)
# response message
self.data = None
def unregister(self):
rospy.loginfo("Removing service: %s", self.topic)
self.service.shutdown()
def callback(self, req):
""" Forward request to serial device. """
data_buffer = io.BytesIO()
req.serialize(data_buffer)
self.response = None
self.parent.send(self.id, data_buffer.getvalue())
while self.response is None:
pass
return self.response
def handlePacket(self, data):
""" Forward response to ROS network. """
r = self.mres()
r.deserialize(data)
self.response = r
class ServiceClient:
"""
    ServiceClient forwards service requests from the serial device to a ROS
    service and relays the response back.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
rospy.wait_for_service(self.topic)
self.proxy = rospy.ServiceProxy(self.topic, srv)
def handlePacket(self, data):
""" Forward request to ROS network. """
req = self.mreq()
req.deserialize(data)
# call service proxy
resp = self.proxy(req)
# serialize and publish
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
"""
RosSerialServer waits for a socket connection then passes itself, forked as a
new process, to SerialClient which uses it as a serial port. It continues to listen
for additional connections. Each forked process is a new ros node, and proxies ros
operations (e.g. publish/subscribe) from its connection to the rest of ros.
"""
def __init__(self, tcp_portnum, fork_server=False):
rospy.loginfo("Fork_server is: %s" % fork_server)
self.tcp_portnum = tcp_portnum
self.fork_server = fork_server
def listen(self):
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind the socket to a public host, and a well-known port
self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
self.serversocket.listen(1)
while True:
#accept connections
rospy.loginfo("Waiting for socket connection")
clientsocket, address = self.serversocket.accept()
#now do something with the clientsocket
rospy.loginfo("Established a socket connection from %s on port %s" % address)
self.socket = clientsocket
self.isConnected = True
if self.fork_server: # if configured to launch server in a separate process
rospy.loginfo("Forking a socket server process")
process = multiprocessing.Process(target=self.startSocketServer, args=address)
process.daemon = True
process.start()
rospy.loginfo("launched startSocketServer")
else:
rospy.loginfo("calling startSerialClient")
self.startSerialClient()
rospy.loginfo("startSerialClient() exited")
def startSerialClient(self):
client = SerialClient(self)
try:
client.run()
except KeyboardInterrupt:
pass
except RuntimeError:
rospy.loginfo("RuntimeError exception caught")
self.isConnected = False
except socket.error:
rospy.loginfo("socket.error exception caught")
self.isConnected = False
finally:
rospy.loginfo("Client has exited, closing socket.")
self.socket.close()
for sub in client.subscribers.values():
sub.unregister()
for srv in client.services.values():
srv.unregister()
def startSocketServer(self, port, address):
rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % address)
rospy.init_node("serial_node_%r" % address)
self.startSerialClient()
def flushInput(self):
pass
def write(self, data):
if not self.isConnected:
return
length = len(data)
totalsent = 0
while totalsent < length:
try:
totalsent += self.socket.send(data[totalsent:])
except BrokenPipeError:
raise RuntimeError("RosSerialServer.write() socket connection broken")
def read(self, rqsted_length):
self.msg = b''
if not self.isConnected:
return self.msg
while len(self.msg) < rqsted_length:
chunk = self.socket.recv(rqsted_length - len(self.msg))
if chunk == b'':
raise RuntimeError("RosSerialServer.read() socket connection broken")
self.msg = self.msg + chunk
return self.msg
def inWaiting(self):
try: # the caller checks just for <1, so we'll peek at just one byte
chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
if chunk == b'':
raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
return len(chunk)
except BlockingIOError:
return 0
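# Sketch of how this server is typically wired up (illustrative only, not part
# of the module; 11411 is just an example port and the node name is made up):
#
#     rospy.init_node("serial_node")
#     server = RosSerialServer(tcp_portnum=11411, fork_server=True)
#     server.listen()   # blocks, handing each incoming socket to a SerialClient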
class SerialClient(object):
"""
    SerialClient bridges a rosserial device (serial port or socket) to ROS,
    negotiating topics and forwarding packets in both directions.
"""
header = b'\xff'
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
protocol_ver1 = b'\xff'
protocol_ver2 = b'\xfe'
protocol_ver = protocol_ver2
def __init__(self, port=None, baud=57600, timeout=5.0, fix_pyserial_for_test=False):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
self.write_queue = queue.Queue()
self.write_thread = None
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.lastsync_success = rospy.Time(0)
self.last_read = rospy.Time(0)
self.last_write = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.fix_pyserial_for_test = fix_pyserial_for_test
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=10)
if port is None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
#assume its a filelike object
self.port=port
else:
# open a specific port
while not rospy.is_shutdown():
try:
if self.fix_pyserial_for_test:
# see https://github.com/pyserial/pyserial/issues/59
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10, rtscts=True, dsrdtr=True)
else:
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10)
break
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
time.sleep(3)
if rospy.is_shutdown():
return
time.sleep(0.1) # Wait for ready (patch for Uno)
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0)
self.requestTopics()
self.lastsync = rospy.Time.now()
def requestTopics(self):
""" Determine topics to subscribe/publish. """
rospy.loginfo('Requesting topics...')
# TODO remove if possible
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
# request topic sync
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x00\x00\xff")
def txStopRequest(self):
""" Send stop tx request to client before the node exits. """
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x0b\x00\xf4")
rospy.loginfo("Sending tx stop request")
def tryRead(self, length):
try:
read_start = time.time()
bytes_remaining = length
result = bytearray()
while bytes_remaining != 0 and time.time() - read_start < self.timeout:
with self.read_lock:
received = self.port.read(bytes_remaining)
if len(received) != 0:
self.last_read = rospy.Time.now()
result.extend(received)
bytes_remaining -= len(received)
if bytes_remaining != 0:
raise IOError("Returned short (expected %d bytes, received %d instead)." % (length, length - bytes_remaining))
return bytes(result)
except Exception as e:
raise IOError("Serial Port read failure: %s" % e)
def run(self):
""" Forward recieved messages to appropriate publisher. """
# Launch write thread.
if self.write_thread is None:
self.write_thread = threading.Thread(target=self.processWriteQueue)
self.write_thread.daemon = True
self.write_thread.start()
# Handle reading.
data = ''
read_step = None
while self.write_thread.is_alive() and not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):
if self.synced:
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_NO_SYNC)
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
with self.read_lock:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
# Find sync flag.
flag = [0, 0]
read_step = 'syncflag'
flag[0] = self.tryRead(1)
if (flag[0] != self.header):
continue
# Find protocol version.
read_step = 'protocol'
flag[1] = self.tryRead(1)
if flag[1] != self.protocol_ver:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_MISMATCHED_PROTOCOL)
rospy.logerr("Mismatched protocol version in packet (%s): lost sync or rosserial_python is from different ros release than the rosserial client" % repr(flag[1]))
protocol_ver_msgs = {
self.protocol_ver1: 'Rev 0 (rosserial 0.4 and earlier)',
self.protocol_ver2: 'Rev 1 (rosserial 0.5+)',
b'\xfd': 'Some future rosserial version'
}
if flag[1] in protocol_ver_msgs:
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
# Read message length, checksum (3 bytes)
read_step = 'message length'
msg_len_bytes = self.tryRead(3)
msg_length, _ = struct.unpack("<hB", msg_len_bytes)
# Validate message length checksum.
if sum(array.array("B", msg_len_bytes)) % 256 != 255:
rospy.loginfo("Wrong checksum for msg length, length %d, dropping message." % (msg_length))
continue
# Read topic id (2 bytes)
read_step = 'topic id'
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<H", topic_id_header)
# Read serialized message data.
read_step = 'data'
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_PACKET_FAILED)
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("expected msg length is %d", msg_length)
raise
                # Read checksum for topic id and msg
read_step = 'data checksum'
chk = self.tryRead(1)
checksum = sum(array.array('B', topic_id_header + msg + chk))
# Validate checksum.
if checksum % 256 == 255:
self.synced = True
self.lastsync_success = rospy.Time.now()
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
self.requestTopics()
time.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError as exc:
rospy.logwarn('Last read step: %s' % read_step)
rospy.logwarn('Run loop error: %s' % exc)
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
with self.read_lock:
self.port.flushInput()
with self.write_lock:
self.port.flushOutput()
self.requestTopics()
self.txStopRequest()
self.write_thread.join()
def setPublishSize(self, size):
if self.buffer_out < 0:
self.buffer_out = size
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, size):
if self.buffer_in < 0:
self.buffer_in = size
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
if not msg.topic_name in list(self.subscribers.keys()):
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = io.BytesIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param is None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if isinstance(param, dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if not isinstance(param, list):
param = [param]
#check to make sure that all parameters in list are same type
t = type(param[0])
for p in param:
            if t != type(p):
                rospy.logerr('All parameters in the list %s must be of the same type' % req.name)
return
if t == int or t == bool:
resp.ints = param
if t == float:
resp.floats =param
if t == str:
resp.strings = param
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
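    # Worked illustration of the mapping above (parameter names and values are
    # hypothetical): an int/bool param such as 5 ends up in resp.ints, a float
    # param such as 0.5 in resp.floats, and a str param such as 'base' in
    # resp.strings; dict-valued params and mixed-type lists are rejected.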
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if msg.level == Log.ROSDEBUG:
rospy.logdebug(msg.msg)
elif msg.level == Log.INFO:
rospy.loginfo(msg.msg)
elif msg.level == Log.WARN:
rospy.logwarn(msg.msg)
elif msg.level == Log.ERROR:
rospy.logerr(msg.msg)
elif msg.level == Log.FATAL:
rospy.logfatal(msg.msg)
def send(self, topic, msg):
"""
Queues data to be written to the serial port.
"""
self.write_queue.put((topic, msg))
def _write(self, data):
"""
        Writes raw data over the serial port. Assumes the data is formatted as a packet. http://wiki.ros.org/rosserial/Overview/Protocol
"""
with self.write_lock:
self.port.write(data)
self.last_write = rospy.Time.now()
def _send(self, topic, msg_bytes):
"""
Send a message on a particular topic to the device.
"""
length = len(msg_bytes)
if self.buffer_in > 0 and length > self.buffer_in:
rospy.logerr("Message from ROS network dropped: message larger than buffer.\n%s" % msg)
return -1
else:
# frame : header (1b) + version (1b) + msg_len(2b) + msg_len_chk(1b) + topic_id(2b) + msg(nb) + msg_topic_id_chk(1b)
length_bytes = struct.pack('<h', length)
length_checksum = 255 - (sum(array.array('B', length_bytes)) % 256)
length_checksum_bytes = struct.pack('B', length_checksum)
topic_bytes = struct.pack('<h', topic)
msg_checksum = 255 - (sum(array.array('B', topic_bytes + msg_bytes)) % 256)
msg_checksum_bytes = struct.pack('B', msg_checksum)
self._write(self.header + self.protocol_ver + length_bytes + length_checksum_bytes + topic_bytes + msg_bytes + msg_checksum_bytes)
return length
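    # Worked example of the frame layout documented above (hypothetical values):
    # topic=10 and msg=b'\x01\x02' give length_bytes=b'\x02\x00',
    # length_checksum=255-2=253, topic_bytes=b'\x0a\x00' and
    # msg_checksum=255-((10+0+1+2)%256)=242, so the bytes on the wire are
    # ff fe 02 00 fd 0a 00 01 02 f2.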
def processWriteQueue(self):
"""
Main loop for the thread that processes outgoing data to write to the serial port.
"""
while not rospy.is_shutdown():
if self.write_queue.empty():
time.sleep(0.01)
else:
data = self.write_queue.get()
while True:
try:
if isinstance(data, tuple):
topic, msg = data
self._send(topic, msg)
elif isinstance(data, bytes):
self._write(data)
else:
rospy.logerr("Trying to write invalid data type: %s" % type(data))
break
except SerialTimeoutException as exc:
rospy.logerr('Write timeout: %s' % exc)
time.sleep(1)
except RuntimeError as exc:
rospy.logerr('Write thread exception: %s' % exc)
break
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
| 40.183047
| 181
| 0.593812
|
505d4ee52422ed0383f10227bae7a5c0b5eba290
| 18,384
|
py
|
Python
|
aries_cloudagent/protocols/present_proof/indy/proof.py
|
rbeltranmontijo/aries-python
|
77f6e0dd2f98cb70c2a17a1c72b729f0766f8f61
|
[
"Apache-2.0"
] | 1
|
2021-04-15T09:44:00.000Z
|
2021-04-15T09:44:00.000Z
|
aries_cloudagent/protocols/present_proof/indy/proof.py
|
rbeltranmontijo/aries-python
|
77f6e0dd2f98cb70c2a17a1c72b729f0766f8f61
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/protocols/present_proof/indy/proof.py
|
rbeltranmontijo/aries-python
|
77f6e0dd2f98cb70c2a17a1c72b729f0766f8f61
|
[
"Apache-2.0"
] | null | null | null |
"""Marshmallow bindings for indy proofs."""
from typing import Mapping, Sequence
from marshmallow import fields, validate
from ....messaging.models.base import BaseModel, BaseModelSchema
from ....messaging.valid import (
INDY_CRED_DEF_ID,
INDY_REV_REG_ID,
INDY_SCHEMA_ID,
INT_EPOCH,
NUM_STR_WHOLE,
)
from ....utils.tracing import AdminAPIMessageTracingSchema
from ..indy.requested_creds import (
IndyRequestedCredsRequestedAttrSchema,
IndyRequestedCredsRequestedPredSchema,
)
from .predicate import Predicate
class IndyEQProof(BaseModel):
"""Equality proof for indy primary proof."""
class Meta:
"""Equality proof metadata."""
schema_class = "IndyEQProofMeta"
def __init__(
self,
revealed_attrs: Mapping[str, str] = None,
a_prime: str = None,
e: str = None,
v: str = None,
m: Mapping[str, str] = None,
m2: str = None,
**kwargs,
):
"""Initialize equality proof object."""
super().__init__(**kwargs)
self.revealed_attrs = revealed_attrs
self.a_prime = a_prime
self.e = e
self.v = v
self.m = m
self.m2 = m2
class IndyEQProofSchema(BaseModelSchema):
"""Indy equality proof schema."""
class Meta:
"""Indy equality proof metadata."""
model_class = IndyEQProof
revealed_attrs = fields.Dict(
keys=fields.Str(example="preference"),
values=fields.Str(**NUM_STR_WHOLE),
)
a_prime = fields.Str(**NUM_STR_WHOLE)
e = fields.Str(**NUM_STR_WHOLE)
v = fields.Str(**NUM_STR_WHOLE)
m = fields.Dict(
keys=fields.Str(example="master_secret"),
values=fields.Str(**NUM_STR_WHOLE),
)
m2 = fields.Str(**NUM_STR_WHOLE)
class IndyGEProofPred(BaseModel):
"""Indy GE proof predicate."""
class Meta:
"""Indy GE proof predicate metadata."""
schema_class = "IndyGEProofPredSchema"
def __init__(
self,
attr_name: str = None,
p_type: str = None,
value: int = None,
**kwargs,
):
"""Initialize indy GE proof predicate."""
super().__init__(**kwargs)
self.attr_name = attr_name
self.p_type = p_type
self.value = value
class IndyGEProofPredSchema(BaseModelSchema):
"""Indy GE proof predicate schema."""
class Meta:
"""Indy GE proof predicate metadata."""
model_class = IndyGEProofPred
attr_name = fields.Str(description="Attribute name, indy-canonicalized")
p_type = fields.Str(
description="Predicate type",
validate=validate.OneOf([p.fortran for p in Predicate]),
)
value = fields.Integer(strict=True, description="Predicate threshold value")
class IndyGEProof(BaseModel):
"""Greater-than-or-equal-to proof for indy primary proof."""
class Meta:
"""GE proof metadata."""
schema_class = "IndyGEProofMeta"
def __init__(
self,
u: Mapping[str, str] = None,
r: Mapping[str, str] = None,
mj: str = None,
alpha: str = None,
t: Mapping[str, str] = None,
predicate: IndyGEProofPred = None,
**kwargs,
):
"""Initialize GE proof object."""
super().__init__(**kwargs)
self.u = u
self.r = r
self.mj = mj
self.alpha = alpha
self.t = t
self.predicate = predicate
class IndyGEProofSchema(BaseModelSchema):
"""Indy GE proof schema."""
class Meta:
"""Indy GE proof schema metadata."""
model_class = IndyGEProof
u = fields.Dict(keys=fields.Str(), values=fields.Str(**NUM_STR_WHOLE))
r = fields.Dict(keys=fields.Str(), values=fields.Str(**NUM_STR_WHOLE))
mj = fields.Str(**NUM_STR_WHOLE)
alpha = fields.Str(**NUM_STR_WHOLE)
t = fields.Dict(keys=fields.Str(), values=fields.Str(**NUM_STR_WHOLE))
predicate = fields.Nested(IndyGEProofPredSchema)
class IndyPrimaryProof(BaseModel):
"""Indy primary proof."""
class Meta:
"""Indy primary proof metadata."""
schema_class = "IndyPrimaryProofSchema"
def __init__(
self,
eq_proof: IndyEQProof = None,
ge_proofs: Sequence[IndyGEProof] = None,
**kwargs,
):
"""Initialize indy primary proof."""
super().__init__(**kwargs)
self.eq_proof = eq_proof
self.ge_proofs = ge_proofs
class IndyPrimaryProofSchema(BaseModelSchema):
"""Indy primary proof schema."""
class Meta:
"""Indy primary proof schema metadata."""
model_class = IndyPrimaryProof
eq_proof = fields.Nested(
IndyEQProofSchema, allow_none=True, description="Indy equality proof"
)
ge_proofs = fields.Nested(
IndyGEProofSchema,
many=True,
allow_none=True,
description="Indy GE proofs",
)
class IndyNonRevocProof(BaseModel):
"""Indy non-revocation proof."""
class Meta:
"""Indy non-revocation proof metadata."""
schema_class = "IndyNonRevocProofSchema"
def __init__(
self,
x_list: Mapping = None,
c_list: Mapping = None,
**kwargs,
):
"""Initialize indy non-revocation proof."""
super().__init__(**kwargs)
self.x_list = x_list
self.c_list = c_list
class IndyNonRevocProofSchema(BaseModelSchema):
"""Indy non-revocation proof schema."""
class Meta:
"""Indy non-revocation proof schema metadata."""
model_class = IndyNonRevocProof
x_list = fields.Dict(keys=fields.Str(), values=fields.Str())
c_list = fields.Dict(keys=fields.Str(), values=fields.Str())
class IndyProofProofProofsProof(BaseModel):
"""Indy proof.proof.proofs constituent proof."""
class Meta:
"""Indy proof.proof.proofs constituent proof schema."""
schema_class = "IndyProofProofProofsProofSchema"
def __init__(
self,
primary_proof: IndyPrimaryProof = None,
non_revoc_proof: IndyNonRevocProof = None,
**kwargs,
):
"""Initialize proof.proof.proofs constituent proof."""
super().__init__(**kwargs)
self.primary_proof = primary_proof
self.non_revoc_proof = non_revoc_proof
class IndyProofProofProofsProofSchema(BaseModelSchema):
"""Indy proof.proof.proofs constituent proof schema."""
class Meta:
"""Indy proof.proof.proofs constituent proof schema metadata."""
model_class = IndyProofProofProofsProof
primary_proof = fields.Nested(
IndyPrimaryProofSchema,
description="Indy primary proof",
)
non_revoc_proof = fields.Nested(
IndyNonRevocProofSchema,
allow_none=True,
description="Indy non-revocation proof",
)
class IndyProofProofAggregatedProof(BaseModel):
"""Indy proof.proof aggregated proof."""
class Meta:
"""Indy proof.proof aggregated proof metadata."""
schema_class = "IndyProofProofAggregatedProofSchema"
def __init__(
self,
c_hash: str = None,
c_list: Sequence[Sequence[int]] = None,
**kwargs,
):
"""Initialize indy proof.proof agreggated proof."""
super().__init__(**kwargs)
self.c_hash = c_hash
self.c_list = c_list
class IndyProofProofAggregatedProofSchema(BaseModelSchema):
"""Indy proof.proof aggregated proof schema."""
class Meta:
"""Indy proof.proof aggregated proof schema metadata."""
model_class = IndyProofProofAggregatedProof
c_hash = fields.Str(description="c_hash value")
c_list = fields.List(
fields.List(fields.Int(strict=True)),
description="c_list value",
)
class IndyProofProof(BaseModel):
"""Indy proof.proof content."""
class Meta:
"""Indy proof.proof content metadata."""
schema_class = "IndyProofProofSchema"
def __init__(
self,
proofs: Sequence[IndyProofProofProofsProof] = None,
aggregated_proof: IndyProofProofAggregatedProof = None,
**kwargs,
):
"""Initialize indy proof.proof content."""
super().__init__(**kwargs)
self.proofs = proofs
self.aggregated_proof = aggregated_proof
class IndyProofProofSchema(BaseModelSchema):
"""Indy proof.proof content schema."""
class Meta:
"""Indy proof.proof content schema metadata."""
model_class = IndyProofProof
proofs = fields.Nested(
IndyProofProofProofsProofSchema,
many=True,
description="Indy proof proofs",
)
aggregated_proof = fields.Nested(
IndyProofProofAggregatedProofSchema,
description="Indy proof aggregated proof",
)
class RawEncoded(BaseModel):
"""Raw and encoded attribute values."""
class Meta:
"""Raw and encoded attribute values metadata."""
schema_class = "RawEncodedSchema"
def __init__(
self,
raw: str = None,
encoded: str = None,
**kwargs,
):
"""Initialize raw and encoded attribute values."""
super().__init__(**kwargs)
self.raw = raw
self.encoded = encoded
class RawEncodedSchema(BaseModelSchema):
"""Raw and encoded attribute values schema."""
class Meta:
"""Raw and encoded attribute values schema metadata."""
model_class = RawEncoded
raw = fields.Str(description="Raw value")
encoded = fields.Str(description="Encoded value", **NUM_STR_WHOLE)
class IndyProofRequestedProofRevealedAttr(RawEncoded):
"""Indy proof requested proof revealed attr."""
class Meta:
"""Indy proof requested proof revealed attr metadata."""
schema_class = "IndyProofRequestedProofRevealedAttrSchema"
def __init__(
self,
sub_proof_index: int = None,
**kwargs,
):
"""Initialize indy proof requested proof revealed attr."""
super().__init__(**kwargs)
self.sub_proof_index = sub_proof_index
class IndyProofRequestedProofRevealedAttrSchema(RawEncodedSchema):
"""Indy proof requested proof revealed attr schema."""
class Meta:
"""Indy proof requested proof revealed attr schema metadata."""
model_class = IndyProofRequestedProofRevealedAttr
sub_proof_index = fields.Int(strict=True, description="Sub-proof index")
class IndyProofRequestedProofRevealedAttrGroup(BaseModel):
"""Indy proof requested proof revealed attr group."""
class Meta:
"""Indy proof requested proof revealed attr group metadata."""
schema_class = "IndyProofRequestedProofRevealedAttrGroupSchema"
def __init__(
self,
sub_proof_index: int = None,
values: Mapping[str, RawEncoded] = None,
**kwargs,
):
"""Initialize indy proof requested proof revealed attr."""
super().__init__(**kwargs)
self.sub_proof_index = sub_proof_index
self.values = values
class IndyProofRequestedProofRevealedAttrGroupSchema(BaseModelSchema):
"""Indy proof requested proof revealed attr group schema."""
class Meta:
"""Indy proof requested proof revealed attr group schema metadata."""
model_class = IndyProofRequestedProofRevealedAttrGroup
sub_proof_index = fields.Int(strict=True, description="Sub-proof index")
values = fields.Dict(
keys=fields.Str(),
values=fields.Nested(RawEncodedSchema),
description="Indy proof requested proof revealed attr groups group value",
)
class IndyProofRequestedProofPredicate(BaseModel):
"""Indy proof requested proof predicate."""
class Meta:
"""Indy proof requested proof requested proof predicate metadata."""
schema_class = "IndyProofRequestedProofPredicateSchema"
def __init__(
self,
sub_proof_index: int = None,
**kwargs,
):
"""Initialize indy proof requested proof predicate."""
super().__init__(**kwargs)
self.sub_proof_index = sub_proof_index
class IndyProofRequestedProofPredicateSchema(BaseModelSchema):
"""Indy proof requested prrof predicate schema."""
class Meta:
"""Indy proof requested proof requested proof predicate schema metadata."""
model_class = IndyProofRequestedProofPredicate
sub_proof_index = fields.Int(strict=True, description="Sub-proof index")
class IndyProofRequestedProof(BaseModel):
"""Indy proof.requested_proof content."""
class Meta:
"""Indy proof.requested_proof content metadata."""
schema_class = "IndyProofRequestedProofSchema"
def __init__(
self,
revealed_attrs: Mapping[str, IndyProofRequestedProofRevealedAttr] = None,
revealed_attr_groups: Mapping[
str,
IndyProofRequestedProofRevealedAttrGroup,
] = None,
self_attested_attrs: Mapping = None,
unrevealed_attrs: Mapping = None,
predicates: Mapping[str, IndyProofRequestedProofPredicate] = None,
**kwargs,
):
"""Initialize indy proof requested proof."""
super().__init__(**kwargs)
self.revealed_attrs = revealed_attrs
self.revealed_attr_groups = revealed_attr_groups
self.self_attested_attrs = self_attested_attrs
self.unrevealed_attrs = unrevealed_attrs
self.predicates = predicates
class IndyProofRequestedProofSchema(BaseModelSchema):
"""Indy proof requested proof schema."""
class Meta:
"""Indy proof requested proof schema metadata."""
model_class = IndyProofRequestedProof
revealed_attrs = fields.Dict(
keys=fields.Str(),
values=fields.Nested(IndyProofRequestedProofRevealedAttrSchema),
allow_none=True,
description="Proof requested proof revealed attributes",
)
revealed_attr_groups = fields.Dict(
keys=fields.Str(),
values=fields.Nested(IndyProofRequestedProofRevealedAttrGroupSchema),
allow_none=True,
description="Proof requested proof revealed attribute groups",
)
self_attested_attrs = fields.Dict(
description="Proof requested proof self-attested attributes"
)
unrevealed_attrs = fields.Dict(description="Unrevealed attributes")
predicates = fields.Dict(
keys=fields.Str(),
values=fields.Nested(
IndyProofRequestedProofPredicateSchema,
),
description="Proof requested proof predicates.",
)
class IndyProofIdentifier(BaseModel):
"""Indy proof identifier."""
class Meta:
"""Indy proof identifier metadata."""
schema_class = "IndyProofIdentifierSchema"
def __init__(
self,
schema_id: str = None,
cred_def_id: str = None,
rev_reg_id: str = None,
timestamp: int = None,
**kwargs,
):
"""Initialize indy proof identifier."""
super().__init__(**kwargs)
self.schema_id = schema_id
self.cred_def_id = cred_def_id
self.rev_reg_id = rev_reg_id
self.timestamp = timestamp
class IndyProofIdentifierSchema(BaseModelSchema):
"""Indy proof identifier schema."""
class Meta:
"""Indy proof identifier schema metadata."""
model_class = IndyProofIdentifier
schema_id = fields.Str(description="Schema identifier", **INDY_SCHEMA_ID)
cred_def_id = fields.Str(
description="Credential definition identifier",
**INDY_CRED_DEF_ID,
)
rev_reg_id = fields.Str(
description="Revocation registry identifier",
allow_none=True,
**INDY_REV_REG_ID,
)
timestamp = fields.Int(
strict=True,
allow_none=True,
description="Timestamp epoch",
**INT_EPOCH,
)
class IndyProof(BaseModel):
"""Indy proof."""
class Meta:
"""Indy proof metadata."""
schema_class = "IndyProofSchema"
def __init__(
self,
proof: IndyProofProof = None,
requested_proof: IndyProofRequestedProof = None,
identifiers: Sequence[IndyProofIdentifier] = None,
**kwargs,
):
"""Initialize indy proof."""
super().__init__(**kwargs)
self.proof = proof
self.requested_proof = requested_proof
self.identifiers = identifiers
class IndyProofSchema(BaseModelSchema):
"""Indy proof schema."""
class Meta:
"""Indy proof schema metadata."""
model_class = IndyProof
proof = fields.Nested(
IndyProofProofSchema,
description="Indy proof.proof content",
)
requested_proof = fields.Nested(
IndyProofRequestedProofSchema,
description="Indy proof.requested_proof content",
)
identifiers = fields.Nested(
IndyProofIdentifierSchema,
many=True,
description="Indy proof.identifiers content",
)
class IndyPresSpecSchema(AdminAPIMessageTracingSchema):
"""Request schema for indy proof specification to send as presentation."""
self_attested_attributes = fields.Dict(
description="Self-attested attributes to build into proof",
required=True,
keys=fields.Str(example="attr_name"), # marshmallow/apispec v3.0 ignores
values=fields.Str(
example="self_attested_value",
description=(
"Self-attested attribute values to use in requested-credentials "
"structure for proof construction"
),
),
)
requested_attributes = fields.Dict(
description=(
"Nested object mapping proof request attribute referents to "
"requested-attribute specifiers"
),
required=True,
keys=fields.Str(example="attr_referent"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedAttrSchema),
)
requested_predicates = fields.Dict(
description=(
"Nested object mapping proof request predicate referents to "
"requested-predicate specifiers"
),
required=True,
keys=fields.Str(example="pred_referent"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedPredSchema),
)
trace = fields.Bool(
description="Whether to trace event (default false)",
required=False,
example=False,
)
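# Minimal illustrative payload for IndyPresSpecSchema (every referent, cred_id
# and value below is a made-up placeholder; the nested specifier shapes come
# from IndyRequestedCredsRequestedAttrSchema / IndyRequestedCredsRequestedPredSchema):
#
#     {
#         "self_attested_attributes": {"attr_name": "self_attested_value"},
#         "requested_attributes": {
#             "attr_referent": {"cred_id": "...", "revealed": True},
#         },
#         "requested_predicates": {
#             "pred_referent": {"cred_id": "..."},
#         },
#         "trace": False,
#     }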
| 27.770393
| 85
| 0.645888
|
9f237a40c972eec607f4897b4fb4d7aa5bccd058
| 3,144
|
py
|
Python
|
tests/test_data_date_ranges.py
|
ksilo/LiuAlgoTrader
|
90b3ffdf4fd61adf37880e7b01ca4137a013f79c
|
[
"MIT"
] | null | null | null |
tests/test_data_date_ranges.py
|
ksilo/LiuAlgoTrader
|
90b3ffdf4fd61adf37880e7b01ca4137a013f79c
|
[
"MIT"
] | null | null | null |
tests/test_data_date_ranges.py
|
ksilo/LiuAlgoTrader
|
90b3ffdf4fd61adf37880e7b01ca4137a013f79c
|
[
"MIT"
] | null | null | null |
import time
from datetime import date
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from liualgotrader.common.types import DataConnectorType, TimeScale
from liualgotrader.data.data_factory import data_loader_factory
def test_alpaca_num_trading_days():
    print("test_alpaca_num_trading_days")
alpaca_dl = data_loader_factory(DataConnectorType.alpaca)
if (
alpaca_dl.num_trading_days(
symbol="AAPL", start="2022-01-06", end="2022-01-07"
)
!= 2
):
raise AssertionError("expected 2")
if (
alpaca_dl.num_trading_days(
symbol="aapl", start="2022-01-06", end="2022-01-07"
)
!= 2
):
raise AssertionError("expected 2")
if (
alpaca_dl.num_trading_days(
symbol="AAPL", start="2022-01-01", end="2022-01-07"
)
!= 5
):
raise AssertionError("expected 5")
if (
alpaca_dl.num_trading_days(
symbol="AAPL", start="2021-12-28", end="2022-01-07"
)
!= 9
):
raise AssertionError("expected 9")
if (
alpaca_dl.num_trading_days(
symbol="AAPL", start="2021-07-01", end="2021-07-07"
)
!= 4
):
raise AssertionError("expected 4")
if (
alpaca_dl.num_trading_days(
symbol="BTCUSD", start="2021-07-01", end="2021-07-07"
)
!= 7
):
raise AssertionError("BTCUSD - expected 7")
if (
alpaca_dl.num_trading_days(
symbol="AAPL", start="2022-08-06", end="2022-01-07"
)
!= 0
):
raise AssertionError("expected 0")
return True
def test_alpaca_concurrency_ranges_min():
print("test_alpaca_concurrency_ranges_min")
alpaca_dl = data_loader_factory(DataConnectorType.alpaca)
t = time.time()
if (
len(
alpaca_dl.data_concurrency_ranges(
symbol="AAPL",
start="2021-11-10",
end="2022-01-07",
scale=TimeScale.minute,
)
)
!= 5
):
raise AssertionError("expected 5")
print(time.time() - t)
t = time.time()
if (
len(
alpaca_dl.data_concurrency_ranges(
symbol="AAPL",
start="2022-01-01",
end="2022-01-07",
scale=TimeScale.minute,
)
)
!= 2
):
raise AssertionError("expected 2")
print(time.time() - t)
return True
@settings(deadline=None, max_examples=100)
@given(
start=st.dates(min_value=date(2018, 1, 1), max_value=date(2022, 1, 1)),
end=st.dates(min_value=date(2018, 1, 1), max_value=date(2022, 1, 1)),
)
def test_hpy_alpaca_concurrency_ranges_min(start: date, end: date):
alpaca_dl = data_loader_factory(DataConnectorType.alpaca)
t = time.time()
r = alpaca_dl.data_concurrency_ranges(
symbol="AAPL", start=start, end=end, scale=TimeScale.minute
)
duration = time.time() - t
print(f"duration={duration}, len={len(r)} -> {r}")
| 24.952381
| 75
| 0.573791
|
1f4fe594d257e9b74502c8009498b12da3d52f2b
| 17,705
|
py
|
Python
|
gpMgmt/bin/ext/yaml/representer.py
|
wapache-org/greenplum-gpdb
|
79e2bd251c1d27054846f630acd52e7903854829
|
[
"PostgreSQL",
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/yaml/lib/yaml/representer.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/yaml/lib/yaml/representer.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 278
|
2015-09-21T19:15:06.000Z
|
2018-08-31T00:36:51.000Z
|
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
try:
set
except NameError:
from sets import Set as set
import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null',
u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = u'.nan'
elif data == self.inf_value:
value = u'.inf'
elif data == -self.inf_value:
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if u'.' not in value and u'e' in value:
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:python/str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node . Otherwise we produce a
# !!python/object/new node.
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(str,
Representer.represent_str)
Representer.add_representer(unicode,
Representer.represent_unicode)
Representer.add_representer(long,
Representer.represent_long)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.ClassType,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
Representer.represent_object)
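# Illustrative sketch (not part of the library): registering a custom
# representer so that instances of a user-defined class are dumped as a YAML
# mapping. The `Point` class and `point_representer` names are hypothetical.
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def point_representer(representer, point):
#         return representer.represent_mapping(
#             u'tag:yaml.org,2002:map', {'x': point.x, 'y': point.y})
#
#     SafeRepresenter.add_representer(Point, point_representer)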
| 36.206544
| 91
| 0.62118
|
e1f8fdcc0553fca3069503f9a5ff7dbd08a9121c
| 721
|
py
|
Python
|
search.py
|
rajat19/bluecord
|
623bcd4eb8a5e66f2c9c5ff8932dfed567f353cc
|
[
"MIT"
] | 1
|
2021-07-09T12:09:42.000Z
|
2021-07-09T12:09:42.000Z
|
search.py
|
rajat19/bluecord
|
623bcd4eb8a5e66f2c9c5ff8932dfed567f353cc
|
[
"MIT"
] | null | null | null |
search.py
|
rajat19/bluecord
|
623bcd4eb8a5e66f2c9c5ff8932dfed567f353cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = 'Rajat Srivastava'
import pprint
import settings as my_settings
from googleapiclient.discovery import build
def search_main(query):
service = build("customsearch", "v1",
developerKey=my_settings.GOOGLE_API_KEY)
result = service.cse().list(
q=query,
cx=my_settings.GOOGLE_CSE_KEY,
).execute()
# print("response", result)
try:
items = result["items"]
top_five_links = []
for i in items:
            if len(top_five_links) < 5:
top_five_links.append(i["link"])
pprint.pprint(result["items"])
# print(top_five_links)
return top_five_links
    except KeyError:
        # The response has no "items" key when the query returns no results.
        return
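# Minimal usage sketch (assumes valid GOOGLE_API_KEY and GOOGLE_CSE_KEY values
# in settings.py; the query string below is only an example):
if __name__ == '__main__':
    links = search_main("python custom search example")
    if links:
        for link in links:
            print(link)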
| 25.75
| 60
| 0.613037
|
ed11e81faff7c4638056f1f14114800b1af32135
| 1,640
|
py
|
Python
|
commands_execution.py
|
rahmaevao/remote_develop
|
104ebd28ec7d2e530582324dca0eef33fe5f44a6
|
[
"CC0-1.0"
] | null | null | null |
commands_execution.py
|
rahmaevao/remote_develop
|
104ebd28ec7d2e530582324dca0eef33fe5f44a6
|
[
"CC0-1.0"
] | 2
|
2020-09-01T20:38:14.000Z
|
2020-09-02T06:07:51.000Z
|
commands_execution.py
|
rahmaevao/remote_develop
|
104ebd28ec7d2e530582324dca0eef33fe5f44a6
|
[
"CC0-1.0"
] | null | null | null |
"""
This script launches executables on a remote machine, forwarding graphics over SSH.
To use it, pass a command label to the script as an argument.
"""
import subprocess
from constants import USER_AND_ADDRESS, PORT, command_groups
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('Label', nargs='?')
namespace = parser.parse_args()
    # This magic block re-adds the display's xauth cookie on every run.
# See https://superuser.com/questions/806637/xauth-not-creating-xauthority-file/807112#807112.
if namespace.Label:
for this_command_group in command_groups:
if namespace.Label == this_command_group.label:
if this_command_group.gui_flag:
header = 'ssh -X ' + USER_AND_ADDRESS + ' -p ' + PORT + " '" + \
'xauth list $DISPLAY > temp_list_of_xauth.txt; ' + \
'while IFS= read -r line; do export THIS_AUTH=$line; done < temp_list_of_xauth.txt; ' + \
'rm temp_list_of_xauth.txt; ' + \
'sudo xauth add $THIS_AUTH; '
else:
header = 'ssh ' + USER_AND_ADDRESS + ' -p ' + PORT + " '"
for this_command in this_command_group.commands:
header += this_command + '; '
header += "'"
subprocess.call(header, shell=True)
sys.exit(0)
print('😕 Unknown command line argument')
sys.exit(1)
print('😠 Use command line arguments')
sys.exit(2)
| 37.272727
| 113
| 0.583537
|
89ca3a8c60e2b380d17c25b9ef89cd073f59ef24
| 6,086
|
py
|
Python
|
colossalai/nn/optimizer/fused_adam.py
|
SMesForoush/ColossalAI
|
104cbbb313348e04cf83bda9f2dbfbe3b0f369fb
|
[
"Apache-2.0"
] | null | null | null |
colossalai/nn/optimizer/fused_adam.py
|
SMesForoush/ColossalAI
|
104cbbb313348e04cf83bda9f2dbfbe3b0f369fb
|
[
"Apache-2.0"
] | null | null | null |
colossalai/nn/optimizer/fused_adam.py
|
SMesForoush/ColossalAI
|
104cbbb313348e04cf83bda9f2dbfbe3b0f369fb
|
[
"Apache-2.0"
] | null | null | null |
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_adam.py
import torch
from colossalai.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier
@OPTIMIZERS.register_module
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires ColossalAI to be installed via
``pip install .``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`colossalai.nn.optimizer.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adamw_mode=False``
:class:`colossalai.nn.optimizer.FusedAdam` may be used with or without Amp.
    Adam was first proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
        adamw_mode (boolean, optional): Apply L2 regularization or weight decay;
            True for decoupled weight decay (also known as AdamW) (default: True)
set_grad_none (bool, optional): whether set grad to None when zero_grad()
method is called. (default: True)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
adamw_mode=True,
weight_decay=0.,
amsgrad=False,
set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adamw_mode = 1 if adamw_mode else 0
self.set_grad_none = set_grad_none
if multi_tensor_applier.available:
import colossal_C
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_adam = colossal_C.multi_tensor_adam
else:
raise RuntimeError('FusedAdam requires cuda extensions')
def zero_grad(self, set_to_none=False):
if set_to_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError(
'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
)
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_l, p_l, m_l, v_l = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype not in [torch.float16, torch.float32]:
                    raise RuntimeError('FusedAdam only supports fp16 and fp32.')
g_l.append(p.grad.data)
p_l.append(p.data)
m_l.append(state['exp_avg'])
v_l.append(state['exp_avg_sq'])
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_l, p_l, m_l, v_l],
group['lr'], beta1, beta2, group['eps'], group['step'], self.adamw_mode,
bias_correction, group['weight_decay'])
return loss
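# Minimal usage sketch (illustrative only, not part of the module): FusedAdam
# is constructed and stepped like torch.optim.Adam. Assumes ColossalAI's CUDA
# extensions are built and a GPU is available.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2).cuda()
    optimizer = FusedAdam(model.parameters(), lr=1e-3, weight_decay=1e-2)
    loss = model(torch.randn(8, 4, device="cuda")).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad(set_to_none=True)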
| 42.859155
| 145
| 0.605652
|
ce33c99523fec35706a4ea9375cc106966bd9750
| 115,071
|
py
|
Python
|
practical-neo4j-apress/practicalneo4j-python-master/virtual/env/lib/python2.7/site-packages/py2neo/neo4j.py
|
dynamicdeploy/networkx
|
6a57d49503295f4e5b8b5d728aa142e2efd5188a
|
[
"Apache-2.0"
] | 4
|
2015-07-05T03:23:04.000Z
|
2021-03-21T16:42:59.000Z
|
practical-neo4j-apress/practicalneo4j-python-master/virtual/env/lib/python2.7/site-packages/py2neo/neo4j.py
|
dynamicdeploy/networkx
|
6a57d49503295f4e5b8b5d728aa142e2efd5188a
|
[
"Apache-2.0"
] | null | null | null |
practical-neo4j-apress/practicalneo4j-python-master/virtual/env/lib/python2.7/site-packages/py2neo/neo4j.py
|
dynamicdeploy/networkx
|
6a57d49503295f4e5b8b5d728aa142e2efd5188a
|
[
"Apache-2.0"
] | 1
|
2021-10-08T03:41:54.000Z
|
2021-10-08T03:41:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The neo4j module provides the main `Neo4j <http://neo4j.org/>`_ client
functionality and will be the starting point for most applications. The main
classes provided are:
- :py:class:`GraphDatabaseService` - an instance of a Neo4j database server,
providing a number of graph-global methods for handling nodes and
relationships
- :py:class:`Node` - a representation of a database node
- :py:class:`Relationship` - a representation of a relationship between two
database nodes
- :py:class:`Path` - a sequence of alternating nodes and relationships
- :py:class:`Index` - an index of key-value pairs for storing links to nodes or
relationships
- :py:class:`ReadBatch` - a batch of read requests to be carried out within a
single transaction
- :py:class:`WriteBatch` - a batch of write requests to be carried out within
a single transaction
"""
from __future__ import division, unicode_literals
from collections import namedtuple
from datetime import datetime
import base64
import json
import logging
import re
from .packages.httpstream import (http,
Resource as _Resource,
ResourceTemplate as _ResourceTemplate,
ClientError as _ClientError,
ServerError as _ServerError)
from .packages.jsonstream import assembled, grouped
from .packages.httpstream.numbers import CREATED, NOT_FOUND, CONFLICT
from .packages.urimagic import (Authority, URI, URITemplate,
Query, percent_encode)
from . import __version__
from .exceptions import *
from .util import *
DEFAULT_SCHEME = "http"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 7474
DEFAULT_NETLOC = "{0}:{1}".format(DEFAULT_HOST, DEFAULT_PORT)
DEFAULT_URI = "{0}://{1}".format(DEFAULT_SCHEME, DEFAULT_NETLOC)
PRODUCT = ("py2neo", __version__)
NON_ALPHA_NUM = re.compile("[^0-9A-Za-z_]")
SIMPLE_NAME = re.compile(r"[A-Za-z_][0-9A-Za-z_]*")
http.default_encoding = "UTF-8"
batch_log = logging.getLogger(__name__ + ".batch")
cypher_log = logging.getLogger(__name__ + ".cypher")
_headers = {
None: [("X-Stream", "true")]
}
_http_rewrites = {}
def _add_header(key, value, host_port=None):
""" Add an HTTP header to be sent with all requests if no `host_port`
is provided or only to those matching the value supplied otherwise.
"""
if host_port in _headers:
_headers[host_port].append((key, value))
else:
_headers[host_port] = [(key, value)]
def _get_headers(host_port):
"""Fetch all HTTP headers relevant to the `host_port` provided.
"""
uri_headers = {}
for n, headers in _headers.items():
if n is None or n == host_port:
uri_headers.update(headers)
return uri_headers
def authenticate(host_port, user_name, password):
""" Set HTTP basic authentication values for specified `host_port`. The
code below shows a simple example::
# set up authentication parameters
neo4j.authenticate("camelot:7474", "arthur", "excalibur")
# connect to authenticated graph database
graph_db = neo4j.GraphDatabaseService("http://camelot:7474/db/data/")
    Note: a `host_port` can be either a server name or a server name and port
    number, but it must match exactly the value used within the
    GraphDatabaseService URI.
:param host_port: the host and optional port requiring authentication
(e.g. "bigserver", "camelot:7474")
:param user_name: the user name to authenticate as
:param password: the password
"""
credentials = (user_name + ":" + password).encode("UTF-8")
value = "Basic " + base64.b64encode(credentials).decode("ASCII")
_add_header("Authorization", value, host_port=host_port)
def familiar(*resources):
""" Return :py:const:`True` if all resources share a common service root.
:param resources:
:return:
"""
if len(resources) < 2:
return True
return all(_.service_root == resources[0].service_root for _ in resources)
def rewrite(from_scheme_host_port, to_scheme_host_port):
""" Automatically rewrite all URIs directed to the scheme, host and port
specified in `from_scheme_host_port` to that specified in
`to_scheme_host_port`.
As an example::
# implicitly convert all URIs beginning with <http://localhost:7474>
# to instead use <https://dbserver:9999>
neo4j.rewrite(("http", "localhost", 7474), ("https", "dbserver", 9999))
If `to_scheme_host_port` is :py:const:`None` then any rewrite rule for
`from_scheme_host_port` is removed.
This facility is primarily intended for use by database servers behind
proxies which are unaware of their externally visible network address.
"""
global _http_rewrites
if to_scheme_host_port is None:
try:
del _http_rewrites[from_scheme_host_port]
except KeyError:
pass
else:
_http_rewrites[from_scheme_host_port] = to_scheme_host_port
def _hydrated(data, hydration_cache=None):
""" Takes input iterable, assembles and resolves any Resource objects,
returning the result.
"""
if hydration_cache is None:
hydration_cache = {}
if isinstance(data, dict):
if has_all(data, Relationship.signature):
self_uri = data["self"]
try:
return hydration_cache[self_uri]
except KeyError:
hydrated = Relationship._hydrated(data)
hydration_cache[self_uri] = hydrated
return hydrated
elif has_all(data, Node.signature):
self_uri = data["self"]
try:
return hydration_cache[self_uri]
except KeyError:
hydrated = Node._hydrated(data)
hydration_cache[self_uri] = hydrated
return hydrated
elif has_all(data, Path.signature):
return Path._hydrated(data)
else:
raise ValueError("Cannot determine object type", data)
elif is_collection(data):
return type(data)([_hydrated(datum, hydration_cache) for datum in data])
else:
return data
def _node(*args, **kwargs):
""" Cast the arguments provided to a :py:class:`neo4j.Node`. The following
general combinations are possible:
- ``node()``
- ``node(node_instance)``
- ``node(property_dict)``
- ``node(**properties)``
If :py:const:`None` is passed as the only argument, :py:const:`None` is
returned instead of a ``Node`` instance.
Examples::
node()
node(Node("http://localhost:7474/db/data/node/1"))
node({"name": "Alice"})
node(name="Alice")
Other representations::
{"name": "Alice"}
"""
if len(args) == 0:
return Node.abstract(**kwargs)
elif len(args) == 1 and not kwargs:
arg = args[0]
if arg is None:
return None
elif isinstance(arg, Node):
return arg
elif isinstance(arg, dict):
return Node.abstract(**arg)
else:
raise TypeError("Cannot cast node from {0}".format(arg))
else:
raise TypeError("Cannot cast node from {0}".format((args, kwargs)))
def _rel(*args, **kwargs):
""" Cast the arguments provided to a :py:class:`neo4j.Relationship`. The
following general combinations are possible:
- ``rel(relationship_instance)``
- ``rel((start_node, type, end_node))``
- ``rel((start_node, type, end_node, properties))``
- ``rel((start_node, (type, properties), end_node))``
- ``rel(start_node, (type, properties), end_node)``
- ``rel(start_node, type, end_node, properties)``
- ``rel(start_node, type, end_node, **properties)``
Examples::
rel(Relationship("http://localhost:7474/db/data/relationship/1"))
rel((alice, "KNOWS", bob))
rel((alice, "KNOWS", bob, {"since": 1999}))
rel((alice, ("KNOWS", {"since": 1999}), bob))
rel(alice, ("KNOWS", {"since": 1999}), bob)
rel(alice, "KNOWS", bob, {"since": 1999})
rel(alice, "KNOWS", bob, since=1999)
Other representations::
(alice, "KNOWS", bob)
(alice, "KNOWS", bob, {"since": 1999})
(alice, ("KNOWS", {"since": 1999}), bob)
"""
if len(args) == 1 and not kwargs:
arg = args[0]
if isinstance(arg, Relationship):
return arg
elif isinstance(arg, tuple):
if len(arg) == 3:
return _UnboundRelationship.cast(arg[1]).bind(arg[0], arg[2])
elif len(arg) == 4:
return Relationship.abstract(arg[0], arg[1], arg[2], **arg[3])
else:
raise TypeError("Cannot cast relationship from {0}".format(arg))
else:
raise TypeError("Cannot cast relationship from {0}".format(arg))
elif len(args) == 3:
rel = _UnboundRelationship.cast(args[1])
rel._properties.update(kwargs)
return rel.bind(args[0], args[2])
elif len(args) == 4:
props = args[3]
props.update(kwargs)
return Relationship.abstract(*args[0:3], **props)
else:
raise TypeError("Cannot cast relationship from {0}".format((args, kwargs)))
class Resource(object):
""" Basic RESTful web resource with JSON metadata. Wraps an
`httpstream.Resource`.
"""
def __init__(self, uri):
uri = URI(uri)
scheme_host_port = (uri.scheme, uri.host, uri.port)
if scheme_host_port in _http_rewrites:
scheme_host_port = _http_rewrites[scheme_host_port]
# This is fine - it's all my code anyway...
uri._URI__set_scheme(scheme_host_port[0])
uri._URI__set_authority("{0}:{1}".format(scheme_host_port[1],
scheme_host_port[2]))
if uri.user_info:
authenticate(uri.host_port, *uri.user_info.partition(":")[0::2])
self._resource = _Resource(uri)
self._metadata = None
self._subresources = {}
self._headers = _get_headers(self.__uri__.host_port)
self._product = PRODUCT
def __repr__(self):
""" Return a valid Python representation of this object.
"""
return repr(self._resource)
def __eq__(self, other):
""" Determine equality of two objects based on URI.
"""
return self._resource == other._resource
def __ne__(self, other):
""" Determine inequality of two objects based on URI.
"""
return self._resource != other._resource
@property
def __uri__(self):
return self._resource.__uri__
@property
def __metadata__(self):
if not self._metadata:
self.refresh()
return self._metadata
@property
def is_abstract(self):
""" Indicates whether this entity is abstract (i.e. not bound
to a concrete entity within the database)
"""
return not bool(self.__uri__)
@property
def service_root(self):
return ServiceRoot.get_instance(URI(self._resource).resolve("/"))
@property
def graph_db(self):
return self.service_root.graph_db
def refresh(self):
""" Refresh resource metadata.
"""
if not self.is_abstract:
self._metadata = ResourceMetadata(self._get().content)
def _get(self):
try:
return self._resource.get(headers=self._headers,
product=self._product)
except _ClientError as e:
raise ClientError(e)
except _ServerError as e:
raise ServerError(e)
def _put(self, body=None):
try:
return self._resource.put(body=body,
headers=self._headers,
product=self._product)
except _ClientError as e:
raise ClientError(e)
except _ServerError as e:
raise ServerError(e)
def _post(self, body=None):
try:
return self._resource.post(body=body,
headers=self._headers,
product=self._product)
except _ClientError as e:
raise ClientError(e)
except _ServerError as e:
raise ServerError(e)
def _delete(self):
try:
return self._resource.delete(headers=self._headers,
product=self._product)
except _ClientError as e:
raise ClientError(e)
except _ServerError as e:
raise ServerError(e)
def _subresource(self, key, cls=None):
if key not in self._subresources:
try:
uri = URI(self.__metadata__[key])
except KeyError:
raise KeyError("Key {0} not found in resource "
"metadata".format(repr(key)), self.__metadata__)
if not cls:
cls = Resource
self._subresources[key] = cls(uri)
return self._subresources[key]
class ResourceMetadata(object):
def __init__(self, metadata):
self._metadata = dict(metadata)
def __contains__(self, key):
return key in self._metadata
def __getitem__(self, key):
return self._metadata[key]
def __iter__(self):
return iter(self._metadata.items())
class ResourceTemplate(_ResourceTemplate):
def expand(self, **values):
return Resource(_ResourceTemplate.expand(self, **values).uri)
class Cacheable(object):
_instances = {}
@classmethod
def get_instance(cls, uri):
""" Fetch a cached instance if one is available, otherwise create,
cache and return a new instance.
:param uri: URI of the cached resource
:return: a resource instance
"""
if uri not in cls._instances:
cls._instances[uri] = cls(uri)
return cls._instances[uri]
class ServiceRoot(Cacheable, Resource):
""" Neo4j REST API service root resource.
"""
def __init__(self, uri=None):
Resource.__init__(self, uri or DEFAULT_URI)
self._load2neo = None
self._load2neo_checked = False
@property
def graph_db(self):
return GraphDatabaseService.get_instance(self.__metadata__["data"])
@property
def load2neo(self):
if not self._load2neo_checked:
self._load2neo = Resource(URI(self).resolve("/load2neo"))
try:
self._load2neo.refresh()
except ClientError:
self._load2neo = None
finally:
self._load2neo_checked = True
if self._load2neo is None:
raise NotImplementedError("Load2neo extension not available")
else:
return self._load2neo
@property
def monitor(self):
manager = Resource(self.__metadata__["management"])
return Monitor(manager.__metadata__["services"]["monitor"])
class Monitor(Cacheable, Resource):
def __init__(self, uri=None):
if uri is None:
uri = ServiceRoot().monitor.__uri__
Resource.__init__(self, uri)
def fetch_latest_stats(self):
""" Fetch the latest server statistics as a list of 2-tuples, each
holding a `datetime` object and a named tuple of node, relationship and
property counts.
"""
counts = namedtuple("Stats", ("node_count",
"relationship_count",
"property_count"))
uri = self.__metadata__["resources"]["latest_data"]
latest_data = Resource(uri)._get().content
timestamps = latest_data["timestamps"]
data = latest_data["data"]
data = zip(
(datetime.fromtimestamp(t) for t in timestamps),
(counts(*x) for x in zip(
(numberise(n) for n in data["node_count"]),
(numberise(n) for n in data["relationship_count"]),
(numberise(n) for n in data["property_count"]),
)),
)
return data
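# Illustrative usage sketch for Monitor (assumes the server exposes the
# management/monitor endpoint):
#
#     monitor = Monitor()
#     for timestamp, counts in monitor.fetch_latest_stats():
#         print timestamp, counts.node_count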
class GraphDatabaseService(Cacheable, Resource):
""" An instance of a `Neo4j <http://neo4j.org/>`_ database identified by
its base URI. Generally speaking, this is the only URI which a system
attaching to this service should need to be directly aware of; all further
entity URIs will be discovered automatically from within response content
when possible (see `Hypermedia <http://en.wikipedia.org/wiki/Hypermedia>`_)
or will be derived from existing URIs.
The following code illustrates how to connect to a database server and
display its version number::
from py2neo import neo4j
graph_db = neo4j.GraphDatabaseService()
print(graph_db.neo4j_version)
:param uri: the base URI of the database (defaults to <http://localhost:7474/db/data/>)
"""
def __init__(self, uri=None):
if uri is None:
uri = ServiceRoot().graph_db.__uri__
Resource.__init__(self, uri)
self._indexes = {Node: {}, Relationship: {}}
def __len__(self):
""" Return the size of this graph (i.e. the number of relationships).
"""
return self.size
@property
def _load2neo(self):
return self.service_root.load2neo
def clear(self):
""" Clear all nodes and relationships from the graph.
.. warning::
This method will permanently remove **all** nodes and relationships
from the graph and cannot be undone.
"""
batch = WriteBatch(self)
batch.append_cypher("START r=rel(*) DELETE r")
batch.append_cypher("START n=node(*) DELETE n")
batch.run()
def create(self, *abstracts):
""" Create multiple nodes and/or relationships as part of a single
batch.
The abstracts provided may use any accepted notation, as described in
the section on py2neo fundamentals.
For a node, simply pass a dictionary of properties; for a relationship, pass a tuple of
(start, type, end) or (start, type, end, data) where start and end
may be :py:class:`Node` instances or zero-based integral references
to other node entities within this batch::
# create a single node
alice, = graph_db.create({"name": "Alice"})
# create multiple nodes
people = graph_db.create(
{"name": "Alice", "age": 33}, {"name": "Bob", "age": 44},
{"name": "Carol", "age": 55}, {"name": "Dave", "age": 66},
)
# create two nodes with a connecting relationship
alice, bob, rel = graph_db.create(
{"name": "Alice"}, {"name": "Bob"},
(0, "KNOWS", 1, {"since": 2006})
)
# create a node plus a relationship to pre-existing node
ref_node = graph_db.get_reference_node()
alice, rel = graph_db.create(
{"name": "Alice"}, (ref_node, "PERSON", 0)
)
:return: list of :py:class:`Node` and/or :py:class:`Relationship`
instances
.. warning::
This method will *always* return a list, even when only creating
a single node or relationship. To automatically unpack a list
containing a single item, append a trailing comma to the variable
name on the left of the assignment operation.
"""
if not abstracts:
return []
batch = WriteBatch(self)
for abstract in abstracts:
batch.create(abstract)
return batch.submit()
def delete(self, *entities):
""" Delete multiple nodes and/or relationships as part of a single
batch.
"""
if not entities:
return
batch = WriteBatch(self)
for entity in entities:
if entity is not None:
batch.delete(entity)
batch.run()
def find(self, label, property_key=None, property_value=None):
""" Iterate through a set of labelled nodes, optionally filtering
by property key and value
"""
uri = URI(self).resolve("/".join(["label", label, "nodes"]))
if property_key:
uri = uri.resolve("?" + percent_encode({property_key: json.dumps(property_value, ensure_ascii=False)}))
try:
for i, result in grouped(Resource(uri)._get()):
yield _hydrated(assembled(result))
except ClientError as err:
if err.status_code != NOT_FOUND:
raise
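    # Illustrative usage sketch for find() (assumes a server version with node
    # label support; the label and property values are examples only):
    #
    #     for person in graph_db.find("Person", "name", "Alice"):
    #         print person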
def get_properties(self, *entities):
""" Fetch properties for multiple nodes and/or relationships as part
of a single batch; returns a list of dictionaries in the same order
as the supplied entities.
"""
if not entities:
return []
if len(entities) == 1:
return [entities[0].get_properties()]
batch = BatchRequestList(self)
for entity in entities:
batch.append_get(batch._uri_for(entity, "properties"))
responses = batch._execute()
try:
return [BatchResponse(rs, raw=True).body or {}
for rs in responses.json]
finally:
responses.close()
def load_geoff(self, geoff):
""" Load Geoff data via the load2neo extension.
::
>>> from py2neo import neo4j
>>> graph_db = neo4j.GraphDatabaseService()
>>> graph_db.load_geoff("(alice)<-[:KNOWS]->(bob)")
[{u'alice': Node('http://localhost:7474/db/data/node/1'),
u'bob': Node('http://localhost:7474/db/data/node/2')}]
:param geoff: geoff data to load
:return: list of node mappings
"""
loader = Resource(self._load2neo.__metadata__["geoff_loader"])
return [
dict((key, self.node(value)) for key, value in line[0].items())
for line in loader._post(geoff).tsj
]
@property
def load2neo_version(self):
""" The load2neo extension version, if available.
"""
return version_tuple(self._load2neo.__metadata__["load2neo_version"])
def match(self, start_node=None, rel_type=None, end_node=None,
bidirectional=False, limit=None):
""" Iterate through all relationships matching specified criteria.
Examples are as follows::
# all relationships from the graph database
# ()-[r]-()
rels = list(graph_db.match())
# all relationships outgoing from `alice`
# (alice)-[r]->()
rels = list(graph_db.match(start_node=alice))
# all relationships incoming to `alice`
# ()-[r]->(alice)
rels = list(graph_db.match(end_node=alice))
# all relationships attached to `alice`, regardless of direction
# (alice)-[r]-()
rels = list(graph_db.match(start_node=alice, bidirectional=True))
# all relationships from `alice` to `bob`
# (alice)-[r]->(bob)
rels = list(graph_db.match(start_node=alice, end_node=bob))
# all relationships outgoing from `alice` of type "FRIEND"
# (alice)-[r:FRIEND]->()
rels = list(graph_db.match(start_node=alice, rel_type="FRIEND"))
# up to three relationships outgoing from `alice` of type "FRIEND"
# (alice)-[r:FRIEND]->()
rels = list(graph_db.match(start_node=alice, rel_type="FRIEND", limit=3))
:param start_node: concrete start :py:class:`Node` to match or
:py:const:`None` if any
:param rel_type: type of relationships to match or :py:const:`None` if
any
:param end_node: concrete end :py:class:`Node` to match or
:py:const:`None` if any
:param bidirectional: :py:const:`True` if reversed relationships should
also be included
:param limit: maximum number of relationships to match or
:py:const:`None` if no limit
:return: matching relationships
:rtype: generator
"""
if start_node is None and end_node is None:
query = "START a=node(*)"
params = {}
elif end_node is None:
query = "START a=node({A})"
start_node = _cast(start_node, Node, abstract=False)
params = {"A": start_node._id}
elif start_node is None:
query = "START b=node({B})"
end_node = _cast(end_node, Node, abstract=False)
params = {"B": end_node._id}
else:
query = "START a=node({A}),b=node({B})"
start_node = _cast(start_node, Node, abstract=False)
end_node = _cast(end_node, Node, abstract=False)
params = {"A": start_node._id, "B": end_node._id}
if rel_type is None:
rel_clause = ""
elif is_collection(rel_type):
if self.neo4j_version >= (2, 0, 0):
# yuk, version sniffing :-(
separator = "|:"
else:
separator = "|"
rel_clause = ":" + separator.join("`{0}`".format(_)
for _ in rel_type)
else:
rel_clause = ":`{0}`".format(rel_type)
if bidirectional:
query += " MATCH (a)-[r" + rel_clause + "]-(b) RETURN r"
else:
query += " MATCH (a)-[r" + rel_clause + "]->(b) RETURN r"
if limit is not None:
query += " LIMIT {0}".format(int(limit))
results = CypherQuery(self, query).stream(**params)
try:
for result in results:
yield result[0]
finally:
results.close()
def match_one(self, start_node=None, rel_type=None, end_node=None,
bidirectional=False):
""" Fetch a single relationship matching specified criteria.
:param start_node: concrete start :py:class:`Node` to match or
:py:const:`None` if any
:param rel_type: type of relationships to match or :py:const:`None` if
any
:param end_node: concrete end :py:class:`Node` to match or
:py:const:`None` if any
:param bidirectional: :py:const:`True` if reversed relationships should
also be included
:return: a matching :py:class:`Relationship` or :py:const:`None`
.. seealso::
:py:func:`GraphDatabaseService.match <py2neo.neo4j.GraphDatabaseService.match>`
"""
rels = list(self.match(start_node, rel_type, end_node,
bidirectional, 1))
if rels:
return rels[0]
else:
return None
@property
def neo4j_version(self):
""" The database software version as a 4-tuple of (``int``, ``int``,
``int``, ``str``).
"""
return version_tuple(self.__metadata__["neo4j_version"])
def node(self, id_):
""" Fetch a node by ID.
"""
return Node(URI(self).resolve("node/" + str(id_)))
@property
def node_labels(self):
""" The set of node labels currently defined within the graph.
"""
resource = Resource(URI(self).resolve("labels"))
try:
return set(_hydrated(assembled(resource._get())))
except ClientError as err:
if err.status_code == NOT_FOUND:
raise NotImplementedError("Node labels not available for this "
"Neo4j server version")
else:
raise
@property
def order(self):
""" The number of nodes in this graph.
"""
return CypherQuery(self, "START n=node(*) "
"RETURN count(n)").execute_one()
def relationship(self, id_):
""" Fetch a relationship by ID.
"""
return Relationship(URI(self).resolve("relationship/" + str(id_)))
@property
def relationship_types(self):
""" The set of relationship types currently defined within the graph.
"""
resource = self._subresource("relationship_types")
return set(_hydrated(assembled(resource._get())))
@property
def schema(self):
""" The Schema resource for this graph.
.. seealso::
:py:func:`Schema <py2neo.neo4j.Schema>`
"""
return Schema.get_instance(URI(self).resolve("schema"))
@property
def size(self):
""" The number of relationships in this graph.
"""
return CypherQuery(self, "START r=rel(*) "
"RETURN count(r)").execute_one()
@property
def supports_foreach_pipe(self):
""" Indicates whether the server supports pipe syntax for FOREACH.
"""
return self.neo4j_version >= (2, 0)
@property
def supports_index_uniqueness_modes(self):
""" Indicates whether the server supports `get_or_create` and
`create_or_fail` uniqueness modes on batched index methods.
"""
return self.neo4j_version >= (1, 9)
@property
def supports_node_labels(self):
""" Indicates whether the server supports node labels.
"""
return self.neo4j_version >= (2, 0)
@property
def supports_optional_match(self):
""" Indicates whether the server supports Cypher OPTIONAL MATCH
clauses.
"""
return self.neo4j_version >= (2, 0)
@property
def supports_schema_indexes(self):
""" Indicates whether the server supports schema indexes.
"""
return self.neo4j_version >= (2, 0)
@property
def supports_cypher_transactions(self):
""" Indicates whether the server supports explicit Cypher transactions.
"""
return "transaction" in self.__metadata__
def _index_manager(self, content_type):
""" Fetch the index management resource for the given `content_type`.
:param content_type:
:return:
"""
if content_type is Node:
uri = self.__metadata__["node_index"]
elif content_type is Relationship:
uri = self.__metadata__["relationship_index"]
else:
raise IndexTypeError(content_type.__class__.__name__)
return Resource(uri)
def get_indexes(self, content_type):
""" Fetch a dictionary of all available indexes of a given type.
:param content_type: either :py:class:`neo4j.Node` or
:py:class:`neo4j.Relationship`
        :return: a dictionary of :py:class:`Index` instances of the specified type, keyed by index name
"""
index_manager = self._index_manager(content_type)
index_index = index_manager._get().content
if index_index:
self._indexes[content_type] = dict(
(key, Index(content_type, value["template"]))
for key, value in index_index.items()
)
else:
self._indexes[content_type] = {}
return self._indexes[content_type]
def get_index(self, content_type, index_name):
""" Fetch a specific index from the current database, returning an
:py:class:`Index` instance. If an index with the supplied `name` and
content `type` does not exist, :py:const:`None` is returned.
:param content_type: either :py:class:`neo4j.Node` or
:py:class:`neo4j.Relationship`
:param index_name: the name of the required index
:return: an :py:class:`Index` instance or :py:const:`None`
.. seealso:: :py:func:`get_or_create_index`
.. seealso:: :py:class:`Index`
"""
if index_name not in self._indexes[content_type]:
self.get_indexes(content_type)
if index_name in self._indexes[content_type]:
return self._indexes[content_type][index_name]
else:
return None
def get_or_create_index(self, content_type, index_name, config=None):
""" Fetch a specific index from the current database, returning an
:py:class:`Index` instance. If an index with the supplied `name` and
content `type` does not exist, one is created with either the
default configuration or that supplied in `config`::
# get or create a node index called "People"
people = graph_db.get_or_create_index(neo4j.Node, "People")
# get or create a relationship index called "Friends"
friends = graph_db.get_or_create_index(neo4j.Relationship, "Friends")
:param content_type: either :py:class:`neo4j.Node` or
:py:class:`neo4j.Relationship`
:param index_name: the name of the required index
:return: an :py:class:`Index` instance
.. seealso:: :py:func:`get_index`
.. seealso:: :py:class:`Index`
"""
index = self.get_index(content_type, index_name)
if index:
return index
index_manager = self._index_manager(content_type)
rs = index_manager._post({"name": index_name, "config": config or {}})
index = Index(content_type, assembled(rs)["template"])
self._indexes[content_type].update({index_name: index})
return index
def delete_index(self, content_type, index_name):
""" Delete the entire index identified by the type and name supplied.
:param content_type: either :py:class:`neo4j.Node` or
:py:class:`neo4j.Relationship`
:param index_name: the name of the index to delete
:raise LookupError: if the specified index does not exist
"""
if index_name not in self._indexes[content_type]:
self.get_indexes(content_type)
if index_name in self._indexes[content_type]:
index = self._indexes[content_type][index_name]
index._delete()
del self._indexes[content_type][index_name]
else:
raise LookupError("Index not found")
def get_indexed_node(self, index_name, key, value):
""" Fetch the first node indexed with the specified details, returning
:py:const:`None` if none found.
:param index_name: the name of the required index
:param key: the index key
:param value: the index value
:return: a :py:class:`Node` instance
"""
index = self.get_index(Node, index_name)
if index:
nodes = index.get(key, value)
if nodes:
return nodes[0]
return None
def get_or_create_indexed_node(self, index_name, key, value, properties=None):
""" Fetch the first node indexed with the specified details, creating
and returning a new indexed node if none found.
:param index_name: the name of the required index
:param key: the index key
:param value: the index value
:param properties: properties for the new node, if one is created
(optional)
:return: a :py:class:`Node` instance
"""
index = self.get_or_create_index(Node, index_name)
return index.get_or_create(key, value, properties or {})
def get_indexed_relationship(self, index_name, key, value):
""" Fetch the first relationship indexed with the specified details,
returning :py:const:`None` if none found.
:param index_name: the name of the required index
:param key: the index key
:param value: the index value
:return: a :py:class:`Relationship` instance
"""
index = self.get_index(Relationship, index_name)
if index:
relationships = index.get(key, value)
if relationships:
return relationships[0]
return None
class CypherQuery(object):
""" A reusable Cypher query. To create a new query object, a graph and the
query text need to be supplied::
>>> from py2neo import neo4j
>>> graph_db = neo4j.GraphDatabaseService()
>>> query = neo4j.CypherQuery(graph_db, "CREATE (a) RETURN a")
"""
def __init__(self, graph_db, query):
self._cypher = Resource(graph_db.__metadata__["cypher"])
self._query = query
def __str__(self):
return self._query
@property
def string(self):
""" The text of the query.
"""
return self._query
def _execute(self, **params):
if __debug__:
cypher_log.debug("Query: " + repr(self._query))
if params:
cypher_log.debug("Params: " + repr(params))
try:
return self._cypher._post({
"query": self._query,
"params": dict(params or {}),
})
except ClientError as e:
if e.exception:
# A CustomCypherError is a dynamically created subclass of
# CypherError with the same name as the underlying server
# exception
CustomCypherError = type(str(e.exception), (CypherError,), {})
raise CustomCypherError(e)
else:
raise CypherError(e)
def run(self, **params):
""" Execute the query and discard any results.
:param params:
"""
self._execute(**params).close()
def execute(self, **params):
""" Execute the query and return the results.
:param params:
:return:
:rtype: :py:class:`CypherResults <py2neo.neo4j.CypherResults>`
"""
return CypherResults(self._execute(**params))
def execute_one(self, **params):
""" Execute the query and return the first value from the first row.
:param params:
:return:
"""
try:
return self.execute(**params).data[0][0]
except IndexError:
return None
def stream(self, **params):
""" Execute the query and return a result iterator.
:param params:
:return:
:rtype: :py:class:`IterableCypherResults <py2neo.neo4j.IterableCypherResults>`
"""
return IterableCypherResults(self._execute(**params))
class CypherResults(object):
""" A static set of results from a Cypher query.
"""
signature = ("columns", "data")
@classmethod
def _hydrated(cls, data, hydration_cache=None):
""" Takes assembled data...
"""
producer = RecordProducer(data["columns"])
return [
producer.produce(_hydrated(row, hydration_cache))
for row in data["data"]
]
def __init__(self, response):
content = response.json
self._columns = tuple(content["columns"])
self._producer = RecordProducer(self._columns)
self._data = [
self._producer.produce(_hydrated(row))
for row in content["data"]
]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def __len__(self):
return len(self._data)
def __getitem__(self, item):
return self._data[item]
@property
def columns(self):
""" Column names.
"""
return self._columns
@property
def data(self):
""" List of result records.
"""
return self._data
def __iter__(self):
return iter(self._data)
class IterableCypherResults(object):
""" An iterable set of results from a Cypher query.
::
query = graph_db.cypher.query("START n=node(*) RETURN n LIMIT 10")
for record in query.stream():
print record[0]
Each record returned is cast into a :py:class:`namedtuple` with names
derived from the resulting column names.
.. note ::
Results are available as returned from the server and are decoded
incrementally. This means that there is no need to wait for the
entire response to be received before processing can occur.
"""
def __init__(self, response):
self._response = response
self._redo_buffer = []
self._buffered = self._buffered_results()
self._columns = None
self._fetch_columns()
self._producer = RecordProducer(self._columns)
def _fetch_columns(self):
redo = []
section = []
for key, value in self._buffered:
if key and key[0] == "columns":
section.append((key, value))
else:
redo.append((key, value))
if key and key[0] == "data":
break
self._redo_buffer.extend(redo)
self._columns = tuple(assembled(section)["columns"])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def _buffered_results(self):
for result in self._response:
while self._redo_buffer:
yield self._redo_buffer.pop(0)
yield result
def __iter__(self):
hydration_cache = {}
for key, section in grouped(self._buffered):
if key[0] == "data":
for i, row in grouped(section):
yield self._producer.produce(_hydrated(assembled(row),
hydration_cache))
@property
def columns(self):
""" Column names.
"""
return self._columns
def close(self):
""" Close results and free resources.
"""
self._response.close()
class Schema(Cacheable, Resource):
def __init__(self, *args, **kwargs):
Resource.__init__(self, *args, **kwargs)
if not self.service_root.graph_db.supports_schema_indexes:
raise NotImplementedError("Schema index support requires "
"version 2.0 or above")
self._index_template = \
URITemplate(str(URI(self)) + "/index/{label}")
self._index_key_template = \
URITemplate(str(URI(self)) + "/index/{label}/{property_key}")
def get_indexed_property_keys(self, label):
""" Fetch a list of indexed property keys for a label.
:param label:
:return:
"""
if not label:
raise ValueError("Label cannot be empty")
resource = Resource(self._index_template.expand(label=label))
try:
response = resource._get()
except ClientError as err:
if err.status_code == NOT_FOUND:
return []
else:
raise
else:
return [
indexed["property_keys"][0]
for indexed in response.json
]
def create_index(self, label, property_key):
""" Index a property key for a label.
:param label:
:param property_key:
:return:
"""
if not label or not property_key:
raise ValueError("Neither label nor property key can be empty")
resource = Resource(self._index_template.expand(label=label))
property_key = bytearray(property_key, "utf-8").decode("utf-8")
try:
resource._post({"property_keys": [property_key]})
except ClientError as err:
if err.status_code == CONFLICT:
raise ValueError("Property key already indexed")
else:
raise
def drop_index(self, label, property_key):
""" Remove label index for a given property key.
:param label:
:param property_key:
:return:
"""
if not label or not property_key:
raise ValueError("Neither label nor property key can be empty")
uri = self._index_key_template.expand(label=label,
property_key=property_key)
resource = Resource(uri)
try:
resource._delete()
except ClientError as err:
if err.status_code == NOT_FOUND:
raise LookupError("Property key not found")
else:
raise
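# Illustrative usage sketch (not part of the original py2neo source): managing
# schema indexes. Assumes a Neo4j 2.0+ server and that the Schema instance is
# obtained elsewhere (for example via the graph database service).
def _example_schema_indexes(schema):
    try:
        schema.create_index("Person", "name")
    except ValueError:
        pass  # the property key was already indexed
    print(schema.get_indexed_property_keys("Person"))  # e.g. ['name']
    schema.drop_index("Person", "name")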
class _Entity(Resource):
""" Base class from which :py:class:`Node` and :py:class:`Relationship`
classes inherit. Provides property management functionality by defining
standard Python container handler methods.
"""
def __init__(self, uri):
Resource.__init__(self, uri)
self._properties = {}
def __contains__(self, key):
return key in self.get_properties()
def __delitem__(self, key):
self.update_properties({key: None})
def __getitem__(self, key):
return self.get_properties().get(key, None)
def __iter__(self):
return self.get_properties().__iter__()
def __len__(self):
return len(self.get_properties())
def __nonzero__(self):
return True
def __setitem__(self, key, value):
self.update_properties({key: value})
@property
def _properties_resource(self):
return self._subresource("properties")
@property
def _id(self):
""" Return the internal ID for this entity.
:return: integer ID of this entity within the database or
:py:const:`None` if abstract
"""
if self.is_abstract:
return None
else:
return int(URI(self).path.segments[-1])
def delete(self):
""" Delete this entity from the database.
"""
self._delete()
@property
def exists(self):
""" Detects whether this entity still exists in the database.
"""
try:
self._get()
except ClientError as err:
if err.status_code == NOT_FOUND:
return False
else:
raise
else:
return True
def get_cached_properties(self):
""" Fetch last known properties without calling the server.
:return: dictionary of properties
"""
if self.is_abstract:
return self._properties
else:
return self.__metadata__["data"]
def get_properties(self):
""" Fetch all properties.
:return: dictionary of properties
"""
if not self.is_abstract:
self._properties = assembled(self._properties_resource._get()) or {}
return self._properties
def set_properties(self, properties):
""" Replace all properties with those supplied.
:param properties: dictionary of new properties
"""
self._properties = dict(properties)
if not self.is_abstract:
if self._properties:
self._properties_resource._put(compact(self._properties))
else:
self._properties_resource._delete()
def delete_properties(self):
""" Delete all properties.
"""
self.set_properties({})
def update_properties(self, properties):
raise NotImplementedError("_Entity.update_properties")
class Node(_Entity):
""" A node within a graph, identified by a URI. For example:
>>> from py2neo import neo4j
>>> alice = neo4j.Node("http://localhost:7474/db/data/node/1")
Typically, concrete nodes will not be constructed directly in this way
by client applications. Instead, methods such as
:py:func:`GraphDatabaseService.create` build node objects indirectly as
required. Once created, nodes can be treated like any other container type
so as to manage properties::
# get the `name` property of `node`
name = node["name"]
# set the `name` property of `node` to `Alice`
node["name"] = "Alice"
# delete the `name` property from `node`
del node["name"]
# determine the number of properties within `node`
count = len(node)
# determine existence of the `name` property within `node`
if "name" in node:
pass
# iterate through property keys in `node`
for key in node:
value = node[key]
:param uri: URI identifying this node
"""
signature = ("self",)
@classmethod
def _hydrated(cls, data):
obj = cls(data["self"])
obj._metadata = ResourceMetadata(data)
obj._properties = data.get("data", {})
return obj
@classmethod
def abstract(cls, **properties):
""" Create and return a new abstract node containing properties drawn
from the keyword arguments supplied. An abstract node is not bound to
a concrete node within a database but properties can be managed
similarly to those within bound nodes::
>>> alice = Node.abstract(name="Alice")
>>> alice["name"]
'Alice'
>>> alice["age"] = 34
        >>> alice.get_properties()
{'age': 34, 'name': 'Alice'}
If more complex property keys are required, abstract nodes may be
instantiated with the ``**`` syntax::
>>> alice = Node.abstract(**{"first name": "Alice"})
>>> alice["first name"]
'Alice'
:param properties: node properties
"""
instance = cls(None)
instance._properties = dict(properties)
return instance
def __init__(self, uri):
_Entity.__init__(self, uri)
def __eq__(self, other):
other = _cast(other, Node)
if self.__uri__:
return _Entity.__eq__(self, other)
else:
return self._properties == other._properties
def __ne__(self, other):
other = _cast(other, Node)
if self.__uri__:
return _Entity.__ne__(self, other)
else:
return self._properties != other._properties
def __repr__(self):
if not self.is_abstract:
return "{0}({1})".format(
self.__class__.__name__,
repr(str(self.__uri__))
)
        elif self._properties:
            return "node({0})".format(repr(self._properties))
        else:
            return "node()"
def __str__(self):
""" Return Cypher/Geoff style representation of this node.
"""
if self.is_abstract:
return "({0})".format(json.dumps(self._properties, separators=(",", ":"), ensure_ascii=False))
elif self._properties:
return "({0} {1})".format(
"" if self._id is None else self._id,
json.dumps(self._properties, separators=(",", ":"), ensure_ascii=False),
)
else:
return "({0})".format("" if self._id is None else self._id)
def __hash__(self):
if self.is_abstract:
return hash(tuple(sorted(self._properties.items())))
else:
return hash(self.__uri__)
def delete_related(self):
""" Delete this node along with all related nodes and relationships.
"""
if self.graph_db.supports_foreach_pipe:
query = ("START a=node({a}) "
"MATCH (a)-[rels*0..]-(z) "
"FOREACH(r IN rels| DELETE r) "
"DELETE a, z")
else:
query = ("START a=node({a}) "
"MATCH (a)-[rels*0..]-(z) "
"FOREACH(r IN rels: DELETE r) "
"DELETE a, z")
CypherQuery(self.graph_db, query).execute(a=self._id)
def isolate(self):
""" Delete all relationships connected to this node, both incoming and
outgoing.
"""
CypherQuery(self.graph_db, "START a=node({a}) "
"MATCH a-[r]-b "
"DELETE r").execute(a=self._id)
def match(self, rel_type=None, other_node=None, limit=None):
""" Iterate through matching relationships attached to this node,
regardless of direction.
:param rel_type: type of relationships to match or :py:const:`None` if
any
:param other_node: concrete :py:class:`Node` to match for other end of
relationship or :py:const:`None` if any
:param limit: maximum number of relationships to match or
:py:const:`None` if no limit
:return: matching relationships
:rtype: generator
.. seealso::
:py:func:`GraphDatabaseService.match <py2neo.neo4j.GraphDatabaseService.match>`
"""
return self.service_root.graph_db.match(self, rel_type, other_node,
True, limit)
def match_incoming(self, rel_type=None, start_node=None, limit=None):
""" Iterate through matching relationships where this node is the end
node.
:param rel_type: type of relationships to match or :py:const:`None` if
any
:param start_node: concrete start :py:class:`Node` to match or
:py:const:`None` if any
:param limit: maximum number of relationships to match or
:py:const:`None` if no limit
:return: matching relationships
:rtype: generator
.. seealso::
:py:func:`GraphDatabaseService.match <py2neo.neo4j.GraphDatabaseService.match>`
"""
return self.service_root.graph_db.match(start_node, rel_type, self,
False, limit)
def match_outgoing(self, rel_type=None, end_node=None, limit=None):
""" Iterate through matching relationships where this node is the start
node.
:param rel_type: type of relationships to match or :py:const:`None` if
any
:param end_node: concrete end :py:class:`Node` to match or
:py:const:`None` if any
:param limit: maximum number of relationships to match or
:py:const:`None` if no limit
:return: matching relationships
:rtype: generator
.. seealso::
:py:func:`GraphDatabaseService.match <py2neo.neo4j.GraphDatabaseService.match>`
"""
return self.service_root.graph_db.match(self, rel_type, end_node,
False, limit)
def create_path(self, *items):
""" Create a new path, starting at this node and chaining together the
alternating relationships and nodes provided::
(self)-[rel_0]->(node_0)-[rel_1]->(node_1) ...
|-----| |------| |-----| |------|
item: 0 1 2 3
Each relationship may be specified as one of the following:
- an existing Relationship instance
- a string holding the relationship type, e.g. "KNOWS"
- a (`str`, `dict`) tuple holding both the relationship type and
its properties, e.g. ("KNOWS", {"since": 1999})
Nodes can be any of the following:
- an existing Node instance
- an integer containing the ID of an existing node
- a `dict` holding a set of properties for a new node
- a 3-tuple holding an index name, key and value for identifying
indexed nodes, e.g. ("People", "email", "bob@example.com")
- :py:const:`None`, representing an unspecified node that will be
created as required
:param items: alternating relationships and nodes
:return: `Path` object representing the newly-created path
"""
path = Path(self, *items)
return path.create(self.service_root.graph_db)
def get_or_create_path(self, *items):
""" Identical to `create_path` except will reuse parts of the path
which already exist.
Some examples::
# add dates to calendar, starting at calendar_root
christmas_day = calendar_root.get_or_create_path(
"YEAR", {"number": 2000},
"MONTH", {"number": 12},
"DAY", {"number": 25},
)
# `christmas_day` will now contain a `Path` object
# containing the nodes and relationships used:
# (CAL)-[:YEAR]->(2000)-[:MONTH]->(12)-[:DAY]->(25)
# adding a second, overlapping path will reuse
# nodes and relationships wherever possible
christmas_eve = calendar_root.get_or_create_path(
"YEAR", {"number": 2000},
"MONTH", {"number": 12},
"DAY", {"number": 24},
)
# `christmas_eve` will contain the same year and month nodes
# as `christmas_day` but a different (new) day node:
# (CAL)-[:YEAR]->(2000)-[:MONTH]->(12)-[:DAY]->(25)
# |
# [:DAY]
# |
# v
# (24)
"""
path = Path(self, *items)
return path.get_or_create(self.service_root.graph_db)
def update_properties(self, properties):
""" Update properties with the values supplied.
:param properties: dictionary of properties to integrate with existing
properties
"""
if self.is_abstract:
self._properties.update(properties)
self._properties = compact(self._properties)
else:
query, params = ["START a=node({A})"], {"A": self._id}
for i, (key, value) in enumerate(properties.items()):
value_tag = "V" + str(i)
query.append("SET a.`" + key + "`={" + value_tag + "}")
params[value_tag] = value
query.append("RETURN a")
rel = CypherQuery(self.graph_db, " ".join(query)).execute_one(**params)
self._properties = rel.__metadata__["data"]
def _label_resource(self):
if self.is_abstract:
raise TypeError("Abstract nodes cannot have labels")
try:
return self._subresource("labels")
except KeyError:
raise NotImplementedError("Labels are not supported in this "
"version of Neo4j")
def get_labels(self):
""" Fetch all labels associated with this node.
:return: :py:class:`set` of text labels
"""
return set(assembled(self._label_resource()._get()))
def add_labels(self, *labels):
""" Add one or more labels to this node.
For example::
>>> from py2neo import neo4j, node
>>> graph_db = neo4j.GraphDatabaseService()
>>> alice, = graph_db.create(node(name="Alice"))
>>> alice.add_labels("female", "human")
:param labels: one or more text labels
"""
labels = [ustr(label) for label in set(flatten(labels))]
self._label_resource()._post(labels)
def remove_labels(self, *labels):
""" Remove one or more labels from this node.
:param labels: one or more text labels
"""
labels = [ustr(label) for label in set(flatten(labels))]
batch = WriteBatch(self.graph_db)
for label in labels:
batch.remove_label(self, label)
batch.run()
def set_labels(self, *labels):
""" Replace all labels on this node.
:param labels: one or more text labels
"""
labels = [ustr(label) for label in set(flatten(labels))]
self._label_resource()._put(labels)
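# Illustrative usage sketch (not part of the original py2neo source): typical
# property and label handling on a concrete Node, mirroring the docstring
# examples above. Assumes `graph_db` is a connected GraphDatabaseService on a
# label-capable (2.0+) server.
def _example_node_usage(graph_db):
    alice, = graph_db.create({"name": "Alice"})
    alice["age"] = 34                       # set a single property
    alice.add_labels("Person", "Employee")
    print(alice.get_properties())
    print(alice.get_labels())
    alice.remove_labels("Employee")
    alice.isolate()                         # drop any attached relationships
    alice.delete()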
class Relationship(_Entity):
""" A relationship within a graph, identified by a URI.
:param uri: URI identifying this relationship
"""
signature = ("self", "type")
@classmethod
def _hydrated(cls, data):
obj = cls(data["self"])
obj._metadata = ResourceMetadata(data)
obj._properties = data.get("data", {})
return obj
@classmethod
def abstract(cls, start_node, type_, end_node, **properties):
""" Create and return a new abstract relationship.
"""
instance = cls(None)
instance._start_node = start_node
instance._type = type_
instance._end_node = end_node
instance._properties = dict(properties)
return instance
def __init__(self, uri):
_Entity.__init__(self, uri)
self._start_node = None
self._type = None
self._end_node = None
def __eq__(self, other):
other = _cast(other, Relationship)
if self.__uri__:
return _Entity.__eq__(self, other)
else:
return (self._start_node == other._start_node and
self._type == other._type and
self._end_node == other._end_node and
self._properties == other._properties)
def __ne__(self, other):
other = _cast(other, Relationship)
if self.__uri__:
return _Entity.__ne__(self, other)
else:
return (self._start_node != other._start_node or
self._type != other._type or
self._end_node != other._end_node or
self._properties != other._properties)
def __repr__(self):
if not self.is_abstract:
return "{0}({1})".format(
self.__class__.__name__,
repr(str(self.__uri__))
)
        elif self._properties:
            return "rel({0}, {1}, {2}, {3})".format(
                repr(self.start_node),
                repr(self.type),
                repr(self.end_node),
                repr(self._properties)
            )
        else:
            return "rel({0}, {1}, {2})".format(
                repr(self.start_node),
                repr(self.type),
                repr(self.end_node)
            )
def __str__(self):
type_str = str(self.type)
if not SIMPLE_NAME.match(type_str):
type_str = json.dumps(type_str, ensure_ascii=False)
if self._properties:
return "{0}-[:{1} {2}]->{3}".format(
str(self.start_node),
type_str,
json.dumps(self._properties, separators=(",", ":"), ensure_ascii=False),
str(self.end_node),
)
else:
return "{0}-[:{1}]->{2}".format(
str(self.start_node),
type_str,
str(self.end_node),
)
def __hash__(self):
if self.__uri__:
return hash(self.__uri__)
else:
return hash(tuple(sorted(self._properties.items())))
@property
def end_node(self):
""" Return the end node of this relationship.
"""
if self.__uri__ and not self._end_node:
self._end_node = Node(self.__metadata__['end'])
return self._end_node
@property
def start_node(self):
""" Return the start node of this relationship.
"""
if self.__uri__ and not self._start_node:
self._start_node = Node(self.__metadata__['start'])
return self._start_node
@property
def type(self):
""" Return the type of this relationship as a string.
"""
if self.__uri__ and not self._type:
self._type = self.__metadata__['type']
return self._type
def update_properties(self, properties):
""" Update the properties for this relationship with the values
supplied.
"""
if self.is_abstract:
self._properties.update(properties)
self._properties = compact(self._properties)
else:
query, params = ["START a=rel({A})"], {"A": self._id}
for i, (key, value) in enumerate(properties.items()):
value_tag = "V" + str(i)
query.append("SET a.`" + key + "`={" + value_tag + "}")
params[value_tag] = value
query.append("RETURN a")
rel = CypherQuery(self.graph_db, " ".join(query)).execute_one(**params)
self._properties = rel.__metadata__["data"]
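# Illustrative sketch (not part of the original py2neo source): building an
# abstract relationship between two abstract nodes and inspecting it. Nothing
# here touches a server.
def _example_abstract_relationship():
    alice = Node.abstract(name="Alice")
    bob = Node.abstract(name="Bob")
    knows = Relationship.abstract(alice, "KNOWS", bob, since=1999)
    print(knows.type)              # 'KNOWS'
    print(knows.get_properties())  # {'since': 1999}
    print(str(knows))              # Cypher/Geoff style representation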
class _UnboundRelationship(object):
""" An abstract, partial relationship with no start or end nodes.
"""
@classmethod
def cast(cls, arg):
if isinstance(arg, cls):
return arg
elif isinstance(arg, Relationship):
return cls(arg.type, **arg.get_properties())
elif isinstance(arg, tuple):
if len(arg) == 1:
return cls(str(arg[0]))
elif len(arg) == 2:
return cls(str(arg[0]), **arg[1])
else:
raise TypeError(arg)
else:
return cls(str(arg))
def __init__(self, type_, **properties):
self._type = type_
self._properties = dict(properties)
def __eq__(self, other):
return (self._type == other._type and
self._properties == other._properties)
def __ne__(self, other):
return (self._type != other._type or
self._properties != other._properties)
def __repr__(self):
return "({0}, {1})".format(
repr(str(self._type)),
repr(self._properties),
)
def __str__(self):
return "-[:{0}]->".format(
json.dumps(str(self._type), ensure_ascii=False),
)
def bind(self, start_node, end_node):
return Relationship.abstract(start_node, self._type, end_node,
**self._properties)
class Path(object):
""" A representation of a sequence of nodes connected by relationships. for
example::
>>> from py2neo import neo4j, node
>>> alice, bob, carol = node(name="Alice"), node(name="Bob"), node(name="Carol")
>>> abc = neo4j.Path(alice, "KNOWS", bob, "KNOWS", carol)
>>> abc.nodes
[node(**{'name': 'Alice'}), node(**{'name': 'Bob'}), node(**{'name': 'Carol'})]
>>> dave, eve = node(name="Dave"), node(name="Eve")
>>> de = neo4j.Path(dave, "KNOWS", eve)
>>> de.nodes
[node(**{'name': 'Dave'}), node(**{'name': 'Eve'})]
>>> abcde = neo4j.Path.join(abc, "KNOWS", de)
>>> str(abcde)
'({"name":"Alice"})-[:"KNOWS"]->({"name":"Bob"})-[:"KNOWS"]->({"name":"Carol"})-[:"KNOWS"]->({"name":"Dave"})-[:"KNOWS"]->({"name":"Eve"})'
"""
signature = ("length", "nodes", "relationships", "start", "end")
@classmethod
def _hydrated(cls, data):
nodes = map(Node, data["nodes"])
rels = map(Relationship, data["relationships"])
return Path(*round_robin(nodes, rels))
def __init__(self, node, *rels_and_nodes):
self._nodes = [_node(node)]
self._nodes.extend(_node(n) for n in rels_and_nodes[1::2])
if len(rels_and_nodes) % 2 != 0:
# If a trailing relationship is supplied, add a dummy end node
self._nodes.append(_node())
self._relationships = [
_UnboundRelationship.cast(r)
for r in rels_and_nodes[0::2]
]
def __repr__(self):
out = ", ".join(repr(item) for item in round_robin(self._nodes,
self._relationships))
return "Path({0})".format(out)
def __str__(self):
out = []
for i, rel in enumerate(self._relationships):
out.append(str(self._nodes[i]))
out.append(str(rel))
out.append(str(self._nodes[-1]))
return "".join(out)
def __nonzero__(self):
return bool(self._relationships)
def __len__(self):
return len(self._relationships)
def __eq__(self, other):
return (self._nodes == other._nodes and
self._relationships == other._relationships)
def __ne__(self, other):
return (self._nodes != other._nodes or
self._relationships != other._relationships)
def __getitem__(self, item):
size = len(self._relationships)
def adjust(value, default=None):
if value is None:
return default
if value < 0:
return value + size
else:
return value
if isinstance(item, slice):
if item.step is not None:
raise ValueError("Steps not supported in path slicing")
start, stop = adjust(item.start, 0), adjust(item.stop, size)
path = Path(self._nodes[start])
for i in range(start, stop):
path._relationships.append(self._relationships[i])
path._nodes.append(self._nodes[i + 1])
return path
else:
i = int(item)
if i < 0:
i += len(self._relationships)
return Path(self._nodes[i], self._relationships[i],
self._nodes[i + 1])
def __iter__(self):
return iter(
_rel((self._nodes[i], rel, self._nodes[i + 1]))
for i, rel in enumerate(self._relationships)
)
@property
def order(self):
""" The number of nodes within this path.
"""
return len(self._nodes)
@property
def size(self):
""" The number of relationships within this path.
"""
return len(self._relationships)
@property
def nodes(self):
""" Return a list of all the nodes which make up this path.
"""
return list(self._nodes)
@property
def relationships(self):
""" Return a list of all the relationships which make up this path.
"""
return [
_rel((self._nodes[i], rel, self._nodes[i + 1]))
for i, rel in enumerate(self._relationships)
]
@classmethod
def join(cls, left, rel, right):
""" Join the two paths `left` and `right` with the relationship `rel`.
"""
if isinstance(left, Path):
left = left[:]
else:
left = Path(left)
if isinstance(right, Path):
right = right[:]
else:
right = Path(right)
left._relationships.append(_UnboundRelationship.cast(rel))
left._nodes.extend(right._nodes)
left._relationships.extend(right._relationships)
return left
def _create_query(self, unique):
nodes, path, values, params = [], [], [], {}
def append_node(i, node):
if node is None:
path.append("(n{0})".format(i))
values.append("n{0}".format(i))
elif node.is_abstract:
path.append("(n{0} {{p{0}}})".format(i))
params["p{0}".format(i)] = compact(node._properties)
values.append("n{0}".format(i))
else:
path.append("(n{0})".format(i))
nodes.append("n{0}=node({{i{0}}})".format(i))
params["i{0}".format(i)] = node._id
values.append("n{0}".format(i))
def append_rel(i, rel):
if rel._properties:
path.append("-[r{0}:`{1}` {{q{0}}}]->".format(i, rel._type))
params["q{0}".format(i)] = compact(rel._properties)
values.append("r{0}".format(i))
else:
path.append("-[r{0}:`{1}`]->".format(i, rel._type))
values.append("r{0}".format(i))
append_node(0, self._nodes[0])
for i, rel in enumerate(self._relationships):
append_rel(i, rel)
append_node(i + 1, self._nodes[i + 1])
clauses = []
if nodes:
clauses.append("START {0}".format(",".join(nodes)))
if unique:
clauses.append("CREATE UNIQUE p={0}".format("".join(path)))
else:
clauses.append("CREATE p={0}".format("".join(path)))
#clauses.append("RETURN {0}".format(",".join(values)))
clauses.append("RETURN p")
query = " ".join(clauses)
return query, params
def _create(self, graph_db, unique):
query, params = self._create_query(unique=unique)
try:
results = CypherQuery(graph_db, query).execute(**params)
except CypherError:
raise NotImplementedError(
"The Neo4j server at <{0}> does not support "
"Cypher CREATE UNIQUE clauses or the query contains "
"an unsupported property type".format(graph_db.__uri__)
)
else:
for row in results:
return row[0]
def create(self, graph_db):
""" Construct a path within the specified `graph_db` from the nodes
and relationships within this :py:class:`Path` instance. This makes
use of Cypher's ``CREATE`` clause.
"""
return self._create(graph_db, unique=False)
def get_or_create(self, graph_db):
""" Construct a unique path within the specified `graph_db` from the
nodes and relationships within this :py:class:`Path` instance. This
makes use of Cypher's ``CREATE UNIQUE`` clause.
"""
return self._create(graph_db, unique=True)
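# Illustrative usage sketch (not part of the original py2neo source): building
# paths both abstractly and against a server. Assumes `graph_db` is a
# connected GraphDatabaseService.
def _example_paths(graph_db):
    # Abstract path construction; nothing is written to the database here.
    abc = Path(Node.abstract(name="Alice"), "KNOWS", Node.abstract(name="Bob"))
    print(len(abc))       # number of relationships in the path
    print(abc.nodes)
    # Server-side construction that reuses existing segments where possible.
    root, = graph_db.create({"name": "calendar"})
    christmas = root.get_or_create_path(
        "YEAR", {"number": 2000},
        "MONTH", {"number": 12},
        "DAY", {"number": 25},
    )
    return christmas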
class Index(Cacheable, Resource):
""" Searchable database index which can contain either nodes or
relationships.
.. seealso:: :py:func:`GraphDatabaseService.get_or_create_index`
"""
def __init__(self, content_type, uri, name=None):
self._content_type = content_type
key_value_pos = uri.find("/{key}/{value}")
if key_value_pos >= 0:
self._searcher = ResourceTemplate(uri)
Resource.__init__(self, URI(uri[:key_value_pos]))
else:
Resource.__init__(self, uri)
self._searcher = ResourceTemplate(uri.string + "/{key}/{value}")
uri = URI(self)
if self.graph_db.neo4j_version >= (1, 9):
self._create_or_fail = Resource(uri.resolve("?uniqueness=create_or_fail"))
self._get_or_create = Resource(uri.resolve("?uniqueness=get_or_create"))
else:
self._create_or_fail = None
self._get_or_create = Resource(uri.resolve("?unique"))
self._query_template = ResourceTemplate(uri.string + "{?query,order}")
self._name = name or uri.path.segments[-1]
self.__searcher_stem_cache = {}
def __repr__(self):
return "{0}({1}, {2})".format(
self.__class__.__name__,
self._content_type.__name__,
repr(URI(self).string)
)
def _searcher_stem_for_key(self, key):
if key not in self.__searcher_stem_cache:
stem = self._searcher.uri_template.string.partition("{key}")[0]
self.__searcher_stem_cache[key] = stem + percent_encode(key) + "/"
return self.__searcher_stem_cache[key]
def add(self, key, value, entity):
""" Add an entity to this index under the `key`:`value` pair supplied::
# create a node and obtain a reference to the "People" node index
alice, = graph_db.create({"name": "Alice Smith"})
people = graph_db.get_or_create_index(neo4j.Node, "People")
# add the node to the index
people.add("family_name", "Smith", alice)
Note that while Neo4j indexes allow multiple entities to be added under
a particular key:value, the same entity may only be represented once;
this method is therefore idempotent.
"""
self._post({
"key": key,
"value": value,
"uri": str(URI(entity))
})
return entity
def add_if_none(self, key, value, entity):
""" Add an entity to this index under the `key`:`value` pair
supplied if no entry already exists at that point::
# obtain a reference to the "Rooms" node index and
# add node `alice` to room 100 if empty
rooms = graph_db.get_or_create_index(neo4j.Node, "Rooms")
rooms.add_if_none("room", 100, alice)
If added, this method returns the entity, otherwise :py:const:`None`
is returned.
"""
rs = self._get_or_create._post({
"key": key,
"value": value,
"uri": str(URI(entity))
})
if rs.status_code == CREATED:
return entity
else:
return None
@property
def content_type(self):
""" Return the type of entity contained within this index. Will return
either :py:class:`Node` or :py:class:`Relationship`.
"""
return self._content_type
@property
def name(self):
""" Return the name of this index.
"""
return self._name
def get(self, key, value):
""" Fetch a list of all entities from the index which are associated
with the `key`:`value` pair supplied::
# obtain a reference to the "People" node index and
# get all nodes where `family_name` equals "Smith"
people = graph_db.get_or_create_index(neo4j.Node, "People")
smiths = people.get("family_name", "Smith")
..
"""
return [
_hydrated(assembled(result))
for i, result in grouped(self._searcher.expand(key=key, value=value)._get())
]
def create(self, key, value, abstract):
""" Create and index a new node or relationship using the abstract
provided.
"""
batch = WriteBatch(self.service_root.graph_db)
if self._content_type is Node:
batch.create(abstract)
batch.add_indexed_node(self, key, value, 0)
elif self._content_type is Relationship:
batch.create(abstract)
batch.add_indexed_relationship(self, key, value, 0)
else:
raise TypeError(self._content_type)
entity, index_entry = batch.submit()
return entity
def _create_unique(self, key, value, abstract):
""" Internal method to support `get_or_create` and `create_if_none`.
"""
if self._content_type is Node:
body = {
"key": key,
"value": value,
"properties": abstract
}
elif self._content_type is Relationship:
body = {
"key": key,
"value": value,
"start": str(abstract[0].__uri__),
"type": abstract[1],
"end": str(abstract[2].__uri__),
"properties": abstract[3] if len(abstract) > 3 else None
}
else:
raise TypeError(self._content_type)
return self._get_or_create._post(body)
def get_or_create(self, key, value, abstract):
""" Fetch a single entity from the index which is associated with the
`key`:`value` pair supplied, creating a new entity with the supplied
details if none exists::
# obtain a reference to the "Contacts" node index and
# ensure that Alice exists therein
contacts = graph_db.get_or_create_index(neo4j.Node, "Contacts")
alice = contacts.get_or_create("name", "SMITH, Alice", {
"given_name": "Alice Jane", "family_name": "Smith",
"phone": "01234 567 890", "mobile": "07890 123 456"
})
# obtain a reference to the "Friendships" relationship index and
# ensure that Alice and Bob's friendship is registered (`alice`
# and `bob` refer to existing nodes)
friendships = graph_db.get_or_create_index(neo4j.Relationship, "Friendships")
alice_and_bob = friendships.get_or_create(
"friends", "Alice & Bob", (alice, "KNOWS", bob)
)
..
"""
return _hydrated(assembled(self._create_unique(key, value, abstract)))
def create_if_none(self, key, value, abstract):
""" Create a new entity with the specified details within the current
index, under the `key`:`value` pair supplied, if no such entity already
exists. If creation occurs, the new entity will be returned, otherwise
:py:const:`None` will be returned::
# obtain a reference to the "Contacts" node index and
# create a node for Alice if one does not already exist
contacts = graph_db.get_or_create_index(neo4j.Node, "Contacts")
alice = contacts.create_if_none("name", "SMITH, Alice", {
"given_name": "Alice Jane", "family_name": "Smith",
"phone": "01234 567 890", "mobile": "07890 123 456"
})
..
"""
rs = self._create_unique(key, value, abstract)
if rs.status_code == CREATED:
return _hydrated(assembled(rs))
else:
return None
def remove(self, key=None, value=None, entity=None):
""" Remove any entries from the index which match the parameters
supplied. The allowed parameter combinations are:
`key`, `value`, `entity`
remove a specific entity indexed under a given key-value pair
`key`, `value`
remove all entities indexed under a given key-value pair
`key`, `entity`
remove a specific entity indexed against a given key but with
any value
`entity`
remove all occurrences of a specific entity regardless of
key and value
"""
if key and value and entity:
t = ResourceTemplate(URI(self).string + "/{key}/{value}/{entity}")
t.expand(key=key, value=value, entity=entity._id)._delete()
elif key and value:
uris = [
URI(entity.__metadata__["indexed"])
for entity in self.get(key, value)
]
batch = WriteBatch(self.service_root.graph_db)
for uri in uris:
batch.append_delete(uri)
batch.run()
elif key and entity:
t = ResourceTemplate(URI(self).string + "/{key}/{entity}")
t.expand(key=key, entity=entity._id)._delete()
elif entity:
t = ResourceTemplate(URI(self).string + "/{entity}")
t.expand(entity=entity._id)._delete()
else:
raise TypeError("Illegal parameter combination for index removal")
def query(self, query):
""" Query the index according to the supplied query criteria, returning
a list of matched entities::
# obtain a reference to the "People" node index and
# get all nodes where `family_name` equals "Smith"
people = graph_db.get_or_create_index(neo4j.Node, "People")
s_people = people.query("family_name:S*")
The query syntax used should be appropriate for the configuration of
the index being queried. For indexes with default configuration, this
should be Apache Lucene query syntax.
"""
resource = self._query_template.expand(query=query)
for i, result in grouped(resource._get()):
yield _hydrated(assembled(result))
def _query_with_score(self, query, order):
resource = self._query_template.expand(query=query, order=order)
for i, result in grouped(resource._get()):
meta = assembled(result)
yield _hydrated(meta), meta["score"]
def query_by_index(self, query):
return self._query_with_score(query, "index")
def query_by_relevance(self, query):
return self._query_with_score(query, "relevance")
def query_by_score(self, query):
return self._query_with_score(query, "score")
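# Illustrative usage sketch (not part of the original py2neo source): adding a
# node to a legacy index and reading it back, closely following the docstring
# examples above. Assumes `graph_db` is a connected GraphDatabaseService.
def _example_index_usage(graph_db):
    people = graph_db.get_or_create_index(Node, "People")
    alice, = graph_db.create({"name": "Alice Smith"})
    people.add("family_name", "Smith", alice)
    smiths = people.get("family_name", "Smith")
    # Lucene syntax for indexes with the default configuration.
    for hit in people.query("family_name:S*"):
        print(hit)
    return smiths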
def _cast(obj, cls=(Node, Relationship), abstract=None):
if obj is None:
return None
elif isinstance(obj, Node) or isinstance(obj, dict):
entity = _node(obj)
elif isinstance(obj, Relationship) or isinstance(obj, tuple):
entity = _rel(obj)
else:
raise TypeError(obj)
if not isinstance(entity, cls):
raise TypeError(obj)
if abstract is not None and bool(abstract) != bool(entity.is_abstract):
raise TypeError(obj)
return entity
class BatchRequest(object):
""" Individual batch request.
"""
def __init__(self, method, uri, body=None):
self._method = method
self._uri = uri
self._body = body
def __eq__(self, other):
return id(self) == id(other)
def __ne__(self, other):
return id(self) != id(other)
def __hash__(self):
return hash(id(self))
@property
def method(self):
return self._method
@property
def uri(self):
return self._uri
@property
def body(self):
return self._body
class BatchResponse(object):
""" Individual batch response.
"""
@classmethod
def __hydrate(cls, result, hydration_cache=None):
body = result.get("body")
if isinstance(body, dict):
if has_all(body, CypherResults.signature):
records = CypherResults._hydrated(body, hydration_cache)
if len(records) == 0:
return None
elif len(records) == 1:
if len(records[0]) == 1:
return records[0][0]
else:
return records[0]
else:
return records
elif has_all(body, ("exception", "stacktrace")):
err = ServerException(body)
try:
CustomBatchError = type(err.exception, (BatchError,), {})
except TypeError:
# for Python 2.x
CustomBatchError = type(str(err.exception), (BatchError,), {})
raise CustomBatchError(err)
else:
return _hydrated(body, hydration_cache)
else:
return _hydrated(body, hydration_cache)
def __init__(self, result, raw=False, hydration_cache=None):
self.id_ = result.get("id")
self.uri = result.get("from")
self.body = result.get("body")
self.status_code = result.get("status", 200)
self.location = URI(result.get("location"))
if __debug__:
batch_log.debug("<<< {{{0}}} {1} {2} {3}".format(self.id_, self.status_code, self.location, self.body))
# We need to hydrate on construction to catch any errors in the batch
# responses contained in the body
if raw:
self.__hydrated = None
else:
self.__hydrated = self.__hydrate(result, hydration_cache)
@property
def __uri__(self):
return self.uri
@property
def hydrated(self):
return self.__hydrated
class BatchRequestList(object):
def __init__(self, graph_db):
self._graph_db = graph_db
self._batch = graph_db._subresource("batch")
self._cypher = graph_db._subresource("cypher")
self.clear()
def __len__(self):
return len(self._requests)
def __nonzero__(self):
return bool(self._requests)
def append(self, request):
self._requests.append(request)
return request
def append_get(self, uri):
return self.append(BatchRequest("GET", uri))
def append_put(self, uri, body=None):
return self.append(BatchRequest("PUT", uri, body))
def append_post(self, uri, body=None):
return self.append(BatchRequest("POST", uri, body))
def append_delete(self, uri):
return self.append(BatchRequest("DELETE", uri))
def append_cypher(self, query, params=None):
""" Append a Cypher query to this batch. Resources returned from Cypher
queries cannot be referenced by other batch requests.
:param query: Cypher query
:type query: :py:class:`str`
:param params: query parameters
:type params: :py:class:`dict`
:return: batch request object
        :rtype: :py:class:`BatchRequest <py2neo.neo4j.BatchRequest>`
"""
if params:
body = {"query": str(query), "params": dict(params)}
else:
body = {"query": str(query)}
return self.append_post(self._uri_for(self._cypher), body)
@property
def _body(self):
return [
{
"id": i,
"method": request.method,
"to": str(request.uri),
"body": request.body,
}
for i, request in enumerate(self._requests)
]
def clear(self):
""" Clear all requests from this batch.
"""
self._requests = []
def find(self, request):
""" Find the position of a request within this batch.
"""
for i, req in pendulate(self._requests):
if req == request:
return i
raise ValueError("Request not found")
def _uri_for(self, resource, *segments, **kwargs):
""" Return a relative URI in string format for the entity specified
plus extra path segments.
"""
if isinstance(resource, int):
uri = "{{{0}}}".format(resource)
elif isinstance(resource, BatchRequest):
uri = "{{{0}}}".format(self.find(resource))
else:
offset = len(resource.service_root.graph_db.__uri__)
uri = str(resource.__uri__)[offset:]
if segments:
if not uri.endswith("/"):
uri += "/"
uri += "/".join(map(percent_encode, segments))
query = kwargs.get("query")
if query is not None:
uri += "?" + query
return uri
def _execute(self):
request_count = len(self)
request_text = "request" if request_count == 1 else "requests"
batch_log.info("Executing batch with {0} {1}".format(request_count, request_text))
if __debug__:
for id_, request in enumerate(self._requests):
batch_log.debug(">>> {{{0}}} {1} {2} {3}".format(id_, request.method, request.uri, request.body))
try:
response = self._batch._post(self._body)
except (ClientError, ServerError) as e:
if e.exception:
# A CustomBatchError is a dynamically created subclass of
# BatchError with the same name as the underlying server
# exception
CustomBatchError = type(str(e.exception), (BatchError,), {})
raise CustomBatchError(e)
else:
raise BatchError(e)
else:
return response
def run(self):
""" Execute the batch on the server and discard the results. If the
batch results are not required, this will generally be the fastest
execution method.
"""
return self._execute().close()
def stream(self):
""" Execute the batch on the server and return iterable results. This
method allows handling of results as they are received from the server.
:return: iterable results
:rtype: :py:class:`BatchResponseList`
"""
return BatchResponseList(self._execute())
def submit(self):
""" Execute the batch on the server and return a list of results. This
method blocks until all results are received.
:return: result records
:rtype: :py:class:`list`
"""
responses = self._execute()
hydration_cache = {}
try:
return [BatchResponse(rs, hydration_cache=hydration_cache).hydrated
for rs in responses.json]
finally:
responses.close()
def _index(self, content_type, index):
""" Fetch an Index object.
"""
if isinstance(index, Index):
if content_type == index._content_type:
return index
else:
raise TypeError("Index is not for {0}s".format(content_type))
else:
return self._graph_db.get_or_create_index(content_type, str(index))
class BatchResponseList(object):
def __init__(self, response):
self._response = response
def __iter__(self):
hydration_cache = {}
for i, result in grouped(self._response):
yield BatchResponse(assembled(result),
hydration_cache=hydration_cache).hydrated
self.close()
@property
def closed(self):
return self._response.closed
def close(self):
self._response.close()
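# Illustrative sketch (not part of the original py2neo source): the three ways
# a populated batch can be executed, per the BatchRequestList docstrings above.
def _example_batch_execution(batch):
    # Pick one of the following for a given batch:
    #   batch.run()                    -- execute and discard results (fastest)
    #   for result in batch.stream():  -- handle results as they arrive
    #       ...
    return batch.submit()              # block until every result is received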
class ReadBatch(BatchRequestList):
""" Generic batch execution facility for data read requests,
"""
def __init__(self, graph_db):
BatchRequestList.__init__(self, graph_db)
def get_indexed_nodes(self, index, key, value):
""" Fetch all nodes indexed under a given key-value pair.
:param index: index name or instance
:type index: :py:class:`str` or :py:class:`Index`
:param key: key under which nodes are indexed
:type key: :py:class:`str`
:param value: value under which nodes are indexed
:return: batch request object
"""
index = self._index(Node, index)
uri = index._searcher_stem_for_key(key) + percent_encode(value)
return self.append_get(uri)
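# Illustrative usage sketch (not part of the original py2neo source): a
# ReadBatch fetching indexed nodes for two key-value pairs in one round trip.
# The index and key names are made up for illustration.
def _example_read_batch(graph_db):
    batch = ReadBatch(graph_db)
    batch.get_indexed_nodes("People", "family_name", "Smith")
    batch.get_indexed_nodes("People", "family_name", "Jones")
    return batch.submit()  # one result entry per request, in request order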
class WriteBatch(BatchRequestList):
""" Generic batch execution facility for data write requests. Most methods
return a :py:class:`BatchRequest <py2neo.neo4j.BatchRequest>` object that
can be used as a reference in other methods. See the
:py:meth:`create <py2neo.neo4j.WriteBatch.create>` method for an example
of this.
"""
def __init__(self, graph_db):
BatchRequestList.__init__(self, graph_db)
self.__new_uniqueness_modes = None
@property
def supports_index_uniqueness_modes(self):
return self._graph_db.supports_index_uniqueness_modes
def _assert_can_create_or_fail(self):
if not self.supports_index_uniqueness_modes:
raise NotImplementedError("Uniqueness mode `create_or_fail` "
"requires version 1.9 or above")
def create(self, abstract):
""" Create a node or relationship based on the abstract entity
provided. For example::
batch = WriteBatch(graph_db)
a = batch.create(node(name="Alice"))
b = batch.create(node(name="Bob"))
batch.create(rel(a, "KNOWS", b))
results = batch.submit()
:param abstract: node or relationship
:type abstract: abstract
:return: batch request object
"""
entity = _cast(abstract, abstract=True)
if isinstance(entity, Node):
uri = self._uri_for(self._graph_db._subresource("node"))
body = compact(entity._properties)
elif isinstance(entity, Relationship):
uri = self._uri_for(entity.start_node, "relationships")
body = {
"type": entity._type,
"to": self._uri_for(entity.end_node)
}
if entity._properties:
body["data"] = compact(entity._properties)
else:
raise TypeError(entity)
return self.append_post(uri, body)
def create_path(self, node, *rels_and_nodes):
""" Construct a path across a specified set of nodes and relationships.
Nodes may be existing concrete node instances, abstract nodes or
:py:const:`None` but references to other requests are not supported.
:param node: start node
:type node: concrete, abstract or :py:const:`None`
:param rels_and_nodes: alternating relationships and nodes
:type rels_and_nodes: concrete, abstract or :py:const:`None`
:return: batch request object
"""
query, params = Path(node, *rels_and_nodes)._create_query(unique=False)
        return self.append_cypher(query, params)
def get_or_create_path(self, node, *rels_and_nodes):
""" Construct a unique path across a specified set of nodes and
relationships, adding only parts that are missing. Nodes may be
existing concrete node instances, abstract nodes or :py:const:`None`
but references to other requests are not supported.
:param node: start node
:type node: concrete, abstract or :py:const:`None`
:param rels_and_nodes: alternating relationships and nodes
:type rels_and_nodes: concrete, abstract or :py:const:`None`
:return: batch request object
"""
query, params = Path(node, *rels_and_nodes)._create_query(unique=True)
        return self.append_cypher(query, params)
@deprecated("WriteBatch.get_or_create is deprecated, please use "
"get_or_create_path instead")
def get_or_create(self, rel_abstract):
""" Use the abstract supplied to create a new relationship if one does
not already exist.
:param rel_abstract: relationship abstract to be fetched or created
"""
rel = _cast(rel_abstract, cls=Relationship, abstract=True)
if not (isinstance(rel._start_node, Node) or rel._start_node is None):
raise TypeError("Relationship start node must be a "
"Node instance or None")
if not (isinstance(rel._end_node, Node) or rel._end_node is None):
raise TypeError("Relationship end node must be a "
"Node instance or None")
if rel._start_node and rel._end_node:
query = (
"START a=node({A}), b=node({B}) "
"CREATE UNIQUE (a)-[ab:`" + str(rel._type) + "` {P}]->(b) "
"RETURN ab"
)
elif rel._start_node:
query = (
"START a=node({A}) "
"CREATE UNIQUE (a)-[ab:`" + str(rel._type) + "` {P}]->() "
"RETURN ab"
)
elif rel._end_node:
query = (
"START b=node({B}) "
"CREATE UNIQUE ()-[ab:`" + str(rel._type) + "` {P}]->(b) "
"RETURN ab"
)
else:
raise ValueError("Either start node or end node must be "
"specified for a unique relationship")
params = {"P": compact(rel._properties or {})}
if rel._start_node:
params["A"] = rel._start_node._id
if rel._end_node:
params["B"] = rel._end_node._id
return self.append_cypher(query, params)
def delete(self, entity):
""" Delete a node or relationship from the graph.
:param entity: node or relationship to delete
:type entity: concrete or reference
:return: batch request object
"""
return self.append_delete(self._uri_for(entity))
def set_property(self, entity, key, value):
""" Set a single property on a node or relationship.
:param entity: node or relationship on which to set property
:type entity: concrete or reference
:param key: property key
:type key: :py:class:`str`
:param value: property value
:return: batch request object
"""
if value is None:
            return self.delete_property(entity, key)
else:
uri = self._uri_for(entity, "properties", key)
return self.append_put(uri, value)
def set_properties(self, entity, properties):
""" Replace all properties on a node or relationship.
:param entity: node or relationship on which to set properties
:type entity: concrete or reference
:param properties: properties
:type properties: :py:class:`dict`
:return: batch request object
"""
uri = self._uri_for(entity, "properties")
return self.append_put(uri, compact(properties))
def delete_property(self, entity, key):
""" Delete a single property from a node or relationship.
:param entity: node or relationship from which to delete property
:type entity: concrete or reference
:param key: property key
:type key: :py:class:`str`
:return: batch request object
"""
uri = self._uri_for(entity, "properties", key)
return self.append_delete(uri)
def delete_properties(self, entity):
""" Delete all properties from a node or relationship.
:param entity: node or relationship from which to delete properties
:type entity: concrete or reference
:return: batch request object
"""
uri = self._uri_for(entity, "properties")
return self.append_delete(uri)
def add_labels(self, node, *labels):
""" Add labels to a node.
:param node: node to which to add labels
        :type node: concrete or reference
:param labels: text labels
:type labels: :py:class:`str`
:return: batch request object
"""
uri = self._uri_for(node, "labels")
return self.append_post(uri, list(labels))
def remove_label(self, node, label):
""" Remove a label from a node.
:param node: node from which to remove labels (can be a reference to
another request within the same batch)
:param label: text label
:type label: :py:class:`str`
:return: batch request object
"""
uri = self._uri_for(node, "labels", label)
return self.append_delete(uri)
def set_labels(self, node, *labels):
""" Replace all labels on a node.
:param node: node on which to replace labels (can be a reference to
another request within the same batch)
:param labels: text labels
:type labels: :py:class:`str`
:return: batch request object
"""
uri = self._uri_for(node, "labels")
return self.append_put(uri, list(labels))
### ADD TO INDEX ###
def _add_to_index(self, cls, index, key, value, entity, query=None):
uri = self._uri_for(self._index(cls, index), query=query)
return self.append_post(uri, {
"key": key,
"value": value,
"uri": self._uri_for(entity),
})
def add_to_index(self, cls, index, key, value, entity):
""" Add an existing node or relationship to an index.
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param entity: node or relationship to add to the index
:type entity: concrete or reference
:return: batch request object
"""
return self._add_to_index(cls, index, key, value, entity)
def add_to_index_or_fail(self, cls, index, key, value, entity):
""" Add an existing node or relationship uniquely to an index, failing
the entire batch if such an entry already exists.
.. warning::
Uniqueness modes for legacy indexes have been broken in recent
server versions and therefore this method may not work as expected.
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param entity: node or relationship to add to the index
:type entity: concrete or reference
:return: batch request object
"""
self._assert_can_create_or_fail()
query = "uniqueness=create_or_fail"
return self._add_to_index(cls, index, key, value, entity, query)
def get_or_add_to_index(self, cls, index, key, value, entity):
""" Fetch a uniquely indexed node or relationship if one exists,
otherwise add an existing entity to the index.
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param entity: node or relationship to add to the index
:type entity: concrete or reference
:return: batch request object
"""
if self.supports_index_uniqueness_modes:
query = "uniqueness=get_or_create"
else:
query = "unique"
return self._add_to_index(cls, index, key, value, entity, query)
### CREATE IN INDEX ###
def _create_in_index(self, cls, index, key, value, abstract, query=None):
uri = self._uri_for(self._index(cls, index), query=query)
abstract = _cast(abstract, cls=cls, abstract=True)
if cls is Node:
return self.append_post(uri, {
"key": key,
"value": value,
"properties": compact(abstract._properties or {}),
})
elif cls is Relationship:
return self.append_post(uri, {
"key": key,
"value": value,
"start": self._uri_for(abstract._start_node),
"type": str(abstract._type),
"end": self._uri_for(abstract._end_node),
"properties": abstract._properties or {},
})
else:
raise TypeError(cls)
# Removed create_in_index as parameter combination not supported by server
def create_in_index_or_fail(self, cls, index, key, value, abstract=None):
""" Create a new node or relationship and add it uniquely to an index,
failing the entire batch if such an entry already exists.
.. warning::
Uniqueness modes for legacy indexes have been broken in recent
server versions and therefore this method may not work as expected.
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param abstract: abstract node or relationship to create
:return: batch request object
"""
self._assert_can_create_or_fail()
query = "uniqueness=create_or_fail"
return self._create_in_index(cls, index, key, value, abstract, query)
def get_or_create_in_index(self, cls, index, key, value, abstract=None):
""" Fetch a uniquely indexed node or relationship if one exists,
otherwise create a new entity and add that to the index.
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param abstract: abstract node or relationship to create
:return: batch request object
"""
if self.supports_index_uniqueness_modes:
query = "uniqueness=get_or_create"
else:
query = "unique"
return self._create_in_index(cls, index, key, value, abstract, query)
### REMOVE FROM INDEX ###
def remove_from_index(self, cls, index, key=None, value=None, entity=None):
""" Remove any nodes or relationships from an index that match a
particular set of criteria. Allowed parameter combinations are:
`key`, `value`, `entity`
remove a specific node or relationship indexed under a given
key-value pair
`key`, `entity`
remove a specific node or relationship indexed against a given key
and with any value
`entity`
remove all occurrences of a specific node or relationship
regardless of key or value
:param cls: the type of indexed entity
:type cls: :py:class:`Node <py2neo.neo4j.Node>` or
:py:class:`Relationship <py2neo.neo4j.Relationship>`
:param index: index or index name
:type index: :py:class:`Index <py2neo.neo4j.Index>` or :py:class:`str`
:param key: index entry key
:type key: :py:class:`str`
:param value: index entry value
:param entity: node or relationship to remove from the index
:type entity: concrete or reference
:return: batch request object
"""
index = self._index(cls, index)
if key and value and entity:
uri = self._uri_for(index, key, value, entity._id)
elif key and entity:
uri = self._uri_for(index, key, entity._id)
elif entity:
uri = self._uri_for(index, entity._id)
else:
raise TypeError("Illegal parameter combination for index removal")
return self.append_delete(uri)
### START OF DEPRECATED METHODS ###
@deprecated("WriteBatch.add_indexed_node is deprecated, "
"use add_to_index instead")
def add_indexed_node(self, index, key, value, node):
return self.add_to_index(Node, index, key, value, node)
@deprecated("WriteBatch.add_indexed_relationship is deprecated, "
"use add_to_index instead")
def add_indexed_relationship(self, index, key, value, relationship):
return self.add_to_index(Relationship, index, key, value, relationship)
@deprecated("WriteBatch.add_indexed_node_or_fail is deprecated, "
"use add_to_index_or_fail instead")
def add_indexed_node_or_fail(self, index, key, value, node):
return self.add_to_index_or_fail(Node, index, key, value, node)
@deprecated("WriteBatch.add_indexed_relationship_or_fail is deprecated, "
"use add_to_index_or_fail instead")
def add_indexed_relationship_or_fail(self, index, key, value, relationship):
return self.add_to_index_or_fail(Relationship, index, key, value,
relationship)
@deprecated("WriteBatch.create_indexed_node_or_fail is deprecated, "
"use create_in_index_or_fail instead")
def create_indexed_node_or_fail(self, index, key, value, properties=None):
self._assert_can_create_or_fail()
abstract = properties or {}
return self.create_in_index_or_fail(Node, index, key, value, abstract)
@deprecated("WriteBatch.create_indexed_relationship_or_fail is deprecated, "
"use create_in_index_or_fail instead")
def create_indexed_relationship_or_fail(self, index, key, value,
start_node, type_, end_node,
properties=None):
self._assert_can_create_or_fail()
if properties:
abstract = _rel(start_node, (type_, properties), end_node)
else:
abstract = _rel(start_node, type_, end_node)
return self.create_in_index_or_fail(Relationship, index, key, value,
abstract)
@deprecated("WriteBatch.get_or_add_indexed_node is deprecated, "
"use get_or_add_to_index instead")
def get_or_add_indexed_node(self, index, key, value, node):
        return self.get_or_add_to_index(Node, index, key, value, node)
@deprecated("WriteBatch.get_or_add_indexed_relationship is deprecated, "
"use get_or_add_to_index instead")
def get_or_add_indexed_relationship(self, index, key, value, relationship):
        return self.get_or_add_to_index(Relationship, index, key, value, relationship)
@deprecated("WriteBatch.get_or_create_indexed_node is deprecated, "
"use get_or_create_in_index instead")
def get_or_create_indexed_node(self, index, key, value, properties=None):
abstract = properties or {}
return self.get_or_create_in_index(Node, index, key, value, abstract)
@deprecated("WriteBatch.get_or_create_indexed_relationship is deprecated, "
"use get_or_create_indexed instead")
def get_or_create_indexed_relationship(self, index, key, value, start_node,
type_, end_node, properties=None):
if properties:
abstract = _rel(start_node, (type_, properties), end_node)
else:
abstract = _rel(start_node, type_, end_node)
return self.get_or_create_in_index(Relationship, index, key, value,
abstract)
@deprecated("WriteBatch.remove_indexed_node is deprecated, "
"use remove_indexed instead")
def remove_indexed_node(self, index, key=None, value=None, node=None):
return self.remove_from_index(Node, index, key, value, node)
@deprecated("WriteBatch.remove_indexed_relationship is deprecated, "
"use remove_indexed instead")
def remove_indexed_relationship(self, index, key=None, value=None,
relationship=None):
return self.remove_from_index(Relationship, index, key, value,
relationship)
### END OF DEPRECATED METHODS ###
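# Illustrative usage sketch (not part of the original py2neo source): a
# WriteBatch creating two nodes and a relationship and labelling the nodes,
# mirroring the WriteBatch.create docstring above. Assumes `graph_db` is a
# connected GraphDatabaseService on a label-capable (2.0+) server.
def _example_write_batch(graph_db):
    batch = WriteBatch(graph_db)
    a = batch.create(Node.abstract(name="Alice"))
    b = batch.create(Node.abstract(name="Bob"))
    batch.create(Relationship.abstract(a, "KNOWS", b, since=2006))
    batch.add_labels(a, "Person")
    batch.add_labels(b, "Person")
    return batch.submit()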
| 35.780784 | 147 | 0.584969 |
e3a604517e62253b33e4ef722735bc71e555b8f2 | 1,289 | py | Python | tests/test_enum_type.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | ["BSD-3-Clause"] | null | null | null | tests/test_enum_type.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | ["BSD-3-Clause"] | 1 | 2020-09-18T08:21:35.000Z | 2020-09-18T08:21:35.000Z | tests/test_enum_type.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | ["BSD-3-Clause"] | null | null | null |
import pytest
from schematics.contrib.enum_type import EnumType
from schematics.exceptions import ConversionError
try:
from enum import Enum
class E(Enum):
A = 1
B = 'b'
class F(Enum):
A = 1
B = 1
except ImportError:
Enum = None
pytestmark = pytest.mark.skipif(Enum is None,
reason='requires enum')
def test_to_native_by_name():
field = EnumType(E)
assert field.to_native("A") == E.A
assert field.to_native("B") == E.B
with pytest.raises(ConversionError):
field.to_native("a")
def test_to_native_by_value():
field = EnumType(E, use_values=True)
assert field.to_native(1) == E.A
assert field.to_native("b") == field.to_native("B")
with pytest.raises(ConversionError):
field.to_native(2)
def test_to_native_by_value_duplicate():
field = EnumType(F, use_values=True)
assert field.to_native(1) == F.A
def test_passthrough():
field = EnumType(E, use_values=True)
assert field.to_native(E.A) == E.A
def test_to_primitive_by_name():
field = EnumType(E, use_values=False)
assert field.to_primitive(E.A) == "A"
def test_to_primitive_by_value():
field = EnumType(E, use_values=True)
assert field.to_primitive(E.A) == 1
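# Illustrative extra checks (not in the original test module): enum members
# survive a to_primitive/to_native round trip in both naming modes.
def test_round_trip_by_name():
    field = EnumType(E)
    assert field.to_native(field.to_primitive(E.B)) == E.B
def test_round_trip_by_value():
    field = EnumType(E, use_values=True)
    assert field.to_native(field.to_primitive(E.B)) == E.B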
| 21.483333 | 55 | 0.653995 |
8115f3070d6e47f1cc207abd12a34736ce996e0d | 1,340 | py | Python | main.py | kevinzakka/hypersearch | 8fddfd3a23d3f6b8401924af49bfaacebd20a409 | ["MIT"] | 270 | 2018-06-06T00:53:18.000Z | 2022-03-19T13:11:21.000Z | main.py | thanhkaist/hypersearch | 8fddfd3a23d3f6b8401924af49bfaacebd20a409 | ["MIT"] | 4 | 2018-06-07T15:48:01.000Z | 2020-09-10T11:42:47.000Z | main.py | thanhkaist/hypersearch | 8fddfd3a23d3f6b8401924af49bfaacebd20a409 | ["MIT"] | 31 | 2018-06-06T00:53:20.000Z | 2021-10-01T14:39:31.000Z |
import config
from hyperband import Hyperband
from model import get_base_model
from utils import prepare_dirs, save_results
def main(args):
# ensure directories are setup
dirs = [args.data_dir, args.ckpt_dir]
prepare_dirs(dirs)
# create base model
model = get_base_model()
# define params
params = {
# '0_dropout': ['uniform', 0.1, 0.5],
# '0_act': ['choice', ['relu', 'selu', 'elu', 'tanh', 'sigmoid']],
# '0_l2': ['log_uniform', 1e-1, 2],
# '2_act': ['choice', ['selu', 'elu', 'tanh', 'sigmoid']],
# '2_l1': ['log_uniform', 1e-1, 2],
# '2_hidden': ['quniform', 512, 1000, 1],
# '4_hidden': ['quniform', 128, 512, 1],
# 'all_act': ['choice', [[0], ['choice', ['selu', 'elu', 'tanh']]]],
'all_dropout': ['choice', [[0], ['uniform', 0.1, 0.5]]],
# 'all_batchnorm': ['choice', [0, 1]],
'all_l2': ['uniform', 1e-8, 1e-5],
# 'optim': ['choice', ["adam", "sgd"]],
# 'lr': ['uniform', 1e-3, 8e-3],
# 'batch_size': ['quniform', 32, 128, 1]
}
# instantiate hyperband object
hyperband = Hyperband(args, model, params)
# tune
results = hyperband.tune()
# dump results
save_results(results)
if __name__ == '__main__':
args, unparsed = config.get_args()
main(args)
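# --- Hedged sketch (not from the hypersearch codebase) ---
# One plausible way the spec tuples in `params` could be interpreted when a
# random configuration is drawn. The helper name `sample_param` and its exact
# semantics are assumptions, not the library's actual implementation, and the
# nested 'choice' specs used above would need an extra recursive step.
def sample_param(spec):
    import numpy as np
    kind, args = spec[0], spec[1:]
    if kind == 'uniform':
        return np.random.uniform(args[0], args[1])
    if kind == 'quniform':
        low, high, q = args
        return np.round(np.random.uniform(low, high) / q) * q
    if kind == 'log_uniform':
        return np.exp(np.random.uniform(np.log(args[0]), np.log(args[1])))
    if kind == 'choice':
        options = args[0]
        return options[np.random.randint(len(options))]
    raise ValueError("unknown spec kind: {}".format(kind))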
| 27.916667
| 76
| 0.54403
|
fea61c5590bf7cc704ab7ebdf37710e73facfaae
| 7,155
|
py
|
Python
|
src/olympia/browse/feeds.py
|
makyen/Mozilla-addons-server
|
555d9f31cc4b00799466f16c8809edd5f1858ab8
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T10:02:15.000Z
|
2020-12-03T10:02:15.000Z
|
src/olympia/browse/feeds.py
|
makyen/Mozilla-addons-server
|
555d9f31cc4b00799466f16c8809edd5f1858ab8
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/browse/feeds.py
|
makyen/Mozilla-addons-server
|
555d9f31cc4b00799466f16c8809edd5f1858ab8
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext
from olympia import amo
from olympia.amo.feeds import NonAtomicFeed
from olympia.amo.urlresolvers import reverse
from olympia.amo.helpers import absolutify, url, page_name
from olympia.addons.models import Addon, Category
from .views import addon_listing, SearchToolsFilter
class AddonFeedMixin(object):
"""Common pieces for add-ons in a feed."""
def item_link(self, addon):
"""Link for a particular addon (<item><link>...</)"""
return absolutify(reverse('addons.detail', args=[addon.slug]))
def item_title(self, addon):
version = ''
if addon.current_version:
version = u' %s' % addon.current_version
return u'%s%s' % (addon.name, version)
def item_description(self, addon):
"""Description for particular add-on (<item><description>)"""
return unicode(addon.description) or ''
def item_author_name(self, addon):
"""Author for a particuar add-on (<item><dc:creator>)"""
if addon.listed_authors:
return addon.listed_authors[0].name
else:
return ''
def item_pubdate(self, addon):
"""Pubdate for a particuar add-on (<item><pubDate>)"""
sort = self.request.GET.get('sort')
return addon.created if sort == 'created' else addon.last_updated
def item_guid(self, addon):
"""Guid for a particuar version (<item><guid>)"""
url_ = reverse('addons.versions',
args=[addon.slug, addon.current_version])
return absolutify(url_)
class CategoriesRss(AddonFeedMixin, NonAtomicFeed):
def get_object(self, request, category_name=None):
"""
Get the Category for which we are about to output
the RSS feed of its Addons
"""
self.request = request
if category_name is None:
return None
q = Category.objects.filter(application=request.APP.id, type=self.TYPE)
self.category = get_object_or_404(q, slug=category_name)
return self.category
def title(self, category):
"""Title for the feed as a whole"""
name = category.name if category else ugettext('Extensions')
return u'%s :: %s' % (name, page_name(self.request.APP))
def link(self, category):
"""Link for the feed as a whole"""
return absolutify(url('home'))
def description(self, category):
"""Description for the feed as a whole"""
if category:
# L10n: %s is a category name.
return ugettext(u'%s Add-ons') % category.name
else:
return ugettext('Extensions')
def items(self, category):
"""Return the Addons for this Category to be output as RSS <item>'s"""
addons, _ = addon_listing(self.request, [self.TYPE], default='updated')
if category:
addons = addons.filter(categories__id=category.id)
return addons[:20]
class ExtensionCategoriesRss(CategoriesRss):
category = None
request = None
TYPE = amo.ADDON_EXTENSION
title = ugettext('Extensions')
def description(self, category):
"""Description for the feed as a whole."""
if category:
# L10n: %s is a category name.
return ugettext(u'%s Add-ons') % category.name
else:
return ugettext('Extensions')
class ThemeCategoriesRss(CategoriesRss):
category = None
request = None
TYPE = amo.ADDON_THEME
title = ugettext('Themes')
def description(self, category):
"""Description for the feed as a whole."""
if category:
# L10n: %s is a category name.
return ugettext(u'%s Themes') % category.name
else:
return self.title
class FeaturedRss(AddonFeedMixin, NonAtomicFeed):
request = None
def get_object(self, request):
self.request = request
self.app = request.APP
self.appname = unicode(request.APP.pretty)
def title(self):
"""Title for the feed"""
return ugettext('Featured Add-ons :: %s') % page_name(self.app)
def link(self):
"""Link for the feed"""
return absolutify(url('home'))
def description(self):
"""Description for the feed"""
# L10n: %s is an app name.
return ugettext(
'Here\'s a few of our favorite add-ons to help you get'
' started customizing %s.') % self.appname
def items(self):
"""Return the Addons to be output as RSS <item>'s"""
return Addon.objects.featured(self.app)[:20]
class SearchToolsRss(AddonFeedMixin, NonAtomicFeed):
category = None
request = None
TYPES = None
sort = ''
def description(self):
"""Description of this feed."""
if self.category:
# L10n: %s is a category name.
return ugettext(
u'Search tools relating to %s') % self.category.name
elif self.show_featured:
return ugettext('Search tools and search-related extensions')
else:
return ugettext('Search tools')
def get_object(self, request, category=None):
if category:
# Note that we don't need to include extensions
# when looking up a category
qs = Category.objects.filter(application=request.APP.id,
type=amo.ADDON_SEARCH)
self.category = get_object_or_404(qs, slug=category)
else:
self.category = None
self.request = request
self.sort = self.request.GET.get('sort', 'popular')
self.show_featured = self.sort == 'featured'
self.TYPES = [amo.ADDON_SEARCH]
if not self.category and self.show_featured:
self.TYPES.append(amo.ADDON_EXTENSION)
# We don't actually need to return anything, just hijacking the hook.
return None
def items(self):
"""Return search related Add-ons to be output as RSS <item>'s
Just like on the landing page, the following rules apply:
- when viewing featured search tools, include
extensions in the search category
- when viewing categories or any other sorting, do not
include extensions.
"""
addons, filter = addon_listing(self.request, self.TYPES,
SearchToolsFilter, default='popular')
if self.category:
addons = addons.filter(categories__id=self.category.id)
return addons[:30]
def link(self, category):
"""Link for the feed as a whole"""
if self.category:
base = url('browse.search-tools.rss', self.category.slug)
else:
base = url('browse.search-tools.rss')
return absolutify(base + '?sort=' + self.sort)
def title(self):
"""Title for the feed as a whole"""
base = ugettext('Search Tools')
if self.category:
base = u'%s :: %s' % (self.category.name, base)
return u'%s :: %s' % (base, page_name(self.request.APP))
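# --- Hedged illustration (not from this module) ---
# Django syndication feed classes like the ones above are typically wired up
# as instantiated views in a URLconf; the pattern and name below are
# hypothetical, not the actual addons-server routes.
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^extensions/(?P<category_name>[^/]+)/format:rss$',
#             ExtensionCategoriesRss(), name='browse.extensions.rss'),
#     ]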
| 33.591549
| 79
| 0.608526
|
5aa87692078537fb3b7c9a9fe901f2e0a40d30cf
| 1,830
|
py
|
Python
|
providers/airlaunch/opcua/transfers/opcua_to_s3.py
|
airlaunch-ch/airflow-dags-demo-industry
|
a07fdca734694c9ba5ae06a4b4fa95c51289ee7f
|
[
"Apache-2.0"
] | null | null | null |
providers/airlaunch/opcua/transfers/opcua_to_s3.py
|
airlaunch-ch/airflow-dags-demo-industry
|
a07fdca734694c9ba5ae06a4b4fa95c51289ee7f
|
[
"Apache-2.0"
] | null | null | null |
providers/airlaunch/opcua/transfers/opcua_to_s3.py
|
airlaunch-ch/airflow-dags-demo-industry
|
a07fdca734694c9ba5ae06a4b4fa95c51289ee7f
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from dateutil import parser
from typing import List, Optional, Union
import json
from io import BytesIO
from providers.airlaunch.opcua.transfers.opcua_transfer_base import OPCUATransferBaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.exceptions import AirflowBadRequest
class OPCUAToS3Operator(OPCUATransferBaseOperator):
template_fields = ('opcua_node', 's3_bucket', 's3_key')
def __init__(
self,
*,
opcua_node: str,
opcua_startdate: str,
opcua_enddate: str,
s3_bucket: str,
s3_key: str,
opcua_conn_id: str = 'opcua_default',
opcua_numvalues: int = 0,
aws_conn_id: str = 'aws_default',
upload_format: str = "json",
replace: bool = False,
encrypt: bool = False,
acl_policy: str = None,
**kwargs,
):
super().__init__(
opcua_node=opcua_node,
opcua_startdate=opcua_startdate,
opcua_enddate=opcua_enddate,
opcua_conn_id=opcua_conn_id,
opcua_numvalues=opcua_numvalues,
upload_format=upload_format,
**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.aws_conn_id = aws_conn_id
self.replace = replace
self.encrypt = encrypt
self.acl_policy = acl_policy
self.s3_hook = None
def _upload(self, data: BytesIO):
self.s3_hook = S3Hook(self.aws_conn_id)
self.s3_hook.load_file_obj(
file_obj=data,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace,
encrypt=self.encrypt,
acl_policy=self.acl_policy)
self.log.info(f'File uploaded to {self.s3_key}')
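# --- Hedged usage sketch (not part of this provider) ---
# How the operator might be instantiated inside an Airflow DAG. The connection
# ids, OPC UA node id, dates, bucket and key below are placeholders, not
# values from this repository.
def _example_dag():
    from airflow import DAG

    with DAG(
        dag_id="opcua_to_s3_example",
        start_date=datetime(2021, 1, 1),
        schedule_interval="@daily",
        catchup=False,
    ) as dag:
        OPCUAToS3Operator(
            task_id="history_to_s3",
            opcua_node="ns=2;s=Machine1.Temperature",  # placeholder node id
            opcua_startdate="2021-01-01T00:00:00",
            opcua_enddate="2021-01-02T00:00:00",
            s3_bucket="example-bucket",
            s3_key="opcua/{{ ds }}/machine1.json",  # s3_key is templated
            opcua_conn_id="opcua_default",
            aws_conn_id="aws_default",
        )
    return dag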
| 30.5
| 93
| 0.633333
|
f959114cef0a1ced51ae5cb095fb72310bd0c918
| 30,273
|
py
|
Python
|
run_var_UCB_continuous.py
|
qphong/BayesOpt-LV
|
b852a94e9bae8716566e014ca1dd02186bcdf7ca
|
[
"MIT"
] | 1
|
2021-11-11T17:55:40.000Z
|
2021-11-11T17:55:40.000Z
|
run_var_UCB_continuous.py
|
qphong/BayesOpt-LV
|
b852a94e9bae8716566e014ca1dd02186bcdf7ca
|
[
"MIT"
] | null | null | null |
run_var_UCB_continuous.py
|
qphong/BayesOpt-LV
|
b852a94e9bae8716566e014ca1dd02186bcdf7ca
|
[
"MIT"
] | null | null | null |
import sys
import os
import argparse
import pickle
epsilon = 1e-12
parser = argparse.ArgumentParser(description="Bayesian Optimization for Value at Risk.")
parser.add_argument(
"-g",
"--gpu",
help="gpu device index for tensorflow",
required=False,
type=str,
default="0",
)
parser.add_argument(
"-q",
"--numqueries",
help="number/budget of queries",
required=False,
type=int,
default=1,
)
parser.add_argument(
"-r",
"--numruns",
help="number of random experiments",
required=False,
type=int,
default=2,
)
parser.add_argument(
"--ntrain",
help="number of optimizing iterations",
required=False,
type=int,
default=100,
)
parser.add_argument(
"--n_init_data",
help="number of initial observations",
required=False,
type=int,
default=2,
)
parser.add_argument(
"--n_rand_opt_init",
help="number of random initializers for optimization",
required=False,
type=int,
default=20,
)
parser.add_argument(
"--function",
help="function to optimize",
required=False,
type=str,
default="robot_pushing_optimization",
)
parser.add_argument(
"--quantile", help="quantile", required=False, type=float, default=0.2
)
parser.add_argument(
"--width", help="neighbor width", required=False, type=float, default=0.1
)
parser.add_argument(
"--nzsample", help="number of samples of z", required=False, type=int, default=50
)
parser.add_argument(
"--nxsample", help="number of samples of x", required=False, type=int, default=50
)
parser.add_argument(
"--ntrainsur",
help="number of optimizing iterations to optimize the surrogate",
required=False,
type=int,
default=2000,
)
parser.add_argument(
"--minvar", help="minimum noise variance", required=False, type=float, default=1e-4
)
parser.add_argument(
"--maxvar", help="maximum noise variance", required=False, type=float, default=4.0
)
parser.add_argument(
"--n_iter_fitgp",
help="fit the gp after n_iter_fitgp, if n_iter_fitgp = 0, never fit gp to the data (i.e., use pre-trained hyperparameters)",
required=False,
type=int,
default=3,
)
parser.add_argument(
"--shuffletie",
help="if shuffletie==1, when there are many values of z with the maximum value, we break tie by randomly selecting a value of z with the maximum value",
required=False,
type=int,
default=0,
)
parser.add_argument(
"-t",
"--dtype",
help="type of float: float32 or float64",
required=False,
type=str,
default="float64",
)
args = parser.parse_args()
# print all arguments
print("================================")
for arg in vars(args):
print(arg, getattr(args, arg))
print("================================")
gpu_device_id = args.gpu
folder = args.function
if not os.path.exists(folder):
os.makedirs(folder)
folder = "{}/continuous".format(folder)
if not os.path.exists(folder):
os.makedirs(folder)
nquery = args.numqueries
nrun = args.numruns
ntrain = args.ntrain
n_init_data = args.n_init_data
n_iter_fitgp = args.n_iter_fitgp
shuffletie = args.shuffletie
ntrainsur = args.ntrainsur
nxsample = args.nxsample
nzsample = args.nzsample
width = args.width
min_var = args.minvar
max_var = args.maxvar
func_name = args.function
print("nrun: {}".format(nrun))
print("nquery: {}".format(nquery))
print("n_init_data: {}".format(n_init_data))
print("n_iter_fitgp: {}".format(n_iter_fitgp))
print("Function: {}".format(func_name))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device_id
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import scipy as sp
import time
import scipy.stats as spst
import matplotlib.pyplot as plt
import utils
import functions
import varopt
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = False
gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.95
if args.dtype == "float32":
dtype = tf.float32
nptype = np.float32
elif args.dtype == "float64":
dtype = tf.float64
nptype = np.float64
else:
raise Exception("Unknown dtype: {}".format(args.dtype))
tf.reset_default_graph()
graph = tf.get_default_graph()
f_info = getattr(functions, func_name)()
"""
{
'function'
'name'
'xdim'
'zdim'
'xmin'
'xmax'
'is_z_discrete'
if z is continuous:
'zmin'
'zmax'
elif z is discrete:
'zvalues'
'zprobs'
# kernel hyperparameters
'lengthscale'
'signal_variance'
'likelihood_variance'
'rand_opt_init_x'
if 'rand_opt_init_x' is None: initialize x randomly
}
"""
print("Information of function:")
for k in f_info:
if k != "rand_opt_init_x":
print("{}: {}".format(k, f_info[k]))
else:
print("init_x.shape: {}".format(f_info["rand_opt_init_x"].shape))
func = f_info["function"]
func_tf = f_info["function_tf"]
xmin = f_info["xmin"]
xmax = f_info["xmax"]
xdim = f_info["xdim"]
zdim = f_info["zdim"]
zmin = f_info["zmin"]
zmax = f_info["zmax"]
z_generator = f_info["z_generator"]
z_lpdf = f_info["z_lpdf"]
lengthscale_np = f_info["lengthscale"]
signal_variance_np = f_info["signal_variance"]
generate_obs_noise_var_np = f_info["likelihood_variance"]
likelihood_variance_np = f_info["likelihood_variance"]
if f_info["rand_opt_init_x"] is None:
n_rand_opt_init = args.n_rand_opt_init
else:
n_rand_opt_init = f_info["rand_opt_init_x"].shape[0]
rand_opt_init_x_np = f_info["rand_opt_init_x"]
random_seed = 0
print("Random seed:", random_seed)
quantile = args.quantile
with graph.as_default():
X_plc = tf.placeholder(dtype=dtype, shape=(None, xdim), name="X_plc")
Z_plc = tf.placeholder(dtype=dtype, shape=(None, zdim), name="Z_plc")
input_dim = xdim + zdim
inputs = tf.concat([X_plc, Z_plc], axis=-1)
# (None,xdim+zdim)
Y_plc = tf.placeholder(dtype=dtype, shape=(None, 1), name="Y_plc")
beta_plc = tf.placeholder(dtype=dtype, shape=(), name="beta_plc")
lengthscale = tf.get_variable(
dtype=dtype, shape=(1, xdim + zdim), name="lengthscale"
)
signal_variance = tf.get_variable(dtype=dtype, shape=(), name="signal_variance")
likelihood_variance = tf.get_variable(
dtype=dtype, shape=(), name="likelihood_variance"
)
NKmm = utils.computeNKmm(
inputs, lengthscale, signal_variance, likelihood_variance, dtype
)
invK = utils.precomputeInvK(
input_dim, lengthscale, signal_variance, likelihood_variance, inputs, dtype
)
# (n_observations, n_observations)
invK_plc = tf.placeholder(dtype=dtype, shape=(None, None), name="invK_plc")
def get_bound(x, z, beta, type="lower"):
# x: (1,nx,nxsample,1,xdim)
# z: (nz,zdim)
# return (1,n_init_x,1,nz)
nx = tf.shape(x)[1]
nxsample = tf.shape(x)[2]
nz = tf.shape(z)[0]
x = tf.tile(x, multiples=(1, 1, 1, nz, 1))
# (1,nx,nxsample,nz,xdim)
z = tf.reshape(z, shape=(1, 1, 1, nz, zdim))
z = tf.tile(z, multiples=(1, nx, nxsample, 1, 1))
# (1,nx,nxsample,nz,zdim)
xz = tf.concat([x, z], axis=-1)
# (1,nx,nxsample,nz,xdim+zdim)
flatten_xz = tf.reshape(xz, shape=(-1, input_dim))
# (nx*nxsample*nz,xdim+zdim)
mean_f, var_f = utils.compute_mean_var_f(
flatten_xz,
inputs,
Y_plc,
lengthscale,
signal_variance,
likelihood_variance,
invK_plc,
fullcov=False,
dtype=dtype,
)
# mean_f: (nx*nz,1), var_f: (nx*nz,1)
std_f = tf.sqrt(var_f)
if type == "upper":
bound = mean_f + beta_plc * std_f
# (nx*nz,1)
elif type == "lower":
bound = mean_f - beta_plc * std_f
# (nx*nz,1)
elif type == "mean":
bound = mean_f
else:
raise Exception("Unknown bound!")
bound = tf.reshape(bound, shape=(1, nx, nxsample, nz))
return bound
# maximizer upper_varopt to find the query x
upper_varopt = varopt.VaROpt(
xdim,
zdim,
[xmin, xmax],
[zmin, zmax],
n_func=1,
f=lambda x, z: get_bound(x, z, beta_plc, type="upper"),
z_generator=z_generator,
n_init_x=n_rand_opt_init,
graph=graph,
surrogate_config={
"layer_sizes": [30, 30, 1],
"activations": ["sigmoid", "sigmoid", "linear"],
},
name="upper_varopt",
dtype=dtype,
)
lower_varopt = varopt.VaROpt(
xdim,
zdim,
[xmin, xmax],
[zmin, zmax],
n_func=1,
f=lambda x, z: get_bound(x, z, beta_plc, type="lower"),
z_generator=z_generator,
n_init_x=n_rand_opt_init,
graph=graph,
surrogate_config={
"layer_sizes": [30, 30, 1],
"activations": ["sigmoid", "sigmoid", "linear"],
},
name="lower_varopt",
dtype=dtype,
)
# find lower bound of at the query x
query_x_plc = tf.placeholder(dtype=dtype, shape=(1, 1, xdim), name="query_x_plc")
(
lower_quantile_f_val_at_queryx,
loss_lower_quantile_f_val_at_queryx,
train_lower_quantile_f_val_at_queryx,
) = lower_varopt.find_quantile(
query_x_plc, n_func=1, n_x=1, name="find_quantile_lower_at_query_x"
)
(
upper_quantile_f_val_at_queryx,
loss_upper_quantile_f_val_at_queryx,
train_upper_quantile_f_val_at_queryx,
) = upper_varopt.find_quantile(
query_x_plc, n_func=1, n_x=1, name="find_quantile_upper_at_query_x"
)
# lower_quantile_z_at_obs, lower_quantile_f_val_at_obs = lower_varopt.find_quantile(X_plc)
# estimate the maximizer
# by maximizing mean function
mean_varopt = varopt.VaROpt(
xdim,
zdim,
[xmin, xmax],
[zmin, zmax],
n_func=1,
f=lambda x, z: get_bound(x, z, beta_plc, type="mean"),
z_generator=z_generator,
n_init_x=n_rand_opt_init,
graph=graph,
surrogate_config={
"layer_sizes": [30, 30, 1],
"activations": ["sigmoid", "sigmoid", "linear"],
},
name="mean_varopt",
dtype=dtype,
)
# (2) by choosing observed input with max lower bound
# _, lower_quantile_f_val_at_X = lower_varopt.find_quantile(X_plc)
# for querying z
# optimize an objective function:
# query_x_plc
n_opt_init_z = 100
# NOTE: whenever optimizing query_z_tf
# need to load query_z_tf with random values
query_z_init = z_generator(n_opt_init_z)
query_z_tf = tf.get_variable(
initializer=np.zeros([n_opt_init_z, zdim], dtype=np.float64),
dtype=dtype,
name="query_z_tf",
)
upper_at_query = tf.squeeze(
get_bound(
tf.reshape(query_x_plc, shape=(1, tf.shape(query_x_plc)[0], 1, 1, xdim)),
query_z_tf,
beta_plc,
type="upper",
)
)
# (nz,)
lower_at_query = tf.squeeze(
get_bound(
tf.reshape(query_x_plc, shape=(1, tf.shape(query_x_plc)[0], 1, 1, xdim)),
query_z_tf,
beta_plc,
type="lower",
)
)
# (nz,)
z_logprobs = z_lpdf(query_z_tf)
# (nz,)
# loss_of_z_selection = tf.nn.relu(tf.squeeze(upper_quantile_f_val_at_queryx) - upper_at_query)\
# + tf.nn.relu(lower_at_query - tf.squeeze(lower_quantile_f_val_at_queryx))
# # (nz,)
upper_diff = tf.squeeze(upper_quantile_f_val_at_queryx) - upper_at_query
lower_diff = lower_at_query - tf.squeeze(lower_quantile_f_val_at_queryx)
bound_diff = upper_at_query - lower_at_query
# logprobs is zero if constraints are not satisfied:
# upper < upper_at_x or lower > lower_at_x
# eff_z_probs = tf.where(tf.nn.relu(-upper_diff) * tf.nn.relu(-lower_diff) >= -tf.cast(epsilon,dtype=dtype),
# tf.exp(z_logprobs),
# tf.zeros_like(z_logprobs, dtype=dtype))
cond = tf.cast(
tf.cast(upper_diff <= 0.0, dtype=tf.int32)
* tf.cast(lower_diff <= 0.0, dtype=tf.int32),
dtype=tf.bool,
)
eff_z_probs = tf.where(
cond, tf.exp(z_logprobs), tf.zeros_like(z_logprobs, dtype=dtype)
)
loss_of_z_selection = tf.nn.relu(upper_diff) + tf.nn.relu(lower_diff) - eff_z_probs
train_z_selection = tf.train.AdamOptimizer().minimize(
tf.reduce_mean(loss_of_z_selection), var_list=[query_z_tf]
)
if shuffletie:
print("Shuffling when tie exists in the loss of z!")
shuffled_idxs = tf.random.shuffle(tf.range(tf.shape(loss_of_z_selection)[0]))
shuffled_loss_of_z = tf.gather(loss_of_z_selection, indices=shuffled_idxs)
shuffled_min_z_selection_loss_idx = tf.math.argmax(-shuffled_loss_of_z)
min_z_selection_loss_idx = shuffled_idxs[shuffled_min_z_selection_loss_idx]
else:
print("No shuffling when tie exists in the loss of z")
min_z_selection_loss_idx = tf.math.argmax(-loss_of_z_selection)
selected_z = tf.gather(query_z_tf, indices=min_z_selection_loss_idx, axis=0)
selected_z_loss = tf.gather(
loss_of_z_selection, indices=min_z_selection_loss_idx, axis=0
)
selected_upper_diff = tf.gather(
upper_diff, indices=min_z_selection_loss_idx, axis=0
)
selected_lower_diff = tf.gather(
lower_diff, indices=min_z_selection_loss_idx, axis=0
)
selected_bound_diff = tf.gather(
bound_diff, indices=min_z_selection_loss_idx, axis=0
)
selected_z_logprob = tf.gather(z_logprobs, indices=min_z_selection_loss_idx, axis=0)
# find the ground truth function value for evaluating the regret
def ground_truth_func(x, z):
# x: (1,n_init_x,n_x_sample,1,xdim)
# z: (nz,zdim)
# return (1,n_init_x,1,nz)
n_init_x = tf.shape(x)[1]
n_x_sample = tf.shape(x)[2]
nz = tf.shape(z)[0]
x = tf.tile(x, multiples=(1, 1, 1, nz, 1))
# (1,n_init_x,n_x_sample,nz,xdim)
z = tf.reshape(z, shape=(1, 1, 1, nz, zdim))
z = tf.tile(z, multiples=(1, n_init_x, n_x_sample, 1, 1))
# (1,n_init_x,n_x_sample,nz,zdim)
xz = tf.concat([x, z], axis=-1)
# (1,n_init_x,1,nz,xdim+zdim)
flatten_xz = tf.reshape(xz, shape=(-1, input_dim))
# (n_init_x*nz,xdim+zdim)
vals = func_tf(flatten_xz)
return tf.reshape(vals, shape=(1, n_init_x, n_x_sample, nz))
ground_truth_varopt = varopt.VaROpt(
xdim,
zdim,
[xmin, xmax],
[zmin, zmax],
n_func=1,
f=ground_truth_func,
z_generator=z_generator,
n_init_x=n_rand_opt_init,
graph=graph,
surrogate_config={
"layer_sizes": [50, 50, 1],
"activations": ["sigmoid", "sigmoid", "linear"],
},
name="groundtruth_varopt",
dtype=dtype,
)
# compute the ground truth quantile at estimated maximizer for computing the regret
# find lower bound of at the query x
est_maximizer_plc = tf.placeholder(
dtype=dtype, shape=(1, 1, xdim), name="est_maximizer_plc"
)
(
ground_truth_quantile_f_val_at_est_max,
loss_ground_truth_quantile_f_val_at_est_max,
train_ground_truth_quantile_f_val_at_est_max,
) = ground_truth_varopt.find_quantile(
est_maximizer_plc, n_func=1, n_x=1, name="groundtruth"
)
np.random.seed(random_seed)
# generate initial observations for all random runs:
init_X_np = np.random.rand(nrun, n_init_data, xdim) * (xmax - xmin) + xmin
init_Z_np = np.random.rand(nrun, n_init_data, zdim) * (zmax - zmin) + zmin
with open(
"{}/init_observations_seed{}.pkl".format(folder, random_seed), "wb"
) as outfile:
pickle.dump(
{"init_X_np": init_X_np, "init_Z_np": init_Z_np},
outfile,
protocol=pickle.HIGHEST_PROTOCOL,
)
with graph.as_default():
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print("*********************")
print("rand_opt_init_x_np.shape = ", rand_opt_init_x_np.shape)
# opt_x_np, opt_var_np = ground_truth_varopt.maximize_in_session(
# sess,
# n_x_train = ntrain,
# n_z_train = ntrainsur,
# feed_dict = {
# ground_truth_varopt.quantile_plc: quantile,
# ground_truth_varopt.neighbor_center_plc: np.expand_dims(rand_opt_init_x_np, axis=0), #i.e., n_func = 1
# ground_truth_varopt.neighbor_width_plc: width,
# ground_truth_varopt.n_z_sample_plc: nzsample,
# ground_truth_varopt.n_x_sample_plc: nxsample
# },
# verbose = 100
# )
# print("Optimal X: {}, VaR: {}".format(opt_x_np, opt_var_np))
opt_var_np = f_info["max_var_continuous"]
print("Optimal VaR: {}".format(opt_var_np))
all_regrets_by_mean = np.zeros([nrun, nquery])
all_vars_at_est_maximizer_by_mean = np.zeros([nrun, nquery])
all_estimated_maximizers = np.zeros([nrun, nquery, xdim])
all_estimated_maximizers_by_lower = np.zeros([nrun, nquery, xdim])
all_estimated_max_var_by_mean = np.zeros([nrun, nquery])
all_estimated_max_var_by_lower = np.zeros([nrun, nquery])
for run_idx in range(nrun):
print("{}. RANDOM RUN".format(run_idx))
# Generate initial observations
X_np = init_X_np[run_idx, :, :]
Z_np = init_Z_np[run_idx, :]
input_np = np.concatenate([X_np, Z_np], axis=-1)
Y_np = func(input_np).reshape(-1, 1) + np.sqrt(
generate_obs_noise_var_np
) * np.random.randn(input_np.shape[0]).reshape(-1, 1)
sess.run(tf.global_variables_initializer())
# if n_iter_fitgp == 0: meanf_const is kept at 0. always
# it is updated accordingly
meanf_const = 0.0
for query_idx in range(nquery):
print("")
print("{}.{}. QUERYING".format(run_idx, query_idx))
print("Controlled variable:")
print(X_np)
print("Environment variable:")
print(Z_np)
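                # GP-UCB style confidence parameter: beta grows like
                # 2 * log(t^2 * pi^2 / (6 * delta)) with t = query_idx + 1 and
                # delta = 0.1, so the confidence bounds widen slowly over queries.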
beta_np = 2.0 * np.log((query_idx + 1) ** 2 * np.pi ** 2 / 6.0 / 0.1)
print("Observation:")
print(Y_np)
# NOTE: to generate Y_np we need generate_obs_noise_var_np for synthetic function, this is unknown to the BO algorithm. In BO, the noise variance is learned from the data
                # NOTE: we do not scale Y_np, because scaling may make the pre-trained GP model for the function incorrect!
                # it would also cause the noise variance min_var to be estimated incorrectly
# i.e., scaling Y_np changes the noise var
# scaled_Y_np = Y_np / (np.max(Y_np) - np.min(Y_np))
if n_iter_fitgp > 0 and query_idx % n_iter_fitgp == 0:
print("Fit GP to observations")
has_error, gp_hyperparameters = functions.fit_gp(
input_np,
Y_np,
noise_var=likelihood_variance_np,
train_noise_var=True,
min_var=min_var,
max_var=max_var,
)
if has_error:
print(" Skip due to numerical error!")
else:
print(
" Learned GP hyperparameters: {}".format(
gp_hyperparameters
)
)
meanf_const = gp_hyperparameters["meanf"]
signal_variance_np = gp_hyperparameters["signal_var"]
lengthscale_np = gp_hyperparameters["lengthscale"]
likelihood_variance_np = gp_hyperparameters["noise_var"]
shifted_Y_np = Y_np - meanf_const
lengthscale.load(lengthscale_np.reshape(1, input_dim), sess)
signal_variance.load(np.squeeze(signal_variance_np), sess)
likelihood_variance.load(np.squeeze(likelihood_variance_np), sess)
feed_dict = {
X_plc: X_np,
Z_plc: Z_np,
Y_plc: shifted_Y_np,
beta_plc: beta_np,
mean_varopt.quantile_plc: quantile,
mean_varopt.neighbor_center_plc: np.expand_dims(
rand_opt_init_x_np, axis=0
), # i.e., n_func = 1
mean_varopt.neighbor_width_plc: width,
mean_varopt.n_z_sample_plc: nzsample,
mean_varopt.n_x_sample_plc: nxsample,
upper_varopt.quantile_plc: quantile,
upper_varopt.neighbor_center_plc: np.expand_dims(
rand_opt_init_x_np, axis=0
), # i.e., n_func = 1
upper_varopt.neighbor_width_plc: width,
upper_varopt.n_z_sample_plc: nzsample,
upper_varopt.n_x_sample_plc: nxsample,
lower_varopt.quantile_plc: quantile,
lower_varopt.neighbor_center_plc: np.expand_dims(
rand_opt_init_x_np, axis=0
), # i.e., n_func = 1
lower_varopt.neighbor_width_plc: width,
lower_varopt.n_z_sample_plc: nzsample,
lower_varopt.n_x_sample_plc: nxsample,
ground_truth_varopt.quantile_plc: quantile,
ground_truth_varopt.neighbor_center_plc: np.expand_dims(
rand_opt_init_x_np, axis=0
), # i.e., n_func = 1
ground_truth_varopt.neighbor_width_plc: width,
ground_truth_varopt.n_z_sample_plc: nzsample,
ground_truth_varopt.n_x_sample_plc: nxsample,
}
invK_np = sess.run(invK, feed_dict=feed_dict)
feed_dict[invK_plc] = invK_np
print("")
print("Estimating maximizer by maximize the VaR of posterior mean.")
# max_x_mean_np, max_quantile_f_mean_np = mean_varopt.maximize_in_session(
# sess,
# n_x_train = ntrain,
# n_z_train = ntrainsur,
# feed_dict = feed_dict,
# verbose = 100
# )
max_x_mean_np, max_quantile_f_mean_np = mean_varopt.find_max_in_set(
sess, X_np, feed_dict, ntrain=1000
)
(
max_x_mean_np_by_lower,
max_quantile_f_mean_np_by_lower,
) = lower_varopt.find_max_in_set(sess, X_np, feed_dict, ntrain=1000)
all_estimated_maximizers[
run_idx, query_idx, :
] = max_x_mean_np.squeeze()
all_estimated_maximizers_by_lower[
run_idx, query_idx, :
] = max_x_mean_np_by_lower.squeeze()
all_estimated_max_var_by_mean[run_idx, query_idx] = np.squeeze(
max_quantile_f_mean_np
)
all_estimated_max_var_by_lower[run_idx, query_idx] = np.squeeze(
max_quantile_f_mean_np_by_lower
)
print(
"Estimated maximizer at {} VaR {}".format(
max_x_mean_np, max_quantile_f_mean_np
)
)
print(
"Estimated maximizer by lower at {} VaR {}".format(
max_x_mean_np_by_lower, max_quantile_f_mean_np_by_lower
)
)
sys.stdout.flush()
# computing the regret
feed_dict[est_maximizer_plc] = max_x_mean_np.reshape(1, 1, xdim)
for _ in range(2000):
sess.run(train_ground_truth_quantile_f_val_at_est_max, feed_dict)
ground_truth_quantile_f_val_at_est_max_np = sess.run(
ground_truth_quantile_f_val_at_est_max, feed_dict
)
regret = (
opt_var_np - ground_truth_quantile_f_val_at_est_max_np.squeeze()
)
print("Regret by maximizing mean: ", regret)
print(
"Groundtruth VaR at the query x by max mean: ",
ground_truth_quantile_f_val_at_est_max_np.squeeze(),
)
all_regrets_by_mean[run_idx, query_idx] = regret
all_vars_at_est_maximizer_by_mean[
run_idx, query_idx
] = ground_truth_quantile_f_val_at_est_max_np.squeeze()
# Find query x
print("")
print("Finding query x by maximizing upper bound of VaR.")
(
query_x_np,
upper_quantile_f_at_queryx_np,
) = upper_varopt.maximize_in_session(
sess,
n_x_train=ntrain,
n_z_train=ntrainsur,
feed_dict=feed_dict,
verbose=100,
)
upper_quantile_f_at_queryx_np = np.squeeze(
upper_quantile_f_at_queryx_np
)
print("Query x: {}".format(query_x_np))
print(
" At query, upper bound of function value: {:.6f}".format(
upper_quantile_f_at_queryx_np
)
)
sys.stdout.flush()
feed_dict[query_x_plc] = query_x_np.reshape(1, 1, xdim)
for _ in range(ntrainsur):
sess.run(train_lower_quantile_f_val_at_queryx, feed_dict)
lower_quantile_f_val_at_queryx_np = sess.run(
lower_quantile_f_val_at_queryx, feed_dict
)
lower_quantile_f_val_at_queryx_np = np.squeeze(
lower_quantile_f_val_at_queryx_np
)
print(
" At query, lower bound of function value: {:.6f}".format(
lower_quantile_f_val_at_queryx_np
)
)
query_z_tf.load(sess.run(query_z_init), sess)
for _ in range(2000):
sess.run(train_z_selection, feed_dict)
(
query_z_np,
selected_z_loss_np,
selected_upper_diff_np,
selected_lower_diff_np,
selected_bound_diff_np,
selected_z_logprob_np,
) = sess.run(
[
selected_z,
selected_z_loss,
selected_upper_diff,
selected_lower_diff,
selected_bound_diff,
selected_z_logprob,
],
feed_dict,
)
print(
"Query z: {} with upper diff {} lower diff {} bound diff {} lprob {}, loss {}".format(
query_z_np,
selected_upper_diff_np,
selected_lower_diff_np,
selected_bound_diff_np,
selected_z_logprob_np,
selected_z_loss_np,
)
)
X_np = np.concatenate([X_np, query_x_np.reshape(1, xdim)], axis=0)
Z_np = np.concatenate([Z_np, query_z_np.reshape(1, zdim)], axis=0)
input_np = np.concatenate([X_np, Z_np], axis=-1)
query_np = np.concatenate(
[query_x_np.reshape(1, xdim), query_z_np.reshape(1, zdim)], axis=-1
)
query_obs_np = func(query_np).reshape(-1, 1) + np.sqrt(
generate_obs_noise_var_np
) * np.random.randn(query_np.shape[0]).reshape(-1, 1)
Y_np = np.concatenate([Y_np, query_obs_np], axis=0)
with open(
"{}/all_observations_quantile{}.pkl".format(folder, quantile), "wb"
) as outfile:
pickle.dump(
{"X": X_np, "Z": Z_np, "Y": Y_np},
outfile,
protocol=pickle.HIGHEST_PROTOCOL,
)
with open(
"{}/regrets_by_mean_quantile{}.pkl".format(folder, quantile), "wb"
) as outfile:
pickle.dump(
{
"regrets": all_regrets_by_mean,
"estimated_max_VaR_by_mean": all_estimated_max_var_by_mean,
"groundtruth_VaR_at_estimate": all_vars_at_est_maximizer_by_mean,
"estimated_max_VaR_by_lower": all_estimated_max_var_by_lower,
"optimal_VaR": opt_var_np,
},
outfile,
protocol=pickle.HIGHEST_PROTOCOL,
)
with open(
"{}/estimated_maximizers_quantile{}.pkl".format(folder, quantile), "wb"
) as outfile:
pickle.dump(
{
"estimated_maximizers_by_mean": all_estimated_maximizers,
"estimated_maximizers_by_lower": all_estimated_maximizers_by_lower,
"optimal_VaR": opt_var_np,
},
outfile,
protocol=pickle.HIGHEST_PROTOCOL,
)
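# --- Hedged example invocation (arguments are illustrative only, not the
# settings used for any reported experiment) ---
#   python run_var_UCB_continuous.py --function robot_pushing_optimization \
#       -q 40 -r 5 --quantile 0.2 --width 0.1 --n_init_data 5 -g 0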
| 33.267033
| 186
| 0.570674
|
8dc6a70cad327ddfd38fad42a2778b7012f2e352
| 3,372
|
py
|
Python
|
nova/conf/image_file_url.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/conf/image_file_url.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/conf/image_file_url.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
# needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.i18n import _
image_file_url_group = cfg.OptGroup(
'image_file_url',
title='Image File URL Options')
filesystems = cfg.ListOpt(
name='filesystems',
deprecated_for_removal=True,
deprecated_reason='The feature to download images from glance via '
'filesystem is not used and will be removed in the '
'future.',
default=[],
help=_('List of file systems that are configured '
'in this file in the '
'image_file_url:<list entry name> '
'sections'))
# NOTE(jbresnah) because the group under which these options are added is
# dynamically determined these options need to stay out of global space
# or they will confuse generate_sample.sh
filesystem_opts = [
cfg.StrOpt('id',
               help=_('A unique ID given to each file system. This '
                      'value is set in Glance and agreed upon here so '
                      'that the operator knows they are dealing with '
'the same file system.'),
deprecated_for_removal=True,
deprecated_reason='The feature to download images from glance '
'via filesystem is not used and will be '
'removed in the future.'),
cfg.StrOpt('mountpoint',
help=_('The path at which the file system is mounted.'),
deprecated_for_removal=True,
deprecated_reason='The feature to download images from glance '
'via filesystem is not used and will be '
'removed in the future.'),
]
ALL_OPTS = [filesystems]
def register_opts(conf):
conf.register_group(image_file_url_group)
conf.register_opts(ALL_OPTS, group=image_file_url_group)
for fs in conf.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
conf.register_opts(filesystem_opts, group=group_name)
def list_opts():
# NOTE(markus_z): As the "filesystem" opt has an empty list as a default
# value and this value is necessary for a correct group name, we cannot
# list the "filesystem_opts" for the "nova.conf.sample" file here. A
# follow up patch will deprecate those. Due to their dynamic creation
# they never got shown in "nova.conf.sample" nor the config reference
    # manual. I see no need to change this here with a dummy group or something
# like that.
return {image_file_url_group: ALL_OPTS}
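# --- Hedged illustration (not part of nova) ---
# The dynamically registered option groups above would correspond to nova.conf
# sections roughly like the following; the filesystem name "fs1", the id and
# the mountpoint are placeholders.
#
#     [image_file_url]
#     filesystems = fs1
#
#     [image_file_url:fs1]
#     id = example-filesystem-id
#     mountpoint = /mnt/images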
| 40.142857
| 79
| 0.661032
|
d611b3677aee3d7810bbe1509eb235a4e29204e9
| 355
|
py
|
Python
|
student/urls.py
|
YarinBou/SJMaster
|
74f2d77bf03a9a5856ffd44ce57041ae7a416bcb
|
[
"MIT"
] | 1
|
2022-03-17T10:34:56.000Z
|
2022-03-17T10:34:56.000Z
|
student/urls.py
|
YarinBou/SJMaster
|
74f2d77bf03a9a5856ffd44ce57041ae7a416bcb
|
[
"MIT"
] | 49
|
2021-11-15T08:29:25.000Z
|
2022-01-01T17:52:46.000Z
|
student/urls.py
|
YarinBou/SJMaster
|
74f2d77bf03a9a5856ffd44ce57041ae7a416bcb
|
[
"MIT"
] | 2
|
2021-11-02T09:22:23.000Z
|
2021-11-04T11:04:18.000Z
|
from django.urls import path
from student.views import update_student_account_settings_view, account_update_success
urlpatterns = [
path('student/account_settings/', update_student_account_settings_view,
name="student_account_settings"),
path('account_update_success/', account_update_success,
name="account_update_success"),
]
| 35.5
| 86
| 0.788732
|
63f642a9612be623f107ee6ec057fe65d60f0dc4
| 567
|
py
|
Python
|
physics/boundary_conditions/third_type_boundary_condition.py
|
fedorpashin/physics
|
d587d50679fb5ad3994a8b992806a30053ed45e1
|
[
"MIT"
] | 2
|
2021-09-06T16:20:25.000Z
|
2021-09-06T16:21:17.000Z
|
physics/boundary_conditions/third_type_boundary_condition.py
|
fedorpashin/physics
|
d587d50679fb5ad3994a8b992806a30053ed45e1
|
[
"MIT"
] | 30
|
2021-09-06T16:20:29.000Z
|
2021-11-27T22:41:03.000Z
|
physics/boundary_conditions/third_type_boundary_condition.py
|
fedorpashin/physics
|
d587d50679fb5ad3994a8b992806a30053ed45e1
|
[
"MIT"
] | null | null | null |
from physics.boundary_conditions.boundary_condition import BoundaryCondition
from dataclasses import dataclass
from final_class import final
from overrides import overrides
__all__ = ['ThirdTypeBoundaryCondition']
@final
@dataclass
class ThirdTypeBoundaryCondition(BoundaryCondition):
__ν: float
__κ: float
def __init__(self, ν: float, κ: float):
self.__ν = ν
self.__κ = κ
@property # type: ignore
@overrides
def ν(self) -> float:
return self.__ν
@property
def κ(self) -> float:
return self.__κ
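# --- Hedged usage sketch (not part of the library) ---
# Constructing a third-type boundary condition and reading back its
# coefficients; the numeric values are arbitrary.
if __name__ == '__main__':
    bc = ThirdTypeBoundaryCondition(ν=1.5, κ=0.2)
    assert bc.ν == 1.5
    assert bc.κ == 0.2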
| 20.25
| 76
| 0.698413
|
613679698dd09c922c898a19badfb25043a1b8c1
| 231
|
py
|
Python
|
tests/test_compression.py
|
d1618033/Flask-Loopback
|
6e2e7bc6c94d51c238b91fe15bf070e8b99f6e25
|
[
"BSD-3-Clause"
] | 8
|
2015-01-28T18:47:56.000Z
|
2022-02-13T01:26:14.000Z
|
tests/test_compression.py
|
d1618033/Flask-Loopback
|
6e2e7bc6c94d51c238b91fe15bf070e8b99f6e25
|
[
"BSD-3-Clause"
] | 13
|
2016-01-18T15:42:41.000Z
|
2021-01-07T16:18:19.000Z
|
tests/test_compression.py
|
d1618033/Flask-Loopback
|
6e2e7bc6c94d51c238b91fe15bf070e8b99f6e25
|
[
"BSD-3-Clause"
] | 4
|
2016-09-18T12:47:32.000Z
|
2020-03-13T05:35:32.000Z
|
import requests
def test_compression(active_app, url): # pylint: disable=unused-argument
resp = requests.get(url.add_path('compressed'))
resp.raise_for_status()
assert resp.content.decode('utf-8') == 'uncompressed!'
| 25.666667
| 72
| 0.727273
|
65a154eda8d2848c581251e026cd75cb004fdc70
| 4,623
|
py
|
Python
|
project/admin.py
|
tomgreen66/cogs3
|
6a0240faca83a7d1af061e70b5d7c324cf5e067a
|
[
"MIT"
] | null | null | null |
project/admin.py
|
tomgreen66/cogs3
|
6a0240faca83a7d1af061e70b5d7c324cf5e067a
|
[
"MIT"
] | null | null | null |
project/admin.py
|
tomgreen66/cogs3
|
6a0240faca83a7d1af061e70b5d7c324cf5e067a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from project.forms import ProjectAdminForm, ProjectUserMembershipAdminForm
from project.models import (Project, ProjectCategory, ProjectFundingSource,
ProjectSystemAllocation, ProjectUserMembership)
from project.openldap import (update_openldap_project,
update_openldap_project_membership)
@admin.register(ProjectCategory)
class ProjectCategoryAdmin(admin.ModelAdmin):
list_display = ('name', )
@admin.register(ProjectFundingSource)
class ProjectFundingSourceAdmin(admin.ModelAdmin):
list_display = (
'name',
'description',
)
@admin.register(ProjectSystemAllocation)
class ProjectSystemAllocationAdmin(admin.ModelAdmin):
list_display = (
'project',
'system',
)
@admin.register(ProjectUserMembership)
class ProjectUserMembershipAdmin(admin.ModelAdmin):
def _project_membership_action_message(self, rows_updated):
if rows_updated == 1:
message = '1 project membership was'
else:
message = '{rows} project memberships were'.format(rows=rows_updated)
return message
def activate_project_memberships(self, request, queryset):
rows_updated = 0
for membership in queryset:
membership.status = ProjectUserMembership.AUTHORISED
membership.save()
update_openldap_project_membership(membership)
rows_updated += 1
message = self._project_membership_action_message(rows_updated)
self.message_user(request, '{message} successfully submitted for activation.'.format(message=message))
activate_project_memberships.short_description = 'Activate selected project memberships in LDAP'
def deactivate_project_memberships(self, request, queryset):
rows_updated = 0
for membership in queryset:
membership.status = ProjectUserMembership.REVOKED
membership.save()
update_openldap_project_membership(membership)
rows_updated += 1
message = self._project_membership_action_message(rows_updated)
self.message_user(request, '{message} successfully submitted for deactivation.'.format(message=message))
deactivate_project_memberships.short_description = 'Deactivate selected project memberships in LDAP'
form = ProjectUserMembershipAdminForm
actions = [activate_project_memberships, deactivate_project_memberships]
list_display = (
'project',
'user',
'status',
'date_joined',
)
search_fields = (
'project__code',
'user__first_name',
'user__last_name',
'user__email',
'user__profile__scw_username',
)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
def _project_action_message(self, rows_updated):
if rows_updated == 1:
message = '1 project was'
else:
            message = '{rows} projects were'.format(rows=rows_updated)
return message
def activate_projects(self, request, queryset):
rows_updated = 0
for project in queryset:
project.status = Project.APPROVED
project.save()
update_openldap_project(project)
rows_updated += 1
message = self._project_action_message(rows_updated)
self.message_user(request, '{message} successfully submitted for activation.'.format(message=message))
activate_projects.short_description = 'Activate selected projects in LDAP'
def deactivate_projects(self, request, queryset):
rows_updated = 0
for project in queryset:
project.status = Project.REVOKED
project.save()
update_openldap_project(project)
rows_updated += 1
message = self._project_action_message(rows_updated)
self.message_user(request, '{message} successfully submitted for deactivation.'.format(message=message))
deactivate_projects.short_description = 'Deactivate selected projects in LDAP'
form = ProjectAdminForm
actions = [activate_projects, deactivate_projects]
# Fields to be used when displaying a Project instance.
list_display = (
'code',
'created_time',
'start_date',
'tech_lead',
'status',
)
list_filter = ('status', )
search_fields = (
'title',
'legacy_hpcw_id',
'legacy_arcca_id',
'code',
'gid_number',
'pi',
'tech_lead__first_name',
'tech_lead__last_name',
'tech_lead__email',
)
| 33.258993
| 112
| 0.674886
|
c1a8c7bc78dc25a74f08d18acde5c3055ab765e9
| 863
|
py
|
Python
|
socket/server/logger.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
socket/server/logger.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
socket/server/logger.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
import logging
import sys
class StdErrHandler(logging.Handler):
def __init__(self, level=None):
if level is None:
level = logging.NOTSET
super().__init__(level)
fmt = logging.Formatter('%(name)s: %(message)s')
super().setFormatter(fmt)
def flush(self):
print(file=sys.stderr, end='', flush=True)
def emit(self, record):
message = super().format(record)
print(message, file=sys.stderr)
def _instantiate(cls):
return cls()
@_instantiate
class Logger(logging.Logger):
def __init__(self):
super().__init__(__name__)
self.handler = StdErrHandler()
super().addHandler(self.handler)
def flush(self):
self.handler.flush()
return self
def log(self, *args, **kwargs):
super().info(*args, **kwargs)
return self
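# --- Hedged usage sketch (not part of the module) ---
# Because of the @_instantiate decorator above, `Logger` is bound to an
# instance rather than a class, so callers use it directly; the message text
# is arbitrary.
if __name__ == '__main__':
    Logger.log('server started').flush()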
| 21.575
| 56
| 0.606025
|
17f69032f8f005d57937c3ee4a9597ea3d63759c
| 7,603
|
py
|
Python
|
moveControl/pathTrack/fuzzy_pid.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
moveControl/pathTrack/fuzzy_pid.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
moveControl/pathTrack/fuzzy_pid.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | 1
|
2021-09-04T10:27:30.000Z
|
2021-09-04T10:27:30.000Z
|
import skfuzzy as sf
import time
import numpy as np
from math import pi, log
class FuzzyPID:
def __init__(self, Pmax, Pmin, Imax, Imin, Dmax, Dmin):
self.Kpmax = Pmax
self.Kpmin = Pmin
self.Kimax = Imax
self.Kimin = Imin
self.Kdmax = Dmax
self.Kdmin = Dmin
self.sample_time = 0.0
self.current_time = time.time()
self.last_time = self.current_time
self.tfm = self.tfm_generator(-pi, pi)
self.dtfm = self.tfm_generator(-8, 8)
self.re = self.rule()
self.rde = self.re.T
self.rie = self.rule_ki()
self.a = self.rule_alpha()
self.b = self.a.T
self.clear()
def tfm_generator(self, xmin, xmax):
x = (xmax - xmin) / 2
NB = np.array([xmin, xmin, xmin + 1 / 3 * x], dtype=np.float32)
NM = np.array([xmin, xmin + 1 / 3 * x, xmin + 2 / 3 * x], dtype=np.float32)
NS = np.array([xmin + 1 / 3 * x, xmin + 2 / 3 * x, xmin + x], dtype=np.float32)
ZE = np.array([xmin + 2 / 3 * x, xmin + x, xmax - 2 / 3 * x], dtype=np.float32)
PS = np.array([xmin + x, xmax - 2 / 3 * x, xmax - x / 3], dtype=np.float32)
PM = np.array([xmax - 2 / 3 * x, xmax - x / 3, xmax], dtype=np.float32)
PB = np.array([xmax - 1 / 3 * x, xmax, xmax], dtype=np.float32)
return [NB, NM, NS, ZE, PS, PM, PB]
def membership(self, x, tfm):
x = np.array([x])
return [sf.trimf(x, tfm[0]), sf.trimf(x, tfm[1]), sf.trimf(x, tfm[2]), \
sf.trimf(x, tfm[3]), sf.trimf(x, tfm[4]), sf.trimf(x, tfm[5]), sf.trimf(x, tfm[6])]
def rule(self):
return np.matrix([[3, 4, 5, 6, 5, 4, 3], [2, 3, 4, 5, 4, 3, 2], [1, 2, 3, 4, 3, 2, 1], \
[0, 1, 2, 3, 2, 1, 0], [1, 2, 3, 4, 3, 2, 1], [2, 3, 4, 5, 4, 3, 2], [3, 4, 5, 6, 5, 4, 3]])
def rule_alpha(self):
return np.matrix([[2, 2, 2, 2, 2, 2, 2], [3, 3, 2, 2, 2, 3, 3], [4, 3, 3, 2, 3, 3, 4], \
[5, 4, 3, 3, 3, 4, 5], [4, 3, 3, 2, 3, 3, 4], [3, 3, 2, 2, 2, 3, 3], [2, 2, 2, 2, 2, 2, 2]])
def rule_ki(self):
return np.matrix([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 2, 2, 2, 0, 0], \
[0, 2, 4, 2, 4, 2, 0], [0, 0, 2, 2, 2, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]])
def clear(self):
self.SetPoint = 0.0
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
self.int_error = 0.0
self.windup_guard = 10.0
self.output = 0.0
def update_K(self, error, d_error):
self.Kp = self.re[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 6 * (self.Kpmax - self.Kpmin) + self.Kpmin
self.Kd = self.rde[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 6 * (self.Kdmax - self.Kdmin) + self.Kdmin
self.alpha = self.a[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))]
self.Ki = self.rie[np.argmax(self.membership(error, self.tfm)), \
np.argmax(self.membership(d_error, self.dtfm))] / 4 * (self.Kimax - self.Kimin) + self.Kimin
def update(self, feedback_value, speed):
error = self.SetPoint - feedback_value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
d_error = speed
self.update_K(error, d_error)
if delta_time >= self.sample_time:
pTerm = self.Kp * error
if pTerm < -self.windup_guard:
self.PTerm = -self.windup_guard
elif pTerm > self.windup_guard:
self.PTerm = self.windup_guard
else:
self.PTerm = pTerm
self.ITerm += self.Ki * error * delta_time
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
if delta_time > 0:
self.DTerm = self.Kd * delta_error / delta_time
if (self.DTerm < -self.windup_guard):
self.DTerm = -self.windup_guard
elif (self.DTerm > self.windup_guard):
self.DTerm = self.windup_guard
self.last_time = self.current_time
self.last_error = error
Output = self.PTerm + (self.ITerm) + (self.DTerm)
if Output > 15:
self.output = 15
elif Output < -15:
self.output = -15
else:
self.output = Output
def setKp(self, Pmax, Pmin):
self.Kpmax = Pmax
self.Kpmin = Pmin
def setKd(self, Dmax, Dmin):
self.Kdmax = Dmax
self.Kdmin = Dmin
def setKi(self, Imax, Imin):
self.Kimax = Imax
self.Kimin = Imin
def setSampleTime(self, sample_time):
self.sample_time = sample_time
def setSetPoint(self, setpoint):
self.SetPoint = setpoint
def demo():
import skfuzzy
import time
import os
import sys
lib_path = os.path.abspath(os.path.join(sys.path[0], '..'))
sys.path.append(lib_path)
# import gym
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
import numpy as np
import math
from tqdm import tqdm
Ctl = FuzzyPID(10, 7, 4, 2, 1.15, 0.75)
Ctl.setKp(10, 3)
Ctl.setKi(9, 0)
Ctl.setKd(0.9, 0.3)
Ctl.setSampleTime(0.05)
Ctl.setSetPoint(0.0)
graph = []
Graph = []
a = np.arange(-pi, pi, pi / 100)
b = np.arange(-8, 8, 8 / 100)
for i in tqdm(a):
for j in b:
Ctl.update(i, j)
# print(Ctl.output)
graph.append(Ctl.output)
Graph.append(graph)
graph = []
print(Graph)
plt.imshow(Graph, extent=(np.amin(a), np.amax(a), np.amax(b), np.amin(b)),
cmap=cm.hot)
plt.colorbar()
    plt.savefig('hot.png')  # save the figure first, then show it
plt.show()
'''
tfm = Ctl.tfm_generator(-pi, pi)
dtfm = Ctl.tfm_generator(-8,8)
graph = []
ele = 2*8 / 100
count = -8
indexing = []
labels = ["NB", "NM","NS","ZE","PS","PM","PB"]
for i in range(7):
for j in range(100):
graph.append(Ctl.membership(count, dtfm)[i])
count += ele
indexing.append(count)
plt.plot(indexing,graph, "-",label = labels[i])
graph = []
indexing = []
count = -8
plt.title("Angle Speed Membership")
plt.legend(loc = 'upper right')
string = "../result/membership2.png"
plt.savefig(string)
'''
'''
env = gym.make('Pendulum-v0')
for i_episode in range(10):
observation = env.reset()
Ctl.clear()
for t in range(300):
env.render()
feedback, thbot = env.state
graph.append(feedback)
Ctl.update(feedback, thbot)
action = [Ctl.output]
print(action)
print(Ctl.PTerm, Ctl.ITerm,Ctl.DTerm)
observation, reward, done, info = env.step(action)
plt.plot(graph[::10], "^-")
graph = []
plt.title("Fuzzy PID performance")
string = "../result/"+str(time.time())+"Fuzzy_graph.png"
plt.savefig(string)
env.close()
'''
if __name__ == '__main__':
demo()
| 35.036866
| 119
| 0.519926
|
02b641e8cf741d60e6eaada38df857e69d300475
| 8,241
|
py
|
Python
|
wenet/bin/alignment.py
|
underdogliu/wenet
|
19af50a6c848e77adb5dfa40a861061c1ae8b78e
|
[
"Apache-2.0"
] | null | null | null |
wenet/bin/alignment.py
|
underdogliu/wenet
|
19af50a6c848e77adb5dfa40a861061c1ae8b78e
|
[
"Apache-2.0"
] | null | null | null |
wenet/bin/alignment.py
|
underdogliu/wenet
|
19af50a6c848e77adb5dfa40a861061c1ae8b78e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Mobvoi Inc. (authors: Di Wu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import logging
import os
import sys
import torch
import yaml
from torch.utils.data import DataLoader
from textgrid import TextGrid, IntervalTier
from wenet.dataset.dataset import AudioDataset, CollateFunc
from wenet.transformer.asr_model import init_asr_model
from wenet.utils.checkpoint import load_checkpoint
from wenet.utils.ctc_util import forced_align
from wenet.utils.common import get_subsample
def generator_textgrid(maxtime, lines, output):
# Download Praat: https://www.fon.hum.uva.nl/praat/
interval = maxtime / (len(lines) + 1)
margin = 0.0001
tg = TextGrid(maxTime=maxtime)
linetier = IntervalTier(name="line", maxTime=maxtime)
i = 0
for l in lines:
s, e, w = l.split()
linetier.add(minTime=float(s) + margin, maxTime=float(e), mark=w)
tg.append(linetier)
print("successfully generator {}".format(output))
tg.write(output)
def get_frames_timestamp(alignment):
    # convert the alignment to a Praat-readable format (Praat is a tool for
    # doing phonetics by computer), which helps when analyzing the alignment
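    # Worked example of the grouping below: a CTC alignment such as
    # [0, 0, 5, 5, 0, 7] is split into [[0, 0, 5, 5], [0, 7]], i.e. each chunk
    # holds one token's repeated frames plus the blank frames (label 0) that
    # precede it.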
timestamp = []
# get frames level duration for each token
start = 0
end = 0
while end < len(alignment):
if end == len(
alignment) - 1 and alignment[start] == alignment[end] == 0:
timestamp[-1] += alignment[start:]
break
while end < len(alignment) and alignment[end] == 0:
end += 1
end += 1
while end < len(alignment) and alignment[end - 1] == alignment[end]:
end += 1
timestamp.append(alignment[start:end])
start = end
return timestamp
def get_labformat(timestamp, subsample):
begin = 0
duration = 0
labformat = []
for idx, t in enumerate(timestamp):
# 25ms frame_length,10ms hop_length, 1/subsample
subsample = get_subsample(configs)
# time duration
duration = len(t) * 0.01 * subsample
if idx < len(timestamp) - 1:
print("{:.2f} {:.2f} {}".format(begin, begin + duration,
char_dict[t[-1]]))
labformat.append("{:.2f} {:.2f} {}\n".format(
begin, begin + duration, char_dict[t[-1]]))
else:
non_blank = 0
for i in t:
if i != 0:
token = i
break
print("{:.2f} {:.2f} {}".format(begin, begin + duration,
char_dict[token]))
labformat.append("{:.2f} {:.2f} {}\n".format(
begin, begin + duration, char_dict[token]))
begin = begin + duration
return labformat
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='use ctc to generate alignment')
parser.add_argument('--config', required=True, help='config file')
parser.add_argument('--input_file', required=True, help='format data file')
parser.add_argument('--gpu',
type=int,
default=-1,
help='gpu id for this rank, -1 for cpu')
parser.add_argument('--checkpoint', required=True, help='checkpoint model')
parser.add_argument('--dict', required=True, help='dict file')
parser.add_argument('--result_file',
required=True,
help='alignment result file')
parser.add_argument('--batch_size', type=int, default=1, help='batch size')
parser.add_argument('--gen_praat',
action='store_true',
help='convert alignment to a praat format')
args = parser.parse_args()
print(args)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
if args.batch_size > 1:
logging.fatal('alignment mode must be running with batch_size == 1')
sys.exit(1)
with open(args.config, 'r') as fin:
configs = yaml.load(fin, Loader=yaml.FullLoader)
# Load dict
char_dict = {}
with open(args.dict, 'r') as fin:
for line in fin:
arr = line.strip().split()
assert len(arr) == 2
char_dict[int(arr[1])] = arr[0]
eos = len(char_dict) - 1
raw_wav = configs['raw_wav']
# Init dataset and data loader
ali_collate_conf = copy.deepcopy(configs['collate_conf'])
ali_collate_conf['spec_aug'] = False
ali_collate_conf['spec_sub'] = False
ali_collate_conf['feature_dither'] = False
ali_collate_conf['speed_perturb'] = False
if raw_wav:
ali_collate_conf['wav_distortion_conf']['wav_distortion_rate'] = 0
ali_collate_func = CollateFunc(**ali_collate_conf, raw_wav=raw_wav)
dataset_conf = configs.get('dataset_conf', {})
dataset_conf['batch_size'] = args.batch_size
dataset_conf['batch_type'] = 'static'
dataset_conf['sort'] = False
ali_dataset = AudioDataset(args.input_file,
**dataset_conf,
raw_wav=raw_wav)
ali_data_loader = DataLoader(ali_dataset,
collate_fn=ali_collate_func,
shuffle=False,
batch_size=1,
num_workers=0)
# Init asr model from configs
model = init_asr_model(configs)
load_checkpoint(model, args.checkpoint)
use_cuda = args.gpu >= 0 and torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
model = model.to(device)
model.eval()
with torch.no_grad(), open(args.result_file, 'w',
encoding='utf-8') as fout:
for batch_idx, batch in enumerate(ali_data_loader):
print("#" * 80)
key, feat, target, feats_length, target_length = batch
print(key)
feat = feat.to(device)
target = target.to(device)
feats_length = feats_length.to(device)
target_length = target_length.to(device)
# Let's assume B = batch_size and N = beam_size
# 1. Encoder
encoder_out, encoder_mask = model._forward_encoder(
feat, feats_length) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
ctc_probs = model.ctc.log_softmax(
encoder_out) # (1, maxlen, vocab_size)
# print(ctc_probs.size(1))
ctc_probs = ctc_probs.squeeze(0)
target = target.squeeze(0)
alignment = forced_align(ctc_probs, target)
print(alignment)
fout.write('{} {}\n'.format(key[0], alignment))
if args.gen_praat:
timestamp = get_frames_timestamp(alignment)
print(timestamp)
subsample = get_subsample(configs)
labformat = get_labformat(timestamp, subsample)
lab_path = os.path.join(os.path.dirname(args.result_file),
key[0] + ".lab")
with open(lab_path, 'w', encoding='utf-8') as f:
f.writelines(labformat)
textgrid_path = os.path.join(os.path.dirname(args.result_file),
key[0] + ".TextGrid")
generator_textgrid(maxtime=(len(alignment) + 1) * 0.01 *
subsample,
lines=labformat,
output=textgrid_path)
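# Example invocation, shown only to illustrate the flags defined above (the script name and
# all paths are placeholders):
#   python alignment.py --config train.yaml --input_file data/test/format.data \
#       --checkpoint final.pt --dict lang_char.txt --result_file align.txt \
#       --gpu 0 --gen_praat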
| 37.802752
| 79
| 0.58391
|
45b60fb0cc52ee27686bbe532a4810ebc3efe5fc
| 1,165
|
py
|
Python
|
router_mod/quagga.py
|
bachnguyenhuu/RENAT
|
f747996e1b79284ef70c51b71774098c200abc7f
|
[
"Apache-2.0"
] | 65
|
2018-01-23T00:25:52.000Z
|
2022-02-03T12:02:59.000Z
|
router_mod/quagga.py
|
bachnguyenhuu/RENAT
|
f747996e1b79284ef70c51b71774098c200abc7f
|
[
"Apache-2.0"
] | 11
|
2018-01-25T05:29:14.000Z
|
2021-01-03T12:07:33.000Z
|
router_mod/quagga.py
|
bachnguyenhuu/RENAT
|
f747996e1b79284ef70c51b71774098c200abc7f
|
[
"Apache-2.0"
] | 18
|
2018-01-25T03:09:00.000Z
|
2021-12-15T10:41:15.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2020 NTT Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Provides keywords for Juniper platform
*Notes:* Ignore the _self_ parameters when using those keywords.
"""
import Common
from robot.libraries.BuiltIn import BuiltIn
def number_of_bgp_neighbor(self,state="Established",cmd='show bgp summary'):
""" Returns number of BGP neighbor in ``state`` state
"""
output = self._vchannel.cmd(cmd).lower()
count = output.count(state.lower())
BuiltIn().log_to_console(output)
BuiltIn().log("Number of BGP neighbors in `%s` state is %d" % (state,count))
return count
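# Hedged usage sketch (illustrative only, not part of the original module): within RENAT this
# keyword is typically invoked from a Robot Framework test case, roughly as
#   ${count}=    Number Of BGP Neighbor    state=Established
# which maps onto number_of_bgp_neighbor(self, state='Established') in Python.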
| 32.361111
| 80
| 0.721888
|
de082847a14f054297938aefb2c758d740ee28fc
| 17,418
|
py
|
Python
|
python/tvm/relay/op/op_attrs.py
|
jianshitansuantong233/tvm
|
61e4fcfb842772e205e64373fb09228b0c5b7e01
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/op_attrs.py
|
jianshitansuantong233/tvm
|
61e4fcfb842772e205e64373fb09228b0c5b7e01
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/op_attrs.py
|
jianshitansuantong233/tvm
|
61e4fcfb842772e205e64373fb09228b0c5b7e01
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The attributes node used for Relay operators"""
from tvm.ir import Attrs
import tvm._ffi
@tvm._ffi.register_object("relay.attrs.Conv1DAttrs")
class Conv1DAttrs(Attrs):
"""Attributes for nn.conv1d"""
@tvm._ffi.register_object("relay.attrs.Conv2DAttrs")
class Conv2DAttrs(Attrs):
"""Attributes for nn.conv2d"""
@tvm._ffi.register_object("relay.attrs.Conv2DWinogradAttrs")
class Conv2DWinogradAttrs(Attrs):
"""Attributes for nn.contrib_conv2d_winograd_without_weight_transform"""
@tvm._ffi.register_object("relay.attrs.Conv3DAttrs")
class Conv3DAttrs(Attrs):
"""Attributes for nn.conv3d"""
@tvm._ffi.register_object("relay.attrs.Conv3DWinogradAttrs")
class Conv3DWinogradAttrs(Attrs):
"""Attributes for nn.contrib_conv3d_winograd_without_weight_transform"""
@tvm._ffi.register_object("relay.attrs.ConvWinogradWeightTransformAttrs")
class ConvWinogradWeightTransformAttrs(Attrs):
"""Attributes for nn.contrib_convNd_winograd_weight_transform"""
@tvm._ffi.register_object("relay.attrs.Conv2DWinogradNNPACKWeightTransformAttrs")
class Conv2DWinogradNNPACKWeightTransformAttrs(Attrs):
"""Attributes for nn.contrib_conv2d_winograd_nnpack_weight_transform"""
@tvm._ffi.register_object("relay.attrs.GlobalPool2DAttrs")
class GlobalPool2DAttrs(Attrs):
"""Attributes for nn.global_pool"""
@tvm._ffi.register_object("relay.attrs.BiasAddAttrs")
class BiasAddAttrs(Attrs):
"""Atttribute of nn.bias_add"""
@tvm._ffi.register_object("relay.attrs.MatmulAttrs")
class MatmulAttrs(Attrs):
"""Attributes for nn.matmul"""
@tvm._ffi.register_object("relay.attrs.DenseAttrs")
class DenseAttrs(Attrs):
"""Attributes for nn.dense"""
@tvm._ffi.register_object("relay.attrs.DensePackAttrs")
class DensePackAttrs(Attrs):
"""Attributes for nn.contrib_dense_pack"""
@tvm._ffi.register_object("relay.attrs.BatchMatmulAttrs")
class BatchMatmulAttrs(Attrs):
"""Attributes for nn.batch_matmul"""
@tvm._ffi.register_object("relay.attrs.SoftmaxAttrs")
class SoftmaxAttrs(Attrs):
"""Attributes for nn.softmax"""
@tvm._ffi.register_object("relay.attrs.FIFOBufferAttrs")
class FIFOBufferAttrs(Attrs):
"""Attributes for nn.fifo_buffer"""
@tvm._ffi.register_object("relay.attrs.UpSamplingAttrs")
class UpSamplingAttrs(Attrs):
"""Attributes for nn.upsampling"""
@tvm._ffi.register_object("relay.attrs.UpSampling3DAttrs")
class UpSampling3DAttrs(Attrs):
"""Attributes for nn.upsampling3d"""
@tvm._ffi.register_object("relay.attrs.PadAttrs")
class PadAttrs(Attrs):
"""Attributes for nn.pad"""
@tvm._ffi.register_object("relay.attrs.MirrorPadAttrs")
class MirrorPadAttrs(Attrs):
"""Attributes for nn.mirror_pad"""
@tvm._ffi.register_object("relay.attrs.LeakyReluAttrs")
class LeakyReluAttrs(Attrs):
"""Attributes for nn.leaky_relu"""
@tvm._ffi.register_object("relay.attrs.PReluAttrs")
class PReluAttrs(Attrs):
"""Attributes for nn.prelu"""
@tvm._ffi.register_object("relay.attrs.DropoutAttrs")
class DropoutAttrs(Attrs):
"""Attributes for nn.dropout"""
@tvm._ffi.register_object("relay.attrs.BatchNormAttrs")
class BatchNormAttrs(Attrs):
"""Attributes for nn.batch_norm"""
@tvm._ffi.register_object("relay.attrs.LRNAttrs")
class LRNAttrs(Attrs):
"""Attributes for nn.lrn"""
@tvm._ffi.register_object("relay.attrs.L2NormalizeAttrs")
class L2NormalizeAttrs(Attrs):
"""Attributes for nn.l2_normalize"""
@tvm._ffi.register_object("relay.attrs.DeformableConv2DAttrs")
class DeformableConv2DAttrs(Attrs):
"""Attributes for nn.deformable_conv2d"""
@tvm._ffi.register_object("relay.attrs.Resize1DAttrs")
class Resize1DAttrs(Attrs):
"""Attributes for image.resize1d"""
@tvm._ffi.register_object("relay.attrs.Resize2DAttrs")
class Resize2DAttrs(Attrs):
"""Attributes for image.resize2d"""
@tvm._ffi.register_object("relay.attrs.Resize3DAttrs")
class Resize3DAttrs(Attrs):
"""Attributes used in resize3d operators"""
@tvm._ffi.register_object("relay.attrs.CropAndResizeAttrs")
class CropAndResizeAttrs(Attrs):
"""Attributes for image.crop_and_resize"""
@tvm._ffi.register_object("relay.attrs.Dilation2DAttrs")
class Dilation2DAttrs(Attrs):
"""Attributes for image.dilation2d"""
@tvm._ffi.register_object("relay.attrs.ArgsortAttrs")
class ArgsortAttrs(Attrs):
"""Attributes for algorithm.argsort"""
@tvm._ffi.register_object("relay.attrs.OnDeviceAttrs")
class OnDeviceAttrs(Attrs):
"""Attributes for annotation.on_device"""
@tvm._ffi.register_object("relay.attrs.DebugAttrs")
class DebugAttrs(Attrs):
"""Attributes for debug"""
@tvm._ffi.register_object("relay.attrs.CompilerAttrs")
class CompilerAttrs(Attrs):
"""Attributes for compiler"""
@tvm._ffi.register_object("relay.attrs.DeviceCopyAttrs")
class DeviceCopyAttrs(Attrs):
"""Attributes for annotation.device_copy"""
@tvm._ffi.register_object("relay.attrs.CastAttrs")
class CastAttrs(Attrs):
"""Attributes for transform.cast"""
@tvm._ffi.register_object("relay.attrs.ConcatenateAttrs")
class ConcatenateAttrs(Attrs):
"""Attributes for tensor.concatenate"""
@tvm._ffi.register_object("relay.attrs.TransposeAttrs")
class TransposeAttrs(Attrs):
"""Attributes for transform.transpose"""
@tvm._ffi.register_object("relay.attrs.ReshapeAttrs")
class ReshapeAttrs(Attrs):
"""Attributes for transform.reshape"""
@tvm._ffi.register_object("relay.attrs.ReshapeLikeAttrs")
class ReshapeLikeAttrs(Attrs):
"""Attributes for transform.reshape_like"""
@tvm._ffi.register_object("relay.attrs.GatherAttrs")
class GatherAttrs(Attrs):
"""Attributes for transform.gather"""
@tvm._ffi.register_object("relay.attrs.TakeAttrs")
class TakeAttrs(Attrs):
"""Attributes for transform.take"""
@tvm._ffi.register_object("relay.attrs.InitOpAttrs")
class InitOpAttrs(Attrs):
"""Attributes for ops specifying a tensor"""
@tvm._ffi.register_object("relay.attrs.ArangeAttrs")
class ArangeAttrs(Attrs):
"""Attributes used in arange operators"""
@tvm._ffi.register_object("relay.attrs.MeshgridAttrs")
class MeshgridAttrs(Attrs):
"""Attributes used in arange operators"""
@tvm._ffi.register_object("relay.attrs.StackAttrs")
class StackAttrs(Attrs):
"""Attributes used in stack operators"""
@tvm._ffi.register_object("relay.attrs.RepeatAttrs")
class RepeatAttrs(Attrs):
"""Attributes used in repeat operators"""
@tvm._ffi.register_object("relay.attrs.TileAttrs")
class TileAttrs(Attrs):
"""Attributes used in tile operators"""
@tvm._ffi.register_object("relay.attrs.ReverseAttrs")
class ReverseAttrs(Attrs):
"""Attributes used in reverse operators"""
@tvm._ffi.register_object("relay.attrs.ReverseSequenceAttrs")
class ReverseSequenceAttrs(Attrs):
"""Attributes used in reverse sequence operators"""
@tvm._ffi.register_object("relay.attrs.SqueezeAttrs")
class SqueezeAttrs(Attrs):
"""Attributes used in squeeze operators"""
@tvm._ffi.register_object("relay.attrs.SplitAttrs")
class SplitAttrs(Attrs):
"""Attributes for transform.split"""
@tvm._ffi.register_object("relay.attrs.StridedSliceAttrs")
class StridedSliceAttrs(Attrs):
"""Attributes for transform.stranded_slice"""
@tvm._ffi.register_object("relay.attrs.SliceLikeAttrs")
class SliceLikeAttrs(Attrs):
"""Attributes for transform.slice_like"""
@tvm._ffi.register_object("relay.attrs.ClipAttrs")
class ClipAttrs(Attrs):
"""Attributes for transform.clip"""
@tvm._ffi.register_object("relay.attrs.LayoutTransformAttrs")
class LayoutTransformAttrs(Attrs):
"""Attributes for transform.layout_transform"""
@tvm._ffi.register_object("relay.attrs.ShapeOfAttrs")
class ShapeOfAttrs(Attrs):
"""Attributes for tensor.shape_of"""
@tvm._ffi.register_object("relay.attrs.MultiBoxPriorAttrs")
class MultiBoxPriorAttrs(Attrs):
"""Attributes for vision.multibox_prior"""
@tvm._ffi.register_object("relay.attrs.MultiBoxTransformLocAttrs")
class MultiBoxTransformLocAttrs(Attrs):
"""Attributes for vision.multibox_transform_loc"""
@tvm._ffi.register_object("relay.attrs.GetValidCountsAttrs")
class GetValidCountsAttrs(Attrs):
"""Attributes for vision.get_valid_counts"""
@tvm._ffi.register_object("relay.attrs.NonMaximumSuppressionAttrs")
class NonMaximumSuppressionAttrs(Attrs):
"""Attributes for vision.non_maximum_suppression"""
@tvm._ffi.register_object("relay.attrs.AllClassNonMaximumSuppressionAttrs")
class AllClassNonMaximumSuppressionAttrs(Attrs):
"""Attributes for vision.all_classnon_maximum_suppression"""
@tvm._ffi.register_object("relay.attrs.ROIAlignAttrs")
class ROIAlignAttrs(Attrs):
"""Attributes for vision.roi_align"""
@tvm._ffi.register_object("relay.attrs.ROIPoolAttrs")
class ROIPoolAttrs(Attrs):
"""Attributes for vision.roi_pool"""
@tvm._ffi.register_object("relay.attrs.YoloReorgAttrs")
class YoloReorgAttrs(Attrs):
"""Attributes for vision.yolo_reorg"""
@tvm._ffi.register_object("relay.attrs.ProposalAttrs")
class ProposalAttrs(Attrs):
"""Attributes used in proposal operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool2DAttrs")
class MaxPool2DAttrs(Attrs):
"""Attributes used in max_pool2d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool2DAttrs")
class AvgPool2DAttrs(Attrs):
"""Attributes used in avg_pool2d operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool1DAttrs")
class MaxPool1DAttrs(Attrs):
"""Attributes used in max_pool1d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool1DAttrs")
class AvgPool1DAttrs(Attrs):
"""Attributes used in avg_pool1d operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool3DAttrs")
class MaxPool3DAttrs(Attrs):
"""Attributes used in max_pool3d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool3DAttrs")
class AvgPool3DAttrs(Attrs):
"""Attributes used in avg_pool3d operators"""
@tvm._ffi.register_object("relay.attrs.BitPackAttrs")
class BitPackAttrs(Attrs):
"""Attributes used in bitpack operator"""
@tvm._ffi.register_object("relay.attrs.BinaryConv2DAttrs")
class BinaryConv2DAttrs(Attrs):
"""Attributes used in bitserial conv2d operators"""
@tvm._ffi.register_object("relay.attrs.XnorConv2DAttrs")
class XnorConv2DAttrs(Attrs):
"""Attributes used in xnor conv2d operators"""
@tvm._ffi.register_object("relay.attrs.BinaryDenseAttrs")
class BinaryDenseAttrs(Attrs):
"""Attributes used in bitserial dense operators"""
@tvm._ffi.register_object("relay.attrs.Conv2DTransposeAttrs")
class Conv2DTransposeAttrs(Attrs):
"""Attributes used in Transposed Conv2D operators"""
@tvm._ffi.register_object("relay.attrs.Conv3DTransposeAttrs")
class Conv3DTransposeAttrs(Attrs):
"""Attributes used in Transposed Conv3D operators"""
@tvm._ffi.register_object("relay.attrs.DilateAttrs")
class DilateAttrs(Attrs):
"""Attributes used in dilate operators"""
@tvm._ffi.register_object("relay.attrs.SubPixelAttrs")
class SubPixelAttrs(Attrs):
"""Attributes used in depth to space and space to depth operators"""
@tvm._ffi.register_object("relay.attrs.CorrelationAttrs")
class CorrelationAttrs(Attrs):
"""Attributes used in correlation operators"""
@tvm._ffi.register_object("relay.attrs.AdaptivePool2DAttrs")
class AdaptivePool2DAttrs(Attrs):
"""Attributes used in 2D adaptive pooling operators"""
@tvm._ffi.register_object("relay.attrs.AdaptivePool3DAttrs")
class AdaptivePool3DAttrs(Attrs):
"""Attributes used in 3D adaptive pooling operators"""
@tvm._ffi.register_object("relay.attrs.AffineGridAttrs")
class AffineGridAttrs(Attrs):
"""Attributes used in affine_grid operators"""
@tvm._ffi.register_object("relay.attrs.AllocStorageAttrs")
class AllocStorageAttrs(Attrs):
"""Attributes used in alloc_storage operators"""
@tvm._ffi.register_object("relay.attrs.AllocTensorAttrs")
class AllocTensorAttrs(Attrs):
"""Attributes used in alloc_tensor operators"""
@tvm._ffi.register_object("relay.attrs.CastHintAttrs")
class CastHintAttrs(Attrs):
"""Attributes used in cast_hint annotation operators"""
@tvm._ffi.register_object("relay.attrs.Conv1DTransposeAttrs")
class Conv1DTransposeAttrs(Attrs):
"""Attributes used in 1D transposed convolution operators"""
@tvm._ffi.register_object("relay.attrs.ExpandDimsAttrs")
class ExpandDimsAttrs(Attrs):
"""Attributes used in expand_dims operators"""
@tvm._ffi.register_object("relay.attrs.GridSampleAttrs")
class GridSampleAttrs(Attrs):
"""Attributes used in grid_sample operators"""
@tvm._ffi.register_object("relay.attrs.GroupNormAttrs")
class GroupNormAttrs(Attrs):
"""Attributes used in group norm operators"""
@tvm._ffi.register_object("relay.attrs.InstanceNormAttrs")
class InstanceNormAttrs(Attrs):
"""Attributes used in instance norm operators"""
@tvm._ffi.register_object("relay.attrs.LayerNormAttrs")
class LayerNormAttrs(Attrs):
"""Attributes used in layer norm operators"""
@tvm._ffi.register_object("relay.attrs.NdarraySizeAttrs")
class NdarraySizeAttrs(Attrs):
"""Attributes used in ndarray_size operators"""
@tvm._ffi.register_object("relay.attrs.OneHotAttrs")
class OneHotAttrs(Attrs):
"""Attributes used in one_hot operators"""
@tvm._ffi.register_object("relay.attrs.QuantizeAttrs")
class QuantizeAttrs(Attrs):
"""Attributes used in quantize operators"""
@tvm._ffi.register_object("relay.attrs.DequantizeAttrs")
class DequantizeAttrs(Attrs):
"""Attributes used in dequantize operators"""
@tvm._ffi.register_object("relay.attrs.ReduceAttrs")
class ReduceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. sum)"""
@tvm._ffi.register_object("relay.attrs.ArgReduceAttrs")
class ArgReduceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. argmin/argmax)"""
@tvm._ffi.register_object("relay.attrs.VarianceAttrs")
class VarianceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. sum)"""
@tvm._ffi.register_object("relay.attrs.RequantizeAttrs")
class RequantizeAttrs(Attrs):
"""Attributes used in requantize operators"""
@tvm._ffi.register_object("relay.attrs.ScatterAttrs")
class ScatterAttrs(Attrs):
"""Attributes used in scatter operators"""
@tvm._ffi.register_object("relay.attrs.SequenceMaskAttrs")
class SequenceMaskAttrs(Attrs):
"""Attributes used in sequence_mask operators"""
@tvm._ffi.register_object("relay.attrs.ShapeFuncAttrs")
class ShapeFuncAttrs(Attrs):
"""Attributes used in shape func operators"""
@tvm._ffi.register_object("relay.attrs.SimulatedQuantizeAttrs")
class SimulatedQuantizeAttrs(Attrs):
"""Attributes used in simulated_quantize operators"""
@tvm._ffi.register_object("relay.attrs.SparseDenseAttrs")
class SparseDenseAttrs(Attrs):
"""Attributes used in sparse_dense operators"""
@tvm._ffi.register_object("relay.attrs.SparseToDenseAttrs")
class SparseToDenseAttrs(Attrs):
"""Attributes used in sparse_to_dense operators"""
@tvm._ffi.register_object("relay.attrs.SparseTransposeAttrs")
class SparseTransposeAttrs(Attrs):
"""Attributes used in sparse_transpose operators"""
@tvm._ffi.register_object("relay.attrs.SparseConv2DAttrs")
class SparseConv2DAttrs(Attrs):
"""Attributes used in sparse_conv2d operators"""
@tvm._ffi.register_object("relay.attrs.TopkAttrs")
class TopkAttrs(Attrs):
"""Attributes used in topk operators"""
@tvm._ffi.register_object("relay.attrs.SearchSortedAttrs")
class SearchSortedAttrs(Attrs):
"""Attributes used in searchsorted operators"""
@tvm._ffi.register_object("relay.attrs.TupleGetItemAttrs")
class TupleGetItemAttrs(Attrs):
"""Attributes used in tuple item access operators"""
@tvm._ffi.register_object("relay.attrs.WithFuncIdAttrs")
class WithFuncIdAttrs(Attrs):
"""Attributes used in with_funcid annotation operators"""
@tvm._ffi.register_object("relay.attrs.SpaceToBatchNDAttrs")
class SpaceToBatchNDAttrs(Attrs):
"""Attributes used in SpaceToBatchND operators"""
@tvm._ffi.register_object("relay.attrs.BatchToSpaceNDAttrs")
class BatchToSpaceNDAttrs(Attrs):
"""Attributes used in BatchToSpaceNDAttrs operators"""
@tvm._ffi.register_object("relay.attrs.ThreefryGenerateAttrs")
class ThreefryGenerateAttrs(Attrs):
"""Attributes used in ThreefryGenerateAttrs operators"""
@tvm._ffi.register_object("relay.attrs.UniformAttrs")
class UniformAttrs(Attrs):
"""Attributes used in UniformAttrs operators"""
@tvm._ffi.register_object("relay.attrs.NLLLossAttrs")
class NLLLossAttrs(Attrs):
"""Attributes for nn.nll_loss"""
@tvm._ffi.register_object("relay.attrs.FixedPointMultiplyAttrs")
class FixedPointMultiplyAttrs(Attrs):
"""Attributes used in fixed_point_multiply operators"""
| 28.184466
| 81
| 0.768975
|
0ddb5ad568b54dc141e05d8dfce7cdb63c57a3fd
| 347
|
py
|
Python
|
spritzbot/plugins/friends.py
|
sengupta/spritzbot
|
0ca2ab453556b3a1eb98c9f40345ea8a3eba74e5
|
[
"BSD-2-Clause"
] | 1
|
2015-11-23T11:22:40.000Z
|
2015-11-23T11:22:40.000Z
|
spritzbot/plugins/friends.py
|
sengupta/spritzbot
|
0ca2ab453556b3a1eb98c9f40345ea8a3eba74e5
|
[
"BSD-2-Clause"
] | null | null | null |
spritzbot/plugins/friends.py
|
sengupta/spritzbot
|
0ca2ab453556b3a1eb98c9f40345ea8a3eba74e5
|
[
"BSD-2-Clause"
] | null | null | null |
import re
def commands():
"""Returns the list of commands that this plugin handles.
"""
return [
{
'type':'friends',
'triggers':[re.compile('.*')],
'field':'all',
}
]
def process(event):
return "You have %s friends." %(len(event['friends']))
| 21.6875
| 61
| 0.45245
|
b04e303b2542333cc55ca40a9020673bf48830d7
| 3,290
|
py
|
Python
|
acoustics/building.py
|
cnheider/python-acoustics
|
fbc87454422c41e1a39e282d7680126a6d8014dd
|
[
"BSD-3-Clause"
] | 371
|
2015-02-21T19:16:49.000Z
|
2022-03-31T03:36:56.000Z
|
acoustics/building.py
|
sky-enter/python-acoustics
|
fbc87454422c41e1a39e282d7680126a6d8014dd
|
[
"BSD-3-Clause"
] | 80
|
2015-01-03T09:48:05.000Z
|
2022-01-31T23:09:11.000Z
|
acoustics/building.py
|
sky-enter/python-acoustics
|
fbc87454422c41e1a39e282d7680126a6d8014dd
|
[
"BSD-3-Clause"
] | 109
|
2015-01-26T01:46:38.000Z
|
2022-03-23T06:41:42.000Z
|
"""
Building
========
The building module contains functions related to building acoustics.
"""
import numpy as np
def rw_curve(tl):
"""
    Calculate the curve of :math:`R_W` from a NumPy array `tl` with third
    octave data between 100 Hz and 3.15 kHz.
:param tl: Transmission Loss
"""
ref_curve = np.array([0, 3, 6, 9, 12, 15, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23])
residuals = 0
while residuals > -32:
ref_curve += 1
diff = tl - ref_curve
residuals = np.sum(np.clip(diff, np.min(diff), 0))
ref_curve -= 1
return ref_curve
def rw(tl):
"""
Calculate :math:`R_W` from a NumPy array `tl` with third octave data
between 100 Hz and 3.15 kHz.
:param tl: Transmission Loss
"""
return rw_curve(tl)[7]
def rw_c(tl):
"""
Calculate :math:`R_W + C` from a NumPy array `tl` with third octave data
between 100 Hz and 3.15 kHz.
:param tl: Transmission Loss
"""
k = np.array([-29, -26, -23, -21, -19, -17, -15, -13, -12, -11, -10, -9, -9, -9, -9, -9])
a = -10 * np.log10(np.sum(10**((k - tl) / 10)))
return a
def rw_ctr(tl):
"""
Calculate :math:`R_W + C_{tr}` from a NumPy array `tl` with third octave
data between 100 Hz and 3.15 kHz.
:param tl: Transmission Loss
"""
k_tr = np.array([-20, -20, -18, -16, -15, -14, -13, -12, -11, -9, -8, -9, -10, -11, -13, -15])
a_tr = -10 * np.log10(np.sum(10**((k_tr - tl) / 10)))
return a_tr
def stc_curve(tl):
"""
Calculate the Sound Transmission Class (STC) curve from a NumPy array `tl`
with third octave data between 125 Hz and 4 kHz.
:param tl: Transmission Loss
"""
ref_curve = np.array([0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20])
top_curve = ref_curve
res_sum = 0
while True:
diff = tl - top_curve
residuals = np.clip(diff, np.min(diff), 0)
res_sum = np.sum(residuals)
if res_sum < -32:
if np.any(residuals > -8):
top_curve -= 1
break
top_curve += 1
return top_curve
def stc(tl):
"""
Calculate the Sound Transmission Class (STC) from a NumPy array `tl` with
third octave data between 125 Hz and 4 kHz.
:param tl: Transmission Loss
"""
return stc_curve(tl)[6]
def mass_law(freq, vol_density, thickness, theta=0, c=343, rho0=1.225):
""" Calculate transmission loss according to mass law.
:param freq: Frequency of interest in Hz.
:type freq: `float` or `NumPy array`
:param vol_density: Volumetric density of material in [kg/m^3].
:type vol_density: `float`
:param thickness: Thickness of wall.
:type thickness: `float`
:param theta: Angle of incidence in degrees. Default value is `0` (normal incidence).
:type theta: `float`
:param c: Speed of sound in [m/s].
:type c: `float`
:param rho0: Density of air in kg/m^3.
:type rho0: `float`
"""
rad_freq = 2.0 * np.pi * freq
surface_density = vol_density * thickness
theta_rad = np.deg2rad(theta)
a = rad_freq * surface_density * np.cos(theta_rad) / (2 * rho0 * c)
tl_theta = 10 * np.log10(1 + a**2)
return tl_theta
__all__ = ['rw_curve', 'rw', 'rw_c', 'rw_ctr', 'stc_curve', 'stc', 'mass_law']
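# Minimal usage sketch (illustrative values, not measured data): the rating functions expect 16
# third-octave transmission-loss values between 100 Hz and 3.15 kHz.
if __name__ == '__main__':  # pragma: no cover
    tl_example = np.arange(20, 52, 2)  # 16 made-up values in dB
    print(rw(tl_example), rw_c(tl_example), rw_ctr(tl_example))
    print(mass_law(freq=500.0, vol_density=2300.0, thickness=0.1))  # ~0.1 m concrete wall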
| 27.416667
| 98
| 0.590274
|
56b3d3524025e9ec6ea0de18ef9825f82429672f
| 362
|
py
|
Python
|
runner_master/runner/models/get.py
|
bigvideoresearch/SCC
|
f26cdb6aaf248b5112812dbdac1f1b5086aebccc
|
[
"MIT"
] | 5
|
2021-09-15T21:48:55.000Z
|
2022-03-22T11:21:58.000Z
|
runner_master/runner/models/get.py
|
bigvideoresearch/SCC
|
f26cdb6aaf248b5112812dbdac1f1b5086aebccc
|
[
"MIT"
] | null | null | null |
runner_master/runner/models/get.py
|
bigvideoresearch/SCC
|
f26cdb6aaf248b5112812dbdac1f1b5086aebccc
|
[
"MIT"
] | 1
|
2021-08-20T08:40:15.000Z
|
2021-08-20T08:40:15.000Z
|
from .. import models
def get(model_name):
names = model_name.split('.')
if model_name.startswith('gluon.'):
func = getattr(models.gluon, model_name[len('gluon.'):], None)
else:
func = getattr(models, model_name, None)
if func is None:
        raise RuntimeError('model_name [{}] does not exist'.format(model_name))
return func
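# Hedged usage sketch: names resolve to attributes of the models package, so assuming a
# `resnet50` constructor is registered there the lookup is simply
#   build_fn = get('resnet50')
#   net = build_fn()
# while unknown names raise the RuntimeError above.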
| 27.846154
| 75
| 0.640884
|
de6e0a2000e20e795fe90ea9afc5121e4cf93d2a
| 390
|
py
|
Python
|
chameleon/celery.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | 3
|
2015-08-04T20:53:41.000Z
|
2020-02-14T22:58:20.000Z
|
chameleon/celery.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | 103
|
2015-01-15T14:21:00.000Z
|
2022-03-31T19:14:20.000Z
|
chameleon/celery.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | 4
|
2016-02-22T16:48:20.000Z
|
2021-01-08T17:13:21.000Z
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chameleon.settings')
from django.conf import settings
app = Celery('chameleon')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
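# Hedged usage sketch: with a broker configured in chameleon.settings, a worker is started with
#   celery -A chameleon worker -l info
# and the task above can then be exercised from a Django shell via debug_task.delay().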
| 20.526316
| 69
| 0.769231
|
7d32e9939241ebc467a8418cdf7771de842a61d7
| 13,174
|
py
|
Python
|
wouso/games/quest/tests.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 117
|
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/games/quest/tests.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 229
|
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/games/quest/tests.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 96
|
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from datetime import datetime, timedelta
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
import json
from wouso.core import scoring
from wouso.core.qpool.models import Question, Answer, Category
from models import *
from wouso.core.scoring import Coin
from wouso.core.tests import WousoTest
from wouso.core.user.models import Race
from wouso.games.quest.cpanel import quest_bonus
class QuestStatistics(WousoTest):
def setUp(self):
super(QuestStatistics, self).setUp()
self.user1 = User.objects.create(username='test1')
self.user1.set_password('test')
self.user1.save()
self.quest_user1 = self.user1.get_profile().get_extension(QuestUser)
self.user2 = User.objects.create(username='test2')
self.user2.set_password('test')
self.user2.save()
self.quest_user2 = self.user2.get_profile().get_extension(QuestUser)
scoring.setup_scoring()
category = Category.add('quest')
question1 = Question.objects.create(text='question1', answer_type='F',
category=category, active=True)
answer1 = Answer.objects.create(text='first answer', correct=True, question=question1)
question2 = Question.objects.create(text='question2', answer_type='F',
category=category, active=True)
answer2 = Answer.objects.create(text='second answer', correct=True, question=question2)
start = datetime.datetime.now()
end = datetime.datetime.now() + timedelta(days=1)
self.quest = Quest.objects.create(start=start, end=end)
self.quest.questions.add(question1)
self.quest.questions.add(question2)
def test_check_if_both_players_finished(self):
self.quest_user1.current_quest = self.quest
self.quest_user2.current_quest = self.quest
self.quest.check_answer(self.quest_user1, 'first answer')
self.quest.check_answer(self.quest_user1, 'second answer')
self.quest.check_answer(self.quest_user2, 'first answer')
self.quest.check_answer(self.quest_user2, 'second answer')
self.assertTrue(self.quest_user1.finished)
self.assertTrue(self.quest_user2.finished)
def test_only_one_player_finished(self):
self.quest_user1.current_quest = self.quest
self.quest_user2.current_quest = self.quest
self.quest.check_answer(self.quest_user1, 'first answer')
self.quest.check_answer(self.quest_user1, 'second answer')
self.assertTrue(self.quest_user1.finished)
self.assertFalse(self.quest_user2.finished)
def test_players_are_registered_if_they_start_a_quest(self):
self.quest_user1.current_quest = self.quest
self.quest_user2.current_quest = self.quest
self.quest_user1.register_quest_result()
self.quest_user2.register_quest_result()
self.assertEqual(self.quest.players_count(), 2)
self.assertEqual(self.quest.players_completed(), 0)
def test_player_is_registered_to_a_previous_quest_when_he_starts_another(self):
start = datetime.datetime.now() - timedelta(days=2)
end = datetime.datetime.now() - timedelta(days=1)
old_quest = Quest.objects.create(start=start, end=end)
self.quest_user1.current_quest = old_quest
if not self.quest_user1.current_quest.is_active:
self.quest_user1.register_quest_result()
self.quest_user1.current_quest = QuestGame.get_current()
self.assertEqual(old_quest.players_count(), 1)
def test_check_for_duplicates(self):
self.quest_user1.current_quest = self.quest
self.quest_user1.register_quest_result()
self.quest_user1.register_quest_result()
self.assertEqual(len(QuestResult.objects.all()), 1)
class QuestTestCase(WousoTest):
def setUp(self):
super(QuestTestCase, self).setUp()
self.user, new = User.objects.get_or_create(username='_test')
self.user.set_password('test')
self.user.save()
profile = self.user.get_profile()
self.quest_user = profile.get_extension(QuestUser)
scoring.setup_scoring()
def tearDown(self):
#self.user.delete()
pass
def test_check_answer(self):
cat = Category.add('quest')
question = Question.objects.create(text='test_q', answer_type='F',
category=cat, active=True)
answer1 = Answer.objects.create(text='test_a1', correct=True, question=question)
answer2 = Answer.objects.create(text='test_a2', correct=True, question=question)
start = datetime.datetime.now()
end = datetime.datetime.now() + timedelta(days=1)
quest = Quest.objects.create(start=start, end=end)
quest.questions.add(question)
self.assertEqual(quest.count, 1)
self.quest_user.current_quest = quest
#self.quest_user.current_level = 0
quest.check_answer(self.quest_user, 'Test_a2')
self.assertTrue(self.quest_user.finished)
def test_check_bonus_for_quest(self):
category = Category.add('quest')
question1 = Question.objects.create(text='question1', answer_type='F',
category=category, active=True)
answer1 = Answer.objects.create(text='first answer', correct=True, question=question1)
start = datetime.datetime.now()
end = datetime.datetime.now() + timedelta(days=1)
quest = Quest.objects.create(start=start, end=end)
quest.questions.add(question1)
self.quest_user.current_quest = quest
quest.check_answer(self.quest_user, 'first answer')
self.assertEqual(len(quest.top_results()), 1)
pl = self.user.get_profile()
pl.points = 100
pl.save()
admin = User.objects.create_superuser('admin', 'admin@myemail.com', 'admin')
fact = RequestFactory()
request = fact.get(reverse('register_results', args=[1]))
request.user = admin
#get initial points
initial_points = pl.points
#add quest bonus
response = quest_bonus(request, quest.id)
#get final points
pl = User.objects.get(username=self.user.username)
final_points = pl.get_profile().points
self.assertTrue(final_points > initial_points)
class TestQuestViews(WousoTest):
def setUp(self):
super(TestQuestViews, self).setUp()
self.admin = self._get_superuser()
self.c = Client()
self.c.login(username='admin', password='admin')
now = datetime.datetime.now()
Quest.objects.create(start=now-timedelta(days=2), end=now-timedelta(days=1),
title='Quest no. 1')
self.q = Quest.objects.create(start=now, end=now + timedelta(days=1),
title='Quest no. 2')
Quest.objects.create(start=now+timedelta(days=1), end=now + timedelta(days=2),
title='Quest no. 3')
FinalQuest.objects.create(start=now, end=now+timedelta(days=1),
title='Final Quest')
scoring.setup_scoring()
def test_quest_home_view(self):
response = self.c.get(reverse('quest_home'))
self.assertContains(response, 'Quest no. 1')
self.assertContains(response, 'Quest no. 2')
self.assertContains(response, 'Quest no. 3')
self.assertContains(response, 'Final Quest')
def test_history_view(self):
questuser1 = self._get_player(1).get_extension(QuestUser)
questuser2 = self._get_player(2).get_extension(QuestUser)
category = Category.add('quest')
question1 = Question.objects.create(text='question1', answer_type='F',
category=category, active=True)
answer1 = Answer.objects.create(text='first answer', correct=True, question=question1)
self.q.questions.add(question1)
questuser1.current_quest = self.q
questuser2.current_quest = self.q
self.q.check_answer(questuser1, 'first answer')
self.q.check_answer(questuser2, 'first answer')
c = Client()
c.login(username='testuser1', password='test')
response = c.get(reverse('quest_history'))
# 'testuser1' appears once in overall gods section,
# once in quest result table and once because he is logged in
self.assertContains(response, '>testuser1<', count=3)
# 'testuser2' appears only in overall gods and quest result table
self.assertContains(response, '>testuser2<', count=2)
class FinalQuestTestCase(WousoTest):
def test_final_bonus(self):
u1 = self._get_player(1).get_extension(QuestUser)
u2 = self._get_player(2).get_extension(QuestUser)
r = Race.objects.create(name='rasa_buna', can_play=True)
Formula.add('finalquest-ok', expression='points=50*({level}+1)/{level_users}')
Formula.add('level-gold', expression='gold=0')
Coin.add('points')
Coin.add('gold')
final = FinalQuest.objects.create(start=datetime.datetime.now(), end=datetime.datetime.now())
question = Question.objects.create(text='test', answer_type='F')
final.questions.add(question)
question = Question.objects.create(text='test', answer_type='F')
final.questions.add(question)
u1.current_level = 1; u1.race = r; u1.current_quest = final
u1.save()
u2.current_level = 1; u2.race = r; u2.current_quest = final
u2.save()
final.give_level_bonus()
u1 = QuestUser.objects.get(pk=u1.pk)
self.assertEqual(u1.points, 50)
u2 = QuestUser.objects.get(pk=u2.pk)
self.assertEqual(u2.points, 50)
def test_final_task_call_checker(self):
from django.conf import settings
settings.FINAL_QUEST_CHECKER_PATH = os.path.join(os.path.dirname(__file__), 'tests')
final = FinalQuest.objects.create(start=datetime.datetime.now(), end=datetime.datetime.now(), type=TYPE_CHECKER)
question = Question.objects.create(text='test', answer_type='F')
final.questions.add(question)
u1 = self._get_player(1).get_extension(QuestUser)
self.assertFalse(final.answer_correct(0, question, u1.user.username + "wrong", u1))
self.assertTrue(final.answer_correct(0, question, u1.user.username, u1))
def test_final_quest_results_view(self):
u1 = self._get_player(1).get_extension(QuestUser)
u2 = self._get_player(2).get_extension(QuestUser)
r = Race.objects.create(name='rasa_buna', can_play=True)
Formula.add('finalquest-ok', expression='points=50*({level}+1)/{level_users}')
Formula.add('level-gold', expression='gold=0')
Coin.add('points')
Coin.add('gold')
final = FinalQuest.objects.create(start=datetime.datetime.now(), end=datetime.datetime.now())
question = Question.objects.create(text='test', answer_type='F')
final.questions.add(question)
question = Question.objects.create(text='test', answer_type='F')
final.questions.add(question)
u1.current_level = 1; u1.race = r; u1.current_quest = final
u1.save()
u2.current_level = 1; u2.race = r; u2.current_quest = final
u2.save()
c = Client()
admin = User.objects.create_superuser('admin', 'admin@myemail.com', 'admin')
c.login(username='admin', password='admin')
response = c.get('/cpanel/games/quest/final/results/')
self.assertContains(response, 'testuser1')
self.assertContains(response, 'testuser2')
# API tests
class QuestAPITestCase(WousoTest):
def test_info(self):
quser = self._get_player(1).get_extension(QuestUser)
quest = Quest.objects.create(start=datetime.datetime.now(), end=datetime.datetime.now()+timedelta(days=1))
quser.set_current(quest)
self._client_superuser()
response = self.client.get('/api/quest/admin/quest=%d/username=%s/' % (quest.id, quser.user.username))
data = json.loads(response.content)
self.assertEqual(data['user']['id'], quser.id)
def test_level_increment(self):
quser = self._get_player(1).get_extension(QuestUser)
quest = Quest.objects.create(start=datetime.datetime.now(), end=datetime.datetime.now()+timedelta(days=1))
quser.set_current(quest)
formula = Formula.add('quest-ok')
self._client_superuser()
response = self.client.post('/api/quest/admin/quest=%d/username=%s/' % (quest.id, quser.user.username))
data = json.loads(response.content)
self.assertEqual(data['current_level'], quser.current_level + 1)
response = self.client.post('/api/quest/admin/quest=%d/username=%s/' % (quest.id, quser.user.username))
data = json.loads(response.content)
self.assertEqual(data['current_level'], quser.current_level + 2)
| 45.271478
| 120
| 0.661151
|
0a55f23444ed3c655a579ef4df8faf76611caa39
| 5,540
|
py
|
Python
|
horovod/spark/keras/bare.py
|
hcyang99/horovod
|
825cc197468548da47dcd38872d5b4ba6e6a125b
|
[
"Apache-2.0"
] | 1
|
2020-05-07T08:26:36.000Z
|
2020-05-07T08:26:36.000Z
|
horovod/spark/keras/bare.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | null | null | null |
horovod/spark/keras/bare.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
import json
import warnings
import numpy as np
from keras import backend as K
from keras import optimizers
def save_bare_keras_optimizer(optimizer, h5py_file):
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
# Arguments
obj: the object to serialize
# Returns
JSON-serializable structure representing `obj`.
# Raises
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__,
'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable: %s' % (obj,))
if isinstance(optimizer, optimizers.TFOptimizer):
warnings.warn(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
            'as part of the model save file. '
'You will have to compile your model again '
'after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
h5py_file['training_config'] = json.dumps({
'optimizer_config': {
'class_name': optimizer.__class__.__name__,
'config': optimizer.get_config()
},
}, default=get_json_type).encode('utf8')
symbolic_weights = getattr(optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = h5py_file['optimizer_weights']
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
if hasattr(w, 'name') and w.name:
name = str(w.name)
else:
name = 'param_' + str(i)
if name in weight_names:
idx = 2
unique_name = name + '_1'
while unique_name in weight_names:
unique_name = name + '_' + str(idx)
idx += 1
name = unique_name
weight_names.append(name.encode('utf8'))
optimizer_weights_group['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
optimizer_weights_group[name] = val
def load_bare_keras_optimizer(h5py_file, custom_objects=None):
if not custom_objects:
custom_objects = {}
def convert_custom_objects(obj):
"""Handles custom object lookup.
Arguments:
obj: object, dict, or list.
Returns:
The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for key, value in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if obj in custom_objects:
return custom_objects[obj]
return obj
optimizer, optimizer_weight_values = None, None
# instantiate optimizer
training_config = h5py_file.get('training_config')
training_config = json.loads(training_config[()].decode('utf-8'))
optimizer_config = training_config['optimizer_config']
optimizer = optimizers.deserialize(optimizer_config, custom_objects=custom_objects)
if 'optimizer_weights' in h5py_file:
optimizer_weights_group = h5py_file['optimizer_weights']
optimizer_weight_names = [
n.decode('utf8')
for n in optimizer_weights_group.attrs['weight_names']
]
optimizer_weight_values = [optimizer_weights_group[n].value for n in
optimizer_weight_names]
if optimizer_weight_values:
optimizer.set_weights(optimizer_weight_values)
return optimizer
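# Hedged round-trip sketch (assumes h5py is installed; for an optimizer that already carries
# slot weights, an 'optimizer_weights' group must be created in the file before saving):
#   import h5py
#   opt = optimizers.Adam(0.001)  # freshly built, so it has no slot weights yet
#   with h5py.File('optimizer.h5', 'w') as f:
#       save_bare_keras_optimizer(opt, f)
#   with h5py.File('optimizer.h5', 'r') as f:
#       restored = load_bare_keras_optimizer(f)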
| 35.974026
| 87
| 0.598556
|
cc3fa632de2a78e548a25b3fdb96497a418e1d52
| 16,927
|
py
|
Python
|
cherrypy/test/test_tools.py
|
abancu/core
|
e110a1df32ec8bf67f007960e61df55f0a926219
|
[
"MIT"
] | null | null | null |
cherrypy/test/test_tools.py
|
abancu/core
|
e110a1df32ec8bf67f007960e61df55f0a926219
|
[
"MIT"
] | null | null | null |
cherrypy/test/test_tools.py
|
abancu/core
|
e110a1df32ec8bf67f007960e61df55f0a926219
|
[
"MIT"
] | null | null | null |
"""Test the various means of instantiating and invoking tools."""
import gzip
import sys
import unittest
import io
from cherrypy._cpcompat import copyitems, itervalues
from cherrypy._cpcompat import IncompleteRead, ntob, ntou, xrange
import time
timeout = 0.2
import types
import six
import cherrypy
from cherrypy import tools
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
from cherrypy.test import helper
class ToolTests(helper.CPWebCase):
def setup_server():
# Put check_access in a custom toolbox with its own namespace
myauthtools = cherrypy._cptools.Toolbox("myauth")
def check_access(default=False):
if not getattr(cherrypy.request, "userid", default):
raise cherrypy.HTTPError(401)
myauthtools.check_access = cherrypy.Tool(
'before_request_body', check_access)
def numerify():
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NumTool(cherrypy.Tool):
def _setup(self):
def makemap():
m = self._merged_args().get("map", {})
cherrypy.request.numerify_map = copyitems(m)
cherrypy.request.hooks.attach('on_start_resource', makemap)
def critical():
cherrypy.request.error_response = cherrypy.HTTPError(
502).set_response
critical.failsafe = True
cherrypy.request.hooks.attach('on_start_resource', critical)
cherrypy.request.hooks.attach(self._point, self.callable)
tools.numerify = NumTool('before_finalize', numerify)
# It's not mandatory to inherit from cherrypy.Tool.
class NadsatTool:
def __init__(self):
self.ended = {}
self._name = "nadsat"
def nadsat(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace(ntob("good"), ntob("horrorshow"))
chunk = chunk.replace(ntob("piece"), ntob("lomtick"))
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
nadsat.priority = 0
def cleanup(self):
# This runs after the request has been completely written out.
cherrypy.response.body = [ntob("razdrez")]
id = cherrypy.request.params.get("id")
if id:
self.ended[id] = True
cleanup.failsafe = True
def _setup(self):
cherrypy.request.hooks.attach('before_finalize', self.nadsat)
cherrypy.request.hooks.attach('on_end_request', self.cleanup)
tools.nadsat = NadsatTool()
def pipe_body():
cherrypy.request.process_request_body = False
clen = int(cherrypy.request.headers['Content-Length'])
cherrypy.request.body = cherrypy.request.rfile.read(clen)
# Assert that we can use a callable object instead of a function.
class Rotator(object):
def __call__(self, scale):
r = cherrypy.response
r.collapse_body()
if six.PY3:
r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
else:
r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]]
cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
def stream_handler(next_handler, *args, **kwargs):
assert cherrypy.request.config.get('tools.streamer.arg') == 'arg value'
cherrypy.response.output = o = io.BytesIO()
try:
response = next_handler(*args, **kwargs)
# Ignore the response and return our accumulated output
# instead.
return o.getvalue()
finally:
o.close()
cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
stream_handler)
class Root:
@cherrypy.expose
def index(self):
return "Howdy earth!"
@cherrypy.expose
@cherrypy.config(**{'tools.streamer.on': True, 'tools.streamer.arg': 'arg value'})
def tarfile(self):
assert cherrypy.request.config.get('tools.streamer.arg') == 'arg value'
cherrypy.response.output.write(ntob('I am '))
cherrypy.response.output.write(ntob('a tarfile'))
@cherrypy.expose
def euro(self):
hooks = list(cherrypy.request.hooks['before_finalize'])
hooks.sort()
cbnames = [x.callback.__name__ for x in hooks]
assert cbnames == ['gzip'], cbnames
priorities = [x.priority for x in hooks]
assert priorities == [80], priorities
yield ntou("Hello,")
yield ntou("world")
yield europoundUnicode
# Bare hooks
@cherrypy.expose
@cherrypy.config(**{'hooks.before_request_body': pipe_body})
def pipe(self):
return cherrypy.request.body
# Multiple decorators; include kwargs just for fun.
# Note that rotator must run before gzip.
@cherrypy.expose
def decorated_euro(self, *vpath):
yield ntou("Hello,")
yield ntou("world")
yield europoundUnicode
decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
decorated_euro = tools.rotator(scale=3)(decorated_euro)
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each
subclass, and adds an instance of the subclass as an attribute
of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in itervalues(dct):
if isinstance(value, types.FunctionType):
cherrypy.expose(value)
setattr(root, name.lower(), cls())
Test = TestType('Test', (object,), {})
# METHOD ONE:
# Declare Tools in _cp_config
@cherrypy.config(**{"tools.nadsat.on": True})
class Demo(Test):
def index(self, id=None):
return "A good piece of cherry pie"
def ended(self, id):
return repr(tools.nadsat.ended[id])
def err(self, id=None):
raise ValueError()
def errinstream(self, id=None):
yield "nonconfidential"
raise ValueError()
yield "confidential"
# METHOD TWO: decorator using Tool()
# We support Python 2.3, but the @-deco syntax would look like
# this:
# @tools.check_access()
def restricted(self):
return "Welcome!"
restricted = myauthtools.check_access()(restricted)
userid = restricted
def err_in_onstart(self):
return "success!"
@cherrypy.config(**{'response.stream': True})
def stream(self, id=None):
for x in xrange(100000000):
yield str(x)
conf = {
# METHOD THREE:
# Declare Tools in detached config
'/demo': {
'tools.numerify.on': True,
'tools.numerify.map': {ntob("pie"): ntob("3.14159")},
},
'/demo/restricted': {
'request.show_tracebacks': False,
},
'/demo/userid': {
'request.show_tracebacks': False,
'myauth.check_access.default': True,
},
'/demo/errinstream': {
'response.stream': True,
},
'/demo/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'tools.numerify.map': "pie->3.14159"
},
# Combined tools
'/euro': {
'tools.gzip.on': True,
'tools.encode.on': True,
},
# Priority specified in config
'/decorated_euro/subpath': {
'tools.gzip.priority': 10,
},
# Handler wrappers
'/tarfile': {'tools.streamer.on': True}
}
app = cherrypy.tree.mount(root, config=conf)
app.request_class.namespaces['myauth'] = myauthtools
if sys.version_info >= (2, 5):
from cherrypy.test import _test_decorators
root.tooldecs = _test_decorators.ToolExamples()
setup_server = staticmethod(setup_server)
def testHookErrors(self):
self.getPage("/demo/?id=1")
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody("A horrorshow lomtick of cherry 3.14159")
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage("/demo/ended/1")
self.assertBody("True")
valerr = '\n raise ValueError()\nValueError'
self.getPage("/demo/err?id=3")
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(502, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage("/demo/ended/3")
self.assertBody("True")
# If body is "razdrez", then on_end_request is being called too early.
if (cherrypy.server.protocol_version == "HTTP/1.0" or
getattr(cherrypy.server, "using_apache", False)):
self.getPage("/demo/errinstream?id=5")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus("200 OK")
self.assertBody("nonconfidential")
else:
# Because this error is raised after the response body has
# started, and because it's chunked output, an error is raised by
# the HTTP client when it encounters incomplete output.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
"/demo/errinstream?id=5")
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage("/demo/ended/5")
self.assertBody("True")
# Test the "__call__" technique (compile-time decorator).
self.getPage("/demo/restricted")
self.assertErrorPage(401)
# Test compile-time decorator with kwargs from config.
self.getPage("/demo/userid")
self.assertBody("Welcome!")
def testEndRequestOnDrop(self):
old_timeout = None
try:
httpserver = cherrypy.server.httpserver
old_timeout = httpserver.timeout
except (AttributeError, IndexError):
return self.skip()
try:
httpserver.timeout = timeout
# Test that on_end_request is called even if the client drops.
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest("GET", "/demo/stream?id=9", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
# Skip the rest of the request and close the conn. This will
# cause the server's active socket to error, which *should*
# result in the request being aborted, and request.close being
# called all the way up the stack (including WSGI middleware),
# eventually calling our on_end_request hook.
finally:
self.persistent = False
time.sleep(timeout * 2)
# Test that the on_end_request hook was called.
self.getPage("/demo/ended/9")
self.assertBody("True")
finally:
if old_timeout is not None:
httpserver.timeout = old_timeout
def testGuaranteedHooks(self):
# The 'critical' on_start_resource hook is 'failsafe' (guaranteed
# to run even if there are failures in other on_start methods).
# This is NOT true of the other hooks.
# Here, we have set up a failure in NumerifyTool.numerify_map,
# but our 'critical' hook should run and set the error to 502.
self.getPage("/demo/err_in_onstart")
self.assertErrorPage(502)
self.assertInBody(
"AttributeError: 'str' object has no attribute 'items'")
def testCombinedTools(self):
expectedResult = (ntou("Hello,world") +
europoundUnicode).encode('utf-8')
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(expectedResult)
zfile.close()
self.getPage("/euro",
headers=[
("Accept-Encoding", "gzip"),
("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")])
self.assertInBody(zbuf.getvalue()[:3])
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
zfile.write(expectedResult)
zfile.close()
self.getPage("/decorated_euro", headers=[("Accept-Encoding", "gzip")])
self.assertInBody(zbuf.getvalue()[:3])
# This returns a different value because gzip's priority was
# lowered in conf, allowing the rotator to run after gzip.
# Of course, we don't want breakage in production apps,
# but it proves the priority was changed.
self.getPage("/decorated_euro/subpath",
headers=[("Accept-Encoding", "gzip")])
if six.PY3:
self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))
else:
self.assertInBody(''.join([chr((ord(x) + 3) % 256)
for x in zbuf.getvalue()]))
def testBareHooks(self):
content = "bit of a pain in me gulliver"
self.getPage("/pipe",
headers=[("Content-Length", str(len(content))),
("Content-Type", "text/plain")],
method="POST", body=content)
self.assertBody(content)
def testHandlerWrapperTool(self):
self.getPage("/tarfile")
self.assertBody("I am a tarfile")
def testToolWithConfig(self):
if not sys.version_info >= (2, 5):
return self.skip("skipped (Python 2.5+ only)")
self.getPage('/tooldecs/blah')
self.assertHeader('Content-Type', 'application/data')
def testWarnToolOn(self):
# get
try:
cherrypy.tools.numerify.on
except AttributeError:
pass
else:
raise AssertionError("Tool.on did not error as it should have.")
# set
try:
cherrypy.tools.numerify.on = True
except AttributeError:
pass
else:
raise AssertionError("Tool.on did not error as it should have.")
def testDecorator(self):
@cherrypy.tools.register('on_start_resource')
def example():
pass
self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')
@cherrypy.tools.register('before_finalize', name='renamed', priority=60)
def example():
pass
self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
self.assertEqual(cherrypy.tools.renamed._priority, 60)
class SessionAuthTest(unittest.TestCase):
def test_login_screen_returns_bytes(self):
"""
login_screen must return bytes even if unicode parameters are passed.
Issue 1132 revealed that login_screen would return unicode if the
username and password were unicode.
"""
sa = cherrypy.lib.cptools.SessionAuth()
res = sa.login_screen(None, username=six.text_type('nobody'),
password=six.text_type('anypass'))
self.assertTrue(isinstance(res, bytes))
| 37.952915
| 94
| 0.561174
|
122c5123a067f57e701f4bd14752028b058979d9
| 891
|
py
|
Python
|
dns/rdtypes/IN/NSAP_PTR.py
|
preo/dnspython
|
465785f85f87508209117264c677080e901e957c
|
[
"0BSD"
] | null | null | null |
dns/rdtypes/IN/NSAP_PTR.py
|
preo/dnspython
|
465785f85f87508209117264c677080e901e957c
|
[
"0BSD"
] | null | null | null |
dns/rdtypes/IN/NSAP_PTR.py
|
preo/dnspython
|
465785f85f87508209117264c677080e901e957c
|
[
"0BSD"
] | null | null | null |
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):
"""NSAP-PTR record"""
| 44.55
| 72
| 0.780022
|
0364160c1903589508f9e772e99e3fa7b3c3eb67
| 27,807
|
py
|
Python
|
scripts/irods/test/test_ichksum.py
|
JustinKyleJames/irods
|
59e9db75200e95796ec51ec20eb3b185d9e4b5f5
|
[
"BSD-3-Clause"
] | 333
|
2015-01-15T15:42:29.000Z
|
2022-03-19T19:16:15.000Z
|
scripts/irods/test/test_ichksum.py
|
JustinKyleJames/irods
|
59e9db75200e95796ec51ec20eb3b185d9e4b5f5
|
[
"BSD-3-Clause"
] | 3,551
|
2015-01-02T19:55:40.000Z
|
2022-03-31T21:24:56.000Z
|
scripts/irods/test/test_ichksum.py
|
JustinKyleJames/irods
|
59e9db75200e95796ec51ec20eb3b185d9e4b5f5
|
[
"BSD-3-Clause"
] | 148
|
2015-01-31T16:13:46.000Z
|
2022-03-23T20:23:43.000Z
|
from __future__ import print_function
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
import shutil
import re
import hashlib
import base64
from . import session
from . import settings
from . import resource_suite
from .. import lib
from .. import test
from .. import paths
from ..configuration import IrodsConfig
class Test_Ichksum(resource_suite.ResourceBase, unittest.TestCase):
plugin_name = IrodsConfig().default_rule_engine_plugin
def setUp(self):
super(Test_Ichksum, self).setUp()
def tearDown(self):
super(Test_Ichksum, self).tearDown()
def test_ichksum_data_obj(self):
filename = 'test_ichksum_data_obj'
lib.make_file(filename, 1024, 'arbitrary')
file_chksum = lib.file_digest(filename, 'sha256', encoding='base64')
self.admin.assert_icommand(['iput', filename])
self.admin.assert_icommand(['ichksum', filename], 'STDOUT_SINGLELINE', file_chksum)
def test_ichksum_coll(self):
collname = 'test_ichksum_coll'
filename = 'file'
lib.make_file(filename, 1024, 'arbitrary')
file_chksum = lib.file_digest(filename, 'sha256', encoding='base64')
self.admin.assert_icommand(['imkdir', collname])
self.admin.assert_icommand(['iput', filename, collname + '/' + filename])
self.admin.assert_icommand(['ichksum', collname], 'STDOUT_SINGLELINE', file_chksum)
def test_ichksum_recursive(self):
collname_1 = 'test_ichksum_recursive'
filename_1 = 'file1'
collname_2 = 'subdir'
filename_2 = 'file2'
collname_3 = 'subsubdir'
filename_3 = 'file_3'
lib.make_file(filename_1, 256, 'arbitrary')
lib.make_file(filename_2, 512, 'arbitrary')
lib.make_file(filename_3, 1024, 'arbitrary')
file_chksum_1 = lib.file_digest(filename_1, 'sha256', encoding='base64')
file_chksum_2 = lib.file_digest(filename_2, 'sha256', encoding='base64')
file_chksum_3 = lib.file_digest(filename_3, 'sha256', encoding='base64')
self.admin.assert_icommand(['imkdir', collname_1])
self.admin.assert_icommand(['iput', filename_1, collname_1 + '/' + filename_1])
self.admin.assert_icommand(['imkdir', collname_1 + '/' + collname_2])
self.admin.assert_icommand(['iput', filename_2, collname_1 + '/' + collname_2 + '/' + filename_2])
self.admin.assert_icommand(['imkdir', collname_1 + '/' + collname_2 + '/' + collname_3])
self.admin.assert_icommand(['iput', filename_3, collname_1 + '/' + collname_2 + '/' + collname_3 + '/' + filename_3])
self.admin.assert_icommand(['ichksum', collname_1], 'STDOUT_MULTILINE', [file_chksum_3, file_chksum_2, file_chksum_1])
@unittest.skipIf(test.settings.RUN_IN_TOPOLOGY, "Skip for topology testing")
def test_ichksum_truncating_printed_filename__issue_3085(self):
filename = 'issue_3085_012345678901234567890123456789.txt'
filename_path = os.path.join(self.admin.local_session_dir, 'issue_3085_012345678901234567890123456789.txt')
lib.make_file(filename_path, 1024, 'arbitrary')
self.admin.assert_icommand(['iput', filename_path])
self.admin.assert_icommand(['ichksum', filename], 'STDOUT_SINGLELINE', filename)
@unittest.skipIf(test.settings.RUN_IN_TOPOLOGY, "Skip for topology testing")
@unittest.skipUnless(plugin_name == 'irods_rule_engine_plugin-irods_rule_language', 'only applicable for irods_rule_language REP')
def test_ichksum_fails_with_dyn_pep__issue_3485(self):
config = IrodsConfig()
# Create a file and make iRODS aware of it.
# If this were done after adding the rule to the rulebase, an
# error would be returned.
filename = 'test_file_issue_3485.txt'
lib.make_file(filename, 1024, 'arbitrary')
chksum = lib.file_digest(filename, 'sha256', encoding='base64')
self.admin.assert_icommand(['iput', filename])
with lib.file_backed_up(config.server_config_path):
# Create a new rule file that contains a pep with the wrong signature.
# This particular pep is required in order to show that a more appropriate
# error code/status is returned instead of [SYS_INTERNAL_ERR].
rule_filename = 'issue_3485_rule.re'
rule_filename_path = os.path.join(paths.core_re_directory(), rule_filename)
with open(rule_filename_path, 'w') as f:
f.write("pep_database_mod_data_obj_meta_pre() {}\n")
# Add the newly created rule file to the rule engine rulebase.
rule_engine = config.server_config['plugin_configuration']['rule_engines'][0]
rule_engine['plugin_specific_configuration']['re_rulebase_set'][0] = rule_filename[:-3]
lib.update_json_file_from_dict(config.server_config_path, config.server_config)
self.admin.assert_icommand(['ichksum', filename], 'STDERR', 'status = -1097000 NO_RULE_OR_MSI_FUNCTION_FOUND_ERR')
# Update the rule to the correct signature.
# Running ichksum after this change should work as expected and print
# the requested information.
with open(rule_filename_path, 'w') as f:
f.write("pep_database_mod_data_obj_meta_pre(*a, *b, *c, *d, *e) {}\n")
self.admin.assert_icommand(['ichksum', filename], 'STDOUT', [filename, chksum])
os.remove(rule_filename_path)
self.admin.assert_icommand(['irm', '-f', filename])
def test_chksum_catalog_verify_not_exhausting_statements__issue_4732(self):
Statement_Table_Size = 50
filecount = Statement_Table_Size + 10
dirname = 'ichksum_targets_4732'
lib.create_directory_of_small_files(dirname, filecount)
try:
_,err,rcAdm = self.admin.run_icommand(['iput', '-r','-k', dirname])
_,err,rcUsr = self.user0.run_icommand(['iput', '-r','-k', dirname])
self.assertTrue(rcAdm == 0 and rcUsr == 0, '**** could not iput -r -k directory/ies in test for #4732')
tests = [ (self.admin, ['ichksum','--verify','-r','-KM',dirname], 'CAT_STATEMENT_TABLE_FULL' ),
(self.admin, ['ichksum','--verify','-r','-K', dirname], 'CAT_NO_ACCESS_PERMISSION' ),
(self.user0, ['ichksum','--verify','-r','-K', dirname], 'CAT_NO_ACCESS_PERMISSION' ) ]
for user,cmd,guard_err in tests:
_,err,rc = user.run_icommand( cmd )
self.assertTrue (guard_err not in err, 'ichksum incurred statement table exhaustion #4732')
self.assertTrue (err == '', 'ichksum produced unwelcome STDERR output')
self.assertTrue (rc == 0, 'ichksum returned nonzero error code during routine operation')
finally:
clean_exit = True
try:
self.user0.assert_icommand(['irm', '-rf', dirname])
except:
clean_exit = False
try:
self.admin.assert_icommand(['irm', '-rf', dirname])
except:
clean_exit = False
shutil.rmtree(dirname, ignore_errors=True)
self.assertTrue (clean_exit, '**** inadequate clean-up in test for #4732 ***')
def test_ichksum_reports_incompatible_params__issue_5252(self):
data_object = 'foo'
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['ichksum', '-n', '0', '-R', 'demoResc', data_object], 'STDERR', ["the 'n' and 'R' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-a', '-n', '0', data_object], 'STDERR', ["the 'n' and 'a' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-a', '-R', 'demoResc', data_object], 'STDERR', ["the 'R' and 'a' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-K', '-f', data_object], 'STDERR', ["the 'K' and 'f' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-K', '-a', '-n', '0', data_object], 'STDERR', ["the 'n' and 'a' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-K', '-a', '-R', 'demoResc', data_object], 'STDERR', ["the 'R' and 'a' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '-K', '--silent', data_object], 'STDERR', ["the 'K' and 'silent' option cannot be used together"])
self.admin.assert_icommand(['ichksum', '--verify', '--silent', data_object], 'STDERR', ["the 'verify' and 'silent' option cannot be used together"])
def test_ichksum_reports_number_of_replicas_skipped__issue_5252(self):
other_resc = 'issue_5252_resc'
data_object = os.path.join(self.admin.session_collection, 'foo')
try:
lib.create_ufs_resource(other_resc, self.admin)
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['irepl', '-R', other_resc, data_object])
# Show that ichksum reports about skipping replicas that are not good.
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '0'])
self.admin.assert_icommand_fail(['ichksum', '-K', data_object], 'STDOUT', ['INFO: Number of replicas skipped: 1'])
# Show that ichksum reports information about good replicas when they exist.
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '1'])
out, _, ec = self.admin.run_icommand(['ichksum', '-K', data_object])
self.assertNotEqual(ec, 0)
self.assertNotIn('INFO: Number of replicas skipped:', out)
self.assertIn('WARNING: No checksum available for replica [0].', out)
self.assertIn('WARNING: No checksum available for replica [1].', out)
finally:
self.admin.run_icommand(['irm', '-f', data_object])
self.admin.run_icommand(['iadmin', 'rmresc', other_resc])
def test_ichksum_reports_when_replicas_physical_size_does_not_match_size_in_catalog__issue_5252(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_SIZE', '1'])
self.admin.assert_icommand_fail(['ichksum', '-K', data_object], 'STDOUT', ['ERROR: Physical size does not match size in catalog for replica [0].'])
# Show that --verify is an alias for -K.
self.admin.assert_icommand_fail(['ichksum', '--verify', data_object], 'STDOUT', ['ERROR: Physical size does not match size in catalog for replica [0].'])
# Show that the error is printed when targeting a specific replica.
self.admin.assert_icommand_fail(['ichksum', '-K', '-n0', data_object], 'STDOUT', ['ERROR: Physical size does not match size in catalog for replica [0].'])
def test_ichksum_reports_when_replicas_are_missing_checksums__issue_5252(self):
data_object = 'foo'
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['irepl', '-R', self.testresc, data_object])
self.admin.assert_icommand(['ichksum', '-n0', data_object], 'STDOUT', [data_object + ' sha2:'])
self.admin.assert_icommand_fail(['ichksum', '-K', data_object], 'STDOUT', ['WARNING: No checksum available for replica [1].'])
# Show that --verify is an alias for -K.
self.admin.assert_icommand_fail(['ichksum', '--verify', data_object], 'STDOUT', ['WARNING: No checksum available for replica [1].'])
# Show that the warning is printed when targeting a specific replica.
self.admin.assert_icommand_fail(['ichksum', '-K', '-n1', data_object], 'STDOUT', ['WARNING: No checksum available for replica [1].'])
def test_ichksum_reports_when_replicas_computed_checksum_does_not_match_checksum_in_catalog__issue_5252(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['ichksum', data_object], 'STDOUT', [os.path.basename(data_object) + ' sha2:'])
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_CHECKSUM', 'sha2:BAD_CHECKSUM'])
self.admin.assert_icommand_fail(['ichksum', '-K', data_object], 'STDOUT', ['ERROR: Computed checksum does not match what is in the catalog for replica [0].'])
# Show that the error is printed when targeting a specific replica.
self.admin.assert_icommand_fail(['ichksum', '-K', '-n0', data_object], 'STDOUT', ['ERROR: Computed checksum does not match what is in the catalog for replica [0].'])
# Show that when --no-compute is passed, verification mode will not verify if the checksum in the
# catalog is correct or not, regardless of whether the client is targeting one or all replicas.
self.admin.assert_icommand(['ichksum', '-K', '--no-compute', data_object])
self.admin.assert_icommand(['ichksum', '-K', '--no-compute', '-n0', data_object])
@unittest.skipIf(test.settings.TOPOLOGY_FROM_RESOURCE_SERVER, "Skip for topology testing from resource server")
def test_ichksum_reports_when_replicas_do_not_share_identical_checksums__issue_5252(self):
data_object = 'foo'
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['irepl', '-R', self.testresc, data_object])
# Change the contents of replica 0.
# This will cause ichksum to report a warning.
gql = "select DATA_PATH where COLL_NAME = '{0}' and DATA_NAME = '{1}' and DATA_REPL_NUM = '0'".format(self.admin.session_collection, data_object)
out, _, ec = self.admin.run_icommand(['iquest', '%s', gql])
self.assertEqual(ec, 0)
out = out.strip()
self.assertGreater(len(out), 0)
with open(out, 'r+') as f:
f.write('test')
self.admin.assert_icommand(['ichksum', '-a', data_object], 'STDOUT', ['WARNING: Data object has replicas with different checksums.'])
def test_ichksum_computes_checksum_for_highest_voted_replica_when_no_options_are_present__issue_5252(self):
other_resc = 'issue_5252_resc'
other_ufs = other_resc + '_ufs'
data_object = 'foo'
try:
# Construct a passthru hierarchy which will influence the vote such that the test
# always produces the same results. Replica 0 will always land on the default
# resource for the session, which is demoResc by default. Replica 1 will then
# land on the passthru hierarchy. The ichksum will vote higher on the passthru
# resource (high read weight) and so Replica 1 will receive the checksum.
lib.create_passthru_resource(other_resc, self.admin)
lib.create_ufs_resource(other_ufs, self.admin)
lib.add_child_resource(other_resc, other_ufs, self.admin)
self.admin.assert_icommand(['iadmin', 'modresc', other_resc, 'context', 'write=0.1;read=1.50'])
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['irepl', '-R', other_resc, data_object])
self.admin.assert_icommand(['ichksum', data_object], 'STDOUT', [data_object + ' sha2:'])
# Show that only one of the two replicas has a checksum.
out, _, ec = self.admin.run_icommand(['ichksum', '-K', '-n0', data_object])
self.assertNotEqual(ec, 0)
self.assertGreater(len(out), 0)
self.assertIn('WARNING: No checksum available for replica [0].', out)
out, _, ec = self.admin.run_icommand(['ichksum', '-K', '-n1', data_object])
self.assertEqual(ec, 0)
self.assertEqual(len(out), 0)
self.assertNotIn('WARNING: No checksum available for replica [1].', out)
finally:
self.admin.run_icommand(['irm', '-f', data_object])
lib.remove_child_resource(other_resc, other_ufs, self.admin)
self.admin.run_icommand(['iadmin', 'rmresc', other_resc])
self.admin.run_icommand(['iadmin', 'rmresc', other_ufs])
def test_silent_option_is_supported__issue_5252(self):
data_object = 'foo'
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
self.admin.assert_icommand(['irepl', '-R', self.testresc, data_object])
self.admin.assert_icommand(['ichksum', '-a', '--silent', data_object])
# Verify that the data object's replicas have checksums.
gql = "select DATA_CHECKSUM where COLL_NAME = '{0}' and DATA_NAME = '{1}'".format(self.admin.session_collection, data_object)
self.admin.assert_icommand(['iquest', '%s', gql], 'STDOUT', ['sha2:'])
def test_recursive_option_is_supported__issue_5252(self):
coll_a = os.path.join(self.admin.session_collection, 'coll_a.5252')
self.admin.assert_icommand(['imkdir', coll_a])
coll_b = os.path.join(self.admin.session_collection, 'coll_b.5252')
self.admin.assert_icommand(['imkdir', coll_b])
# Create three data objects with one additional replica.
# Place one data object in each collection.
data_objects = [os.path.join(self.admin.session_collection, 'foo'),
os.path.join(coll_a, 'bar'),
os.path.join(coll_b, 'baz')]
for data_object in data_objects:
self.admin.assert_icommand(['istream', 'write', data_object], input='some special sauce!')
self.admin.assert_icommand(['irepl', '-R', self.testresc, data_object])
# Checksum all of the data objects (and replicas).
self.admin.assert_icommand(['ichksum', '-r', '-a', self.admin.session_collection], 'STDOUT', [
'C- ' + os.path.dirname(data_objects[0]),
'C- ' + os.path.dirname(data_objects[1]),
'C- ' + os.path.dirname(data_objects[2]),
' ' + os.path.basename(data_objects[0]) + ' sha2:',
' ' + os.path.basename(data_objects[1]) + ' sha2:',
' ' + os.path.basename(data_objects[2]) + ' sha2:'
])
# Show that using --silent produces no output when all replicas are in a good state.
self.admin.assert_icommand(['ichksum', '-r', '-a', '--silent', self.admin.session_collection])
def test_ichksum_ignores_replicas_that_are_not_marked_good_or_stale__issue_5252(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
try:
# Set the replica's status to an unknown value, in this test, 7.
# If the replica's status was set to intermediate (2), write lock (3), or read lock (4), ichksum would return HIERARCHY_ERROR.
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '7'])
self.admin.assert_icommand(['ichksum', data_object], 'STDERR', ['SYS_REPLICA_INACCESSIBLE'])
self.admin.assert_icommand(['ichksum', '-n0', data_object], 'STDERR', ['SYS_REPLICA_INACCESSIBLE'])
self.admin.assert_icommand(['ichksum', '-a', data_object], 'STDERR', ['SYS_NO_GOOD_REPLICA'])
finally:
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '1'])
def test_ichksum_returns_immediately_if_processing_replicas_in_the_bundle_resource__issue_5252(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
# Capture the replica's resource id.
gql = "select RESC_ID where COLL_NAME = '{0}' and DATA_NAME = '{1}'".format(self.admin.session_collection, os.path.basename(data_object))
original_resc_id, _, ec = self.admin.run_icommand(['iquest', '%s', gql])
self.assertEqual(ec, 0)
# Get the resource id of the bundle resource and update the replica's resource id column.
bundle_resc_id, _, ec = self.admin.run_icommand(['iquest', '%s', "select RESC_ID where RESC_NAME = 'bundleResc'"])
self.assertEqual(ec, 0)
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_RESC_ID', bundle_resc_id.strip()])
self.admin.assert_icommand(['ichksum', data_object], 'STDERR', ['SYS_CANT_CHKSUM_BUNDLED_DATA'])
self.admin.assert_icommand(['ichksum', '-n0', data_object], 'STDERR', ['SYS_CANT_CHKSUM_BUNDLED_DATA'])
# Restore the replica's original resource id.
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_RESC_ID', original_resc_id.strip()])
def test_ichksum_is_not_allowed_access_to_locked_replicas__issue_5252(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
self.admin.assert_icommand(['istream', 'write', data_object], input='some data')
try:
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '2'])
self.admin.assert_icommand(['ichksum', data_object], 'STDERR', ['HIERARCHY_ERROR'])
self.admin.assert_icommand(['ichksum', '-n0', data_object], 'STDERR', ['HIERARCHY_ERROR'])
self.admin.assert_icommand(['ichksum', '-a', data_object], 'STDERR', ['HIERARCHY_ERROR'])
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '3'])
self.admin.assert_icommand(['ichksum', data_object], 'STDERR', ['HIERARCHY_ERROR'])
self.admin.assert_icommand(['ichksum', '-n0', data_object], 'STDERR', ['HIERARCHY_ERROR'])
self.admin.assert_icommand(['ichksum', '-a', data_object], 'STDERR', ['HIERARCHY_ERROR'])
finally:
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_REPL_STATUS', '1'])
def test_ichksum_groups_objects_appropriately_when_the_recursive_flag_is_set__issue_5285(self):
# Create a test collection. This avoids issues with data objects created by
# the base class appearing in the output.
root_col = os.path.join(self.admin.session_collection, 'issue_5285')
self.admin.assert_icommand(['imkdir', root_col])
# Add a new data object to the root test collection.
data_object_0 = 'foo'
self.admin.assert_icommand(['istream', 'write', os.path.join(root_col, data_object_0)], input='X')
# Create a new collection and add two data objects to it.
data_object_1 = 'bar'
data_object_2 = 'baz'
col_1 = os.path.join(root_col, 'col_1')
self.admin.assert_icommand(['imkdir', col_1])
self.admin.assert_icommand(['istream', 'write', os.path.join(col_1, data_object_1)], input='Y')
self.admin.assert_icommand(['istream', 'write', os.path.join(col_1, data_object_2)], input='Z')
# Create a third collection inside of the previously created collection and
# add one data object to it.
data_object_3 = 'goo'
col_2 = os.path.join(col_1, 'col_2')
self.admin.assert_icommand(['imkdir', col_2])
self.admin.assert_icommand(['istream', 'write', os.path.join(col_2, data_object_3)], input='W')
# Verify that the output is correct.
out, _, ec = self.admin.run_icommand(['ichksum', '-r', root_col])
self.assertEqual(ec, 0)
pattern = '''C- {0}:
{1} sha2:.+
C- {2}:
{3} sha2:.+
{4} sha2:.+
C- {5}:
{6} sha2:.+
'''.format(root_col,
data_object_0,
col_1,
data_object_1,
data_object_2,
col_2,
data_object_3)
self.assertTrue(re.match(pattern, out))
@unittest.skipIf(test.settings.TOPOLOGY_FROM_RESOURCE_SERVER, "Skip for topology testing from resource server")
def test_ichksum_honors_the_size_in_the_catalog_when_computing_checksums__issue_5401(self):
data_object = os.path.join(self.admin.session_collection, 'foo')
contents = 'the data'
self.admin.assert_icommand(['istream', 'write', data_object], input=contents)
def do_test(size_in_catalog, checksum):
# Make the catalog report the wrong size of the replica on disk.
# This change will cause icommands such as iget and istream to print at most "size_in_catalog" bytes.
self.admin.assert_icommand(['iadmin', 'modrepl', 'logical_path', data_object, 'replica_number', '0', 'DATA_SIZE', str(size_in_catalog)])
self.admin.assert_icommand(['istream', 'read', data_object], 'STDOUT', [contents[:size_in_catalog]])
# Show that ichksum reads at most "size_in_catalog" bytes when computing a checksum.
self.admin.assert_icommand(['ichksum', '-f', data_object], 'STDOUT', ['sha2:' + checksum])
self.assertEqual(checksum, base64.b64encode(hashlib.sha256(contents[:size_in_catalog]).digest()))
# Compute the SHA256 checksum of the replica using its actual size on disk.
gql = "select DATA_PATH where COLL_NAME = '{0}' and DATA_NAME = '{1}'".format(self.admin.session_collection, os.path.basename(data_object))
physical_path, err, ec = self.admin.run_icommand(['iquest', '%s', gql])
self.assertEqual(ec, 0)
self.assertEqual(len(err), 0)
self.assertGreater(len(physical_path), 0)
with open(physical_path.strip(), 'r') as f:
sha2 = hashlib.sha256(f.read())
# Show that the checksums are different (size in catalog vs size on disk).
self.assertNotEqual(checksum, base64.b64encode(sha2.digest()))
do_test(0, '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=')
do_test(3, 'uXdtfd9FnJrVsOHWrGHie++16Z/WJEZndgDXys71RNA=')
@unittest.skipIf(test.settings.TOPOLOGY_FROM_RESOURCE_SERVER, "Skip for topology testing from resource server")
def test_ichksum_detects_when_the_size_in_storage_is_less_than_the_size_in_catalog__issue_5401(self):
data_object = 'issue_5401.txt'
# Create a data object.
contents = 'A very very very very very very very long string!'
self.admin.assert_icommand(['istream', 'write', data_object], input=contents)
# Get the data object's physical path.
gql = "select DATA_PATH where COLL_NAME = '{0}' and DATA_NAME = '{1}'".format(self.admin.session_collection, data_object)
data_path, err, ec = self.admin.run_icommand(['iquest', '%s', gql])
self.assertEqual(ec, 0)
self.assertEqual(len(err), 0)
self.assertGreater(len(data_path), 0)
# Make the physical object's size less than the size recorded in the catalog.
with open(data_path.strip(), 'w') as f:
f.write(contents[:10])
# Show that ichksum detects the size inconsistency between the catalog and the object in storage.
self.admin.assert_icommand(['ichksum', '-f', data_object], 'STDERR', ['-512000 UNIX_FILE_READ_ERR'])
| 59.289979
| 173
| 0.659402
|
3fe351b8982b48d5099b52ac68c87586c54b0883
| 2,209
|
py
|
Python
|
addHumanLabels.py
|
online-behaviour/2017-election
|
b6c0b8a52336c26909b8c852de55d18d38a4cbfb
|
[
"Apache-2.0"
] | null | null | null |
addHumanLabels.py
|
online-behaviour/2017-election
|
b6c0b8a52336c26909b8c852de55d18d38a4cbfb
|
[
"Apache-2.0"
] | null | null | null |
addHumanLabels.py
|
online-behaviour/2017-election
|
b6c0b8a52336c26909b8c852de55d18d38a4cbfb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
addHumanLabels.py: add human labels to data file
usage: addHumanLabels.py labelFile < dataFile > outFile
20190206 erikt(at)xs4all.nl
"""
import csv
import sys
COMMAND = sys.argv.pop(0)
USAGE = "usage: "+COMMAND+" labelFile < dataFile > outFile "
OFFSET = 1
SEPARATOR = ","
INDEXID = 2
LABELID = 3
FASTTEXTID = 0
DEEPLEARNINGID = 1
TWEETIDID = 4
DATEID = 5
USERNAMEID = 6
TWEETTEXTID = 7
TWEETID = "tweetid"
USERID = "userid"
USERNAME = "username"
PARTY1 = "party1"
PARTY2 = "party2"
TEXT = "text"
DATE = "date"
UNKNOWN1 = "unknown1"
UNKNOWN2 = "unknown2"
UNKNOWN3 = "unknown3"
LABEL = "label"
FASTTEXT = "fasttext"
DEEPLEARNING = "deeplearning"
OUTPUTFIELDS = [TWEETID,USERID,USERNAME,PARTY1,TEXT,UNKNOWN1,PARTY2,UNKNOWN2,UNKNOWN3,LABEL,DATE,FASTTEXT,DEEPLEARNING]
LABELIDS = {"":"0","ERROR":"0","C TRAIL":"1","PROMOTION":"2",
"C ACTION":"3","VOTE CALL":"4","NEWS":"5","STANCE":"6",
"CRITIQUE":"7", "INPUT":"8","ADVICE":"9","ACKNOWL":"10",
"PERSONAL":"11","OTHER":"12" }
def processData(labels):
lineCounter = 0
csvreader = csv.reader(sys.stdin,delimiter=SEPARATOR)
csvwriter = csv.DictWriter(sys.stdout,delimiter=SEPARATOR,fieldnames=OUTPUTFIELDS)
for row in csvreader:
lineCounter += 1
index = str(lineCounter)
if index in labels:
data = {LABEL:labels[index],TEXT:row[TWEETTEXTID],TWEETID:row[TWEETIDID],USERNAME:row[USERNAMEID],DATE:row[DATEID],FASTTEXT:row[FASTTEXTID],DEEPLEARNING:row[DEEPLEARNINGID]}
csvwriter.writerow(data)
def readLabels(labelFileName):
try: labelFile = open(labelFileName,"r")
except Exception as e:
sys.exit(COMMAND+": cannot read file "+labelFileName+": "+str(e))
labels = {}
for line in labelFile:
tokens = line.split()
index = str(int(tokens[INDEXID])+OFFSET)
labelName = " ".join(tokens[LABELID:])
labels[index] = LABELIDS[labelName]
return(labels)
def main(argv):
try: labelFileName = argv.pop(0)
except Exception as e: sys.exit(USAGE+str(e))
labels = readLabels(labelFileName)
processData(labels)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 30.260274
| 185
| 0.664101
|
3d10dd112998618244852a725c037996365f9844
| 823
|
py
|
Python
|
Chapters/Chapter_3-Resizing_and_cropping.py
|
Balaji-Ganesh/Furnishing-OpenCV-Basics
|
54cd8fa09cc6f1298861b12ffb190432f412bd1f
|
[
"MIT"
] | null | null | null |
Chapters/Chapter_3-Resizing_and_cropping.py
|
Balaji-Ganesh/Furnishing-OpenCV-Basics
|
54cd8fa09cc6f1298861b12ffb190432f412bd1f
|
[
"MIT"
] | null | null | null |
Chapters/Chapter_3-Resizing_and_cropping.py
|
Balaji-Ganesh/Furnishing-OpenCV-Basics
|
54cd8fa09cc6f1298861b12ffb190432f412bd1f
|
[
"MIT"
] | null | null | null |
import cv2
img = cv2.imread("Resources/lena.jpg")
print(img.shape)
# Resizing the image..
imgResize = cv2.resize(src=img, dsize=(200, 300)) # dsize=(width, height)
# Cropping the image: no OpenCV function is needed, plain NumPy slicing works,
# but note the order -- the slice takes height first, then width, unlike the
# (width, height) convention used by OpenCV functions.
croppedImg = img[250:380, 230:370]  # [h1:h2, w1:w2] -- rows (height) first, then columns (width)
# Note on the statement above: when w2 < h2 the result showed a black cut edge;
# the cause is still unclear and is worth revisiting (check the slice bounds).
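# A minimal sketch (illustration only) of a bounds-checked crop helper. It just
# wraps the NumPy slicing shown above; the coordinate values below are arbitrary
# examples, not ones used elsewhere in this chapter.
def safe_crop(image, y1, y2, x1, x2):
    h, w = image.shape[:2]
    # Clamp the requested window to the image so the slice never goes out of range.
    y1, y2 = max(0, y1), min(h, y2)
    x1, x2 = max(0, x1), min(w, x2)
    return image[y1:y2, x1:x2]  # rows (height) first, then columns (width)
safeCroppedImg = safe_crop(img, 250, 380, 230, 370)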
# Displaying images...
cv2.imshow("Original Image", img)
cv2.imshow("Resized image", imgResize)
cv2.imshow("Cropped Image", croppedImg)
cv2.waitKey(0)
| 45.722222
| 215
| 0.687728
|
497c3788de1338cd9fe2c07355b1c7e5c9318bd5
| 219
|
py
|
Python
|
csacademy/digit-function.py
|
phvash/competitive-programming
|
33c881764341d8ff42b7c819ed04eb9c7d6f93f4
|
[
"MIT"
] | null | null | null |
csacademy/digit-function.py
|
phvash/competitive-programming
|
33c881764341d8ff42b7c819ed04eb9c7d6f93f4
|
[
"MIT"
] | null | null | null |
csacademy/digit-function.py
|
phvash/competitive-programming
|
33c881764341d8ff42b7c819ed04eb9c7d6f93f4
|
[
"MIT"
] | null | null | null |
def f(num):
    # Count how many calls it takes to reach 0 by repeatedly subtracting
    # the sum of the decimal digits of the current value.
    global count
    count += 1
    if num == 0:
        return 0
    sum_of_digits = sum(map(int, str(num)))
    f(num - sum_of_digits)
count = 0
num = int(input())
f(num)
print(count)
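# A minimal iterative sketch of the same computation (assumption: it mirrors
# the recursion above while avoiding Python's recursion limit for very large
# inputs); not part of the original submission.
def count_steps(num):
    steps = 1  # the recursive version counts the initial call as well
    while num != 0:
        num -= sum(map(int, str(num)))
        steps += 1
    return steps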
| 12.882353
| 43
| 0.56621
|
cc8ef23a1bda282279da246073e3d471d36a60a0
| 662
|
py
|
Python
|
amodem/tests/test_sampling.py
|
Matthew-MK/amodem
|
a75dda9ab0f7445589a036357e604703ccb34726
|
[
"MIT"
] | 766
|
2015-01-14T15:48:07.000Z
|
2022-03-30T01:19:48.000Z
|
amodem/tests/test_sampling.py
|
Matthew-MK/amodem
|
a75dda9ab0f7445589a036357e604703ccb34726
|
[
"MIT"
] | 42
|
2015-01-02T18:50:11.000Z
|
2022-03-11T19:10:35.000Z
|
amodem/tests/test_sampling.py
|
Matthew-MK/amodem
|
a75dda9ab0f7445589a036357e604703ccb34726
|
[
"MIT"
] | 116
|
2015-01-14T20:43:52.000Z
|
2022-03-24T13:10:30.000Z
|
from amodem import sampling
from amodem import common
import numpy as np
from io import BytesIO
def test_resample():
x = np.sin(2*np.pi * 10 * np.linspace(0, 1, 1001))
src = BytesIO(common.dumps(x))
dst = BytesIO()
sampling.resample(src=src, dst=dst, df=0.0)
y = common.loads(dst.getvalue())
err = x[:len(y)] - y
assert np.max(np.abs(err)) < 1e-4
dst = BytesIO()
sampling.resample(src=BytesIO(b'\x00\x00'), dst=dst, df=0.0)
assert dst.tell() == 0
def test_coeffs():
interp = sampling.Interpolator(width=4, resolution=16)
err = interp.filt[0] - [0, 0, 0, 1, 0, 0, 0, 0]
assert np.max(np.abs(err)) < 1e-10
| 25.461538
| 64
| 0.623867
|
f98768da4591835f07f914a88fc6ef93707fafca
| 9,983
|
py
|
Python
|
uf/modeling/gpt2.py
|
yupeijei1997/unif
|
16685a89446e6ce14080439162a9bfd0c75f0521
|
[
"Apache-2.0"
] | 1
|
2021-05-15T12:07:40.000Z
|
2021-05-15T12:07:40.000Z
|
uf/modeling/gpt2.py
|
yupeijei1997/unif
|
16685a89446e6ce14080439162a9bfd0c75f0521
|
[
"Apache-2.0"
] | null | null | null |
uf/modeling/gpt2.py
|
yupeijei1997/unif
|
16685a89446e6ce14080439162a9bfd0c75f0521
|
[
"Apache-2.0"
] | null | null | null |
# coding:=utf-8
# Copyright 2020 Tencent. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' GPT-2.
Code revised from OpenAI's implementation of GPT-2.
See `https://github.com/openai/gpt-2`.
'''
import numpy as np
from uf.tools import tf
from .base import BaseEncoder, BaseDecoder
from . import util
class GPT2(BaseDecoder, BaseEncoder):
def __init__(self,
hparams,
is_training,
input_ids,
sample_weight=None,
scope='model',
given=1,
**kwargs):
super().__init__()
batch_size = util.get_shape_list(input_ids, expected_rank=2)[0]
max_seq_length = hparams.n_predict
# Tilda embeddings for SMART algorithm
tilda_embeddings = None
use_tilda_embedding=kwargs.get('use_tilda_embedding')
if use_tilda_embedding:
with tf.variable_scope('', reuse=True):
tilda_embeddings = tf.get_variable('tilda_embeddings')
with tf.variable_scope(scope):
def _forward(input_ids, past=None):
batch, sequence = shape_list(input_ids)
if tilda_embeddings is None:
wte = tf.get_variable(
'word_embeddings', [hparams.n_vocab, hparams.n_embed],
initializer=tf.random_normal_initializer(stddev=0.02))
else:
wte = tilda_embeddings
wpe = tf.get_variable(
'wpe', [hparams.n_ctx, hparams.n_embed],
initializer=tf.random_normal_initializer(stddev=0.01))
past_length = 0 if past is None else tf.shape(past)[-2]
h = (tf.gather(wte, input_ids) +
tf.gather(wpe, positions_for(input_ids, past_length)))
# stacked transformer layers
presents = []
pasts = tf.unstack(past, axis=1) if past is not None else \
[None] * hparams.n_layer
assert len(pasts) == hparams.n_layer
for layer, past in enumerate(pasts):
h, present = block(
h, 'h%d' % layer, past=past, hparams=hparams)
presents.append(present)
present = tf.stack(presents, axis=1)
h = norm(h, 'ln_f')
# Language model loss. Do tokens <n predict token n?
h_flat = tf.reshape(h, [batch*sequence, hparams.n_embed])
logits = tf.matmul(h_flat, wte, transpose_b=True)
logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
return logits, present
# convert to labels
label_ids = tf.concat(
[input_ids[:, 1:],
tf.zeros([batch_size, 1], dtype=tf.int32)], axis=-1)
# forward once
if is_training:
(logits, _) = _forward(input_ids)
self.preds['preds'] = tf.argmax(logits, axis=-1)
# forward loop
else:
input_ids = input_ids[:, 0:given]
for cur_length in range(given, max_seq_length + 1):
(logits, _) = _forward(input_ids)
pred_ids = tf.argmax(
logits[:, cur_length-1:cur_length, :], axis=-1)
pred_ids = tf.cast(pred_ids, tf.int32)
input_ids = tf.concat([input_ids, pred_ids], axis=-1)
self.preds['preds'] = input_ids
# loss
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(label_ids, depth=hparams.n_vocab)
per_token_loss = -tf.reduce_sum(
one_hot_labels * log_probs, axis=-1)
label_mask = tf.cast(tf.not_equal(label_ids, 0), tf.float32)
per_example_loss = \
tf.reduce_sum(per_token_loss * label_mask, axis=-1) / \
tf.reduce_sum(label_mask, axis=-1)
if sample_weight is not None:
per_example_loss *= tf.expand_dims(sample_weight, axis=-1)
self.total_loss = tf.reduce_mean(per_example_loss)
self.losses['losses'] = per_example_loss
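# A framework-free sketch of the "forward loop" above (greedy decoding):
# repeatedly evaluate the model on the current prefix and append the argmax of
# the logits at the last position. `step_logits_fn` is a hypothetical callable
# returning per-position logits for a list of token ids; it is not part of this
# module and only serves to illustrate the control flow.
def greedy_decode_sketch(step_logits_fn, prompt_ids, max_seq_length):
    ids = list(prompt_ids)
    while len(ids) < max_seq_length:
        logits = step_logits_fn(ids)  # shape: [len(ids), n_vocab]
        last = logits[-1]
        ids.append(max(range(len(last)), key=last.__getitem__))
    return ids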
def shape_list(x):
'''Deal with dynamic shape in tensorflow cleanly.'''
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def softmax(x, axis=-1):
x = x - tf.reduce_max(x, axis=axis, keepdims=True)
ex = tf.exp(x)
return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
def gelu(x):
return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))
def norm(x, scope, *, axis=-1, epsilon=1e-5):
'''Normalize to mean = 0, std = 1, then do a diagonal affine transform.'''
with tf.variable_scope(scope):
n_state = x.shape[-1].value
g = tf.get_variable('g', [n_state],
initializer=tf.constant_initializer(1))
b = tf.get_variable('b', [n_state],
initializer=tf.constant_initializer(0))
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + epsilon)
x = x*g + b
return x
def split_states(x, n):
'''Reshape the last dimension of x into [n, x.shape[-1]/n].'''
*start, m = shape_list(x)
return tf.reshape(x, start + [n, m//n])
def merge_states(x):
'''Smash the last two dimensions of x into a single dimension.'''
*start, a, b = shape_list(x)
return tf.reshape(x, start + [a * b])
def conv1d(x, scope, nf, *, w_init_stdev=0.02):
with tf.variable_scope(scope):
*start, nx = shape_list(x)
w = tf.get_variable(
'w', [1, nx, nf],
initializer=tf.random_normal_initializer(stddev=w_init_stdev))
b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]),
tf.reshape(w, [-1, nf])) + b, start + [nf])
return c
def attention_mask(nd, ns, *, dtype):
'''1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't
produce garbage on TPUs.
'''
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
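# Illustration of the docstring above using plain NumPy (np is imported at the
# top of this module): for nd=3 decode positions attending over ns=5 total
# positions, ones fill the lower triangle counted from the lower-right corner.
def _attention_mask_example():
    nd, ns = 3, 5
    i = np.arange(nd)[:, None]
    j = np.arange(ns)
    m = (i >= j - ns + nd).astype(np.int32)
    # m == [[1, 1, 1, 0, 0],
    #       [1, 1, 1, 1, 0],
    #       [1, 1, 1, 1, 1]]
    return m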
def attn(x, scope, n_state, *, past, hparams):
assert x.shape.ndims == 3 # Should be [batch, sequence, features]
assert n_state % hparams.n_head == 0
if past is not None:
assert past.shape.ndims == 5 # Should be [batch, 2, heads, sequence,
# features], where 2 is [k, v]
def split_heads(x):
# From [batch, sequence, features] to [batch, heads,
# sequence, features]
return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])
def merge_heads(x):
# Reverse of split_heads
return merge_states(tf.transpose(x, [0, 2, 1, 3]))
def mask_attn_weights(w):
# w has shape [batch, heads, dst_sequence, src_sequence], where
# information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w*b - tf.cast(1e10, w.dtype)*(1-b)
return w
def multihead_attn(q, k, v):
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))
w = mask_attn_weights(w)
w = softmax(w)
a = tf.matmul(w, v)
return a
with tf.variable_scope(scope):
c = conv1d(x, 'c_attn', n_state*3)
q, k, v = map(split_heads, tf.split(c, 3, axis=2))
present = tf.stack([k, v], axis=1)
if past is not None:
pk, pv = tf.unstack(past, axis=1)
k = tf.concat([pk, k], axis=-2)
v = tf.concat([pv, v], axis=-2)
a = multihead_attn(q, k, v)
a = merge_heads(a)
a = conv1d(a, 'c_proj', n_state)
return a, present
def mlp(x, scope, n_state, *, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
h = gelu(conv1d(x, 'c_fc', n_state))
h2 = conv1d(h, 'c_proj', nx)
return h2
def block(x, scope, *, past, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
a, present = attn(
norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)
x = x + a
m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)
x = x + m
return x, present
def past_shape(*, hparams, batch_size=None, sequence=None):
return [batch_size,
hparams.n_layer,
2,
hparams.n_head,
sequence,
hparams.n_embed // hparams.n_head]
def expand_tile(value, size):
'''Add a new axis of given size.'''
value = tf.convert_to_tensor(value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
def positions_for(tokens, past_length):
batch_size = tf.shape(tokens)[0]
nsteps = tf.shape(tokens)[1]
return expand_tile(past_length + tf.range(nsteps), batch_size)
| 36.039711
| 79
| 0.565361
|
7c69cb8e63dd507135b0da61eac979a99498de39
| 1,327
|
py
|
Python
|
checkio/Rock/DNA Common Sequence/dna_common_sequences.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 39
|
2015-02-09T13:24:12.000Z
|
2019-05-16T17:51:19.000Z
|
checkio/Rock/DNA Common Sequence/dna_common_sequences.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 1
|
2019-10-21T16:18:14.000Z
|
2019-10-21T16:18:14.000Z
|
checkio/Rock/DNA Common Sequence/dna_common_sequences.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 22
|
2015-01-30T18:00:05.000Z
|
2021-05-22T02:57:23.000Z
|
# Please read the following article for an explanation of the algorithm:
# http://wordaligned.org/articles/longest-common-subsequence
def common(first, second):
dp_rec = [[('',)] * (len(second) + 1) for i in range(len(first) + 1)]
for i in range(1, len(first) + 1):
for j in range(1, len(second) + 1):
# same ending
if first[i - 1] == second[j - 1]:
dp_rec[i][j] = tuple((s + first[i - 1]) for s in dp_rec[i - 1][j - 1])
# otherwise depend on LCS size
else:
lcs_left_len = len(dp_rec[i][j - 1][0])
lcs_up_len = len(dp_rec[i - 1][j][0])
if lcs_left_len == lcs_up_len:
# remove duplicates
dp_rec[i][j] = tuple(set(dp_rec[i][j - 1] + dp_rec[i - 1][j]))
elif lcs_left_len > lcs_up_len:
dp_rec[i][j] = dp_rec[i][j - 1]
else:
dp_rec[i][j] = dp_rec[i - 1][j]
return ','.join(sorted(dp_rec[-1][-1]))
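# A minimal length-only variant for reference (a sketch of the standard
# dynamic-programming recurrence from the article linked above); common()
# extends the same recurrence by also collecting the subsequences themselves.
def lcs_length(first, second):
    prev = [0] * (len(second) + 1)
    for a in first:
        cur = [0]
        for j, b in enumerate(second, 1):
            cur.append(prev[j - 1] + 1 if a == b else max(prev[j], cur[j - 1]))
        prev = cur
    return prev[-1]
# e.g. lcs_length("ACGTC", "TTACTC") == 4, the length of "ACTC"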
if __name__ == '__main__': # pragma: no cover
# These "asserts" using only for self-checking and not necessary for
# auto-testing
assert common("ACGTC", "TTACTC") == "ACTC", "One"
assert common("CGCTA", "TACCG") == "CC,CG,TA", "Two"
assert common("GCTT", "AAAAA") == "", "None"
| 41.46875
| 86
| 0.515448
|
8b42cb5c97af45c56dd32c3d7d4ed86ca82ed439
| 1,865
|
py
|
Python
|
src/pycity_scheduling/algorithms/__init__.py
|
ElsevierSoftwareX/SOFTX-D-20-00087
|
d2d3f1effda2c0499cb05abf87435375a21379e3
|
[
"MIT"
] | 4
|
2021-11-01T15:13:27.000Z
|
2022-01-16T18:01:06.000Z
|
src/pycity_scheduling/algorithms/__init__.py
|
ElsevierSoftwareX/SOFTX-D-20-00087
|
d2d3f1effda2c0499cb05abf87435375a21379e3
|
[
"MIT"
] | 2
|
2021-11-18T05:58:00.000Z
|
2022-01-19T16:46:20.000Z
|
src/pycity_scheduling/algorithms/__init__.py
|
ElsevierSoftwareX/SOFTX-D-20-00087
|
d2d3f1effda2c0499cb05abf87435375a21379e3
|
[
"MIT"
] | 5
|
2021-11-01T15:13:35.000Z
|
2022-02-03T21:28:48.000Z
|
"""
The pycity_scheduling framework
Copyright (C) 2022,
Institute for Automation of Complex Power Systems (ACS),
E.ON Energy Research Center (E.ON ERC),
RWTH Aachen University
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from .stand_alone_optimization_algorithm import StandAlone
from .local_optimization_algorithm import LocalOptimization
from .exchange_admm_algorithm import ExchangeADMM
from .central_optimization_algorithm import CentralOptimization
from .dual_decomposition_algorithm import DualDecomposition
__all__ = [
'StandAlone',
'LocalOptimization',
'ExchangeADMM',
'CentralOptimization',
'DualDecomposition',
'algorithm',
'algorithms',
]
algorithms = {
'stand-alone': StandAlone,
'local': LocalOptimization,
'exchange-admm': ExchangeADMM,
'central': CentralOptimization,
'dual-decomposition': DualDecomposition,
}
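# A minimal helper sketch (not part of the published API) showing how the
# mapping above can be used to resolve an algorithm class from its string key:
def get_algorithm(name):
    try:
        return algorithms[name]
    except KeyError:
        raise ValueError("unknown algorithm %r; expected one of: %s"
                         % (name, ", ".join(sorted(algorithms))))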
| 37.3
| 118
| 0.787668
|
d37353cf4c6658b97996ff365832047189978593
| 3,101
|
py
|
Python
|
mysite/timesheets/migrations/0001_initial.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | null | null | null |
mysite/timesheets/migrations/0001_initial.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | 1
|
2019-06-11T21:23:49.000Z
|
2019-06-11T21:23:49.000Z
|
mysite/timesheets/migrations/0001_initial.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-21 21:01
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Adhoc',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('description', models.CharField(max_length=50)),
('hours_projected', models.IntegerField()),
('hours_actual', models.IntegerField()),
('created_date', models.DateField(default=datetime.date.today)),
],
),
migrations.CreateModel(
name='App',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=25)),
('created_date', models.DateField(default=datetime.date.today)),
],
),
migrations.CreateModel(
name='Defect',
fields=[
('id', models.CharField(max_length=25, primary_key=True, serialize=False)),
('description', models.CharField(max_length=50)),
('created_date', models.DateField(default=datetime.date.today)),
('app', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.App')),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=25)),
('last_name', models.CharField(max_length=25)),
('created_date', models.DateField(default=datetime.date.today)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='Timesheet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=datetime.date.today)),
('hours', models.DecimalField(decimal_places=2, max_digits=4)),
('adhoc', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.Adhoc')),
('app', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.App')),
('defect', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.Defect')),
('emp', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.Employee')),
('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='timesheets.Task')),
],
),
]
| 42.479452
| 115
| 0.571751
|
50994f0b071b8e55a71c2ed2a59105999f56fad3
| 48,442
|
py
|
Python
|
tests/validation/tests/v3_api/common.py
|
zehuaiWANG/rancher
|
111645ae58da302a0d48138e054fd97dd1b6a1f9
|
[
"Apache-2.0"
] | null | null | null |
tests/validation/tests/v3_api/common.py
|
zehuaiWANG/rancher
|
111645ae58da302a0d48138e054fd97dd1b6a1f9
|
[
"Apache-2.0"
] | 2
|
2021-03-25T23:07:41.000Z
|
2022-03-29T21:57:09.000Z
|
tests/validation/tests/v3_api/common.py
|
zehuaiWANG/rancher
|
111645ae58da302a0d48138e054fd97dd1b6a1f9
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_MONITORING_TIMEOUT = 180
MONITORING_VERSION = os.environ.get('MONITORING_VERSION', "0.0.5")
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
CLUSTER_NAME_2 = os.environ.get("RANCHER_CLUSTER_NAME_2", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
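# Minimal self-contained sketch of how wait_for() is intended to be used: poll
# a callback until it returns a truthy value or the timeout elapses. The fake
# in-memory "resource" below is purely illustrative.
def _wait_for_usage_sketch():
    fake_resource = {"state": "provisioning", "polls": 0}
    def becomes_active():
        fake_resource["polls"] += 1
        if fake_resource["polls"] >= 3:
            fake_resource["state"] = "active"
        return fake_resource["state"] == "active"
    wait_for(becomes_active, timeout=10,
             timeout_message="resource never became active")
    return fake_resource["polls"]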
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for transitioning state " + state)
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pod to reach running state")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
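# Illustrative sketch (values are hypothetical): the command is usually a node
# URL plus the names of the pods backing a workload, e.g.
#   targets = get_target_names(p_client, [workload])
#   validate_http_response("http://" + host_ip + "/name.html", targets)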
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(USER_TOKEN, cluster)
p_client = get_project_client_for_token(project, USER_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for node to reach state " + state)
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node to be deleted")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for cluster to reach the expected node count")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
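# Example call (the node object and role list below are hypothetical): build
# the registration command for a host that should act as both etcd and worker:
#   cmd = get_custom_host_registration_cmd(client, cluster,
#                                          ["etcd", "worker"], aws_node)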
def create_custom_host_registration_token(client, cluster):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
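# Illustrative usage (the command string is hypothetical): run a shell command
# inside a test pod, e.g.
#   output = kubectl_pod_exec(pod, "cat /etc/hostname")
# The result is returned as bytes because stderr=True routes the call through
# run_command_with_stderr.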
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for the expected number of pods in workload")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster():
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
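    # Exponential backoff: the sleep doubles each iteration and is capped at
    # two seconds; 403 errors are swallowed because the object may simply not
    # be visible to this user yet.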
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
get_endpoint_url_for_workload(p_client, workload, 60)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port = wl.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
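# Example call (names and paths are hypothetical):
#   wl = create_wl_with_nfs(p_client, ns.id, "pvc1",
#                           random_test_name("nfs-wl"),
#                           mount_path="/var/nfs", sub_path="test")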
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
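# The two helpers above are typically used as a pair (illustrative values):
#   write_content_to_file(pod, "hello", "/var/nfs/test.txt")
#   validate_file_content(pod, "hello", "/var/nfs/test.txt")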
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
app_data = client.list_app(id=app_id).data
start = time.time()
assert len(app_data) >= 1, "Cannot find app"
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
            # If the question is required and no default value is available,
            # set a fake value (only allowed for type string; other types error out)
if "required" in quest.keys():
if quest["required"]:
assert quest["type"] == "string", \
"Cannot set default for types other than string"
answer = "fake"
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
questions_and_answers[question] = get_answer(ques)
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print(questions_and_answers)
return questions_and_answers
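# Illustrative usage (the external id value is hypothetical): default answers
# for a catalog chart can be derived straight from its external id, e.g.
#   answers = get_defaut_question_answers(client, app_external_id)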
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app = client.list_app(id=app_id).data
if len(app) == 0:
break
def validate_catalog_app(proj_client, app, external_id, answer=None):
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
    parameters = external_id.split('&')
    assert len(parameters) > 1, \
        "Incorrect list of parameters from catalog external ID"
    chart = parameters[len(parameters)-2].split("=")[1] + "-" + \
        parameters[len(parameters)-1].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
for wl in workloads:
assert wl.state == "active"
assert wl.workloadLabels.chart == chart, \
"the chart version is wrong"
# Validate_app_answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user, cattle_auth_url)
return user, user_token
def get_user_token(user, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': user.username,
'password': USER_PASSWORD,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
| 35.30758
| 79
| 0.626997
|
aba0018aa1043212cb8093434da60964716af72e
| 4,139
|
py
|
Python
|
gym_server/server.py
|
ThomasGale/pytorch-cpp-rl
|
e25fa6418e308eefb7213c2d45528eaac20780ee
|
[
"MIT"
] | 445
|
2019-04-11T12:30:34.000Z
|
2022-03-28T08:14:42.000Z
|
gym_server/server.py
|
ThomasGale/pytorch-cpp-rl
|
e25fa6418e308eefb7213c2d45528eaac20780ee
|
[
"MIT"
] | 21
|
2019-04-11T20:18:02.000Z
|
2021-11-27T12:56:58.000Z
|
gym_server/server.py
|
ThomasGale/pytorch-cpp-rl
|
e25fa6418e308eefb7213c2d45528eaac20780ee
|
[
"MIT"
] | 87
|
2019-04-11T17:42:05.000Z
|
2022-03-10T15:55:48.000Z
|
"""
Contains a class that serves OpenAI Gym environments over ZMQ for training RL agents.
"""
import logging
from typing import Tuple
import numpy as np
import gym
from gym_server.envs import make_vec_envs
from gym_server.messages import (InfoMessage, MakeMessage, ResetMessage,
StepMessage)
from gym_server.zmq_client import ZmqClient
RUNNING_REWARD_HORIZON = 10
class Server:
"""
When `Server.serve()` is called, provides a ZMQ based API for training
RL agents on OpenAI gym environments.
"""
def __init__(self, zmq_client: ZmqClient):
self.zmq_client: ZmqClient = zmq_client
self.env: gym.Env = None
logging.info("Gym server initialized")
def serve(self):
"""
Run the server.
"""
logging.info("Serving")
try:
self.__serve()
except KeyboardInterrupt:
pass
def _serve(self):
while True:
request = self.zmq_client.receive()
method = request['method']
param = request['param']
if method == 'info':
(action_space_type,
action_space_shape,
observation_space_type,
observation_space_shape) = self.__info()
self.zmq_client.send(InfoMessage(action_space_type,
action_space_shape,
observation_space_type,
observation_space_shape))
elif method == 'make':
self.__make(param['env_name'], param['num_envs'])
self.zmq_client.send(MakeMessage())
elif method == 'reset':
observation = self.__reset()
self.zmq_client.send(ResetMessage(observation))
elif method == 'step':
if 'render' in param:
result = self.__step(
np.array(param['actions']), param['render'])
else:
result = self.__step(np.array(param['actions']))
self.zmq_client.send(StepMessage(result[0],
result[1],
result[2],
result[3]['reward']))
def info(self):
"""
Return info about the currently loaded environment
"""
action_space_type = self.env.action_space.__class__.__name__
if action_space_type == 'Discrete':
action_space_shape = [self.env.action_space.n]
else:
action_space_shape = self.env.action_space.shape
observation_space_type = self.env.observation_space.__class__.__name__
observation_space_shape = self.env.observation_space.shape
return (action_space_type, action_space_shape, observation_space_type,
observation_space_shape)
def make(self, env_name, num_envs):
"""
Makes a vectorized environment of the type and number specified.
"""
logging.info("Making %d %ss", num_envs, env_name)
self.env = make_vec_envs(env_name, 0, num_envs)
def reset(self) -> np.ndarray:
"""
Resets the environments.
"""
logging.info("Resetting environments")
return self.env.reset()
def step(self,
actions: np.ndarray,
render: bool = False) -> Tuple[np.ndarray, np.ndarray,
np.ndarray, np.ndarray]:
"""
Steps the environments.
"""
if isinstance(self.env.action_space, gym.spaces.Discrete):
actions = actions.squeeze(-1)
actions = actions.astype(np.int)
observation, reward, done, info = self.env.step(actions)
reward = np.expand_dims(reward, -1)
done = np.expand_dims(done, -1)
if render:
self.env.render()
return observation, reward, done, info
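    # Private aliases: the double-underscore (name-mangled) calls used above,
    # e.g. self.__step, resolve to these class-level names.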
__info = info
__make = make
__reset = reset
__serve = _serve
__step = step
| 33.379032
| 78
| 0.543368
|
e4ddc7bf09f8e1b91ec6b5df9789ec5339f5732f
| 896
|
py
|
Python
|
setup.py
|
sbousseaden/PythonForWindows
|
8d33e0143f8b7735118dde2e13bcc0175db9adcd
|
[
"BSD-3-Clause"
] | 1
|
2020-08-02T09:35:14.000Z
|
2020-08-02T09:35:14.000Z
|
setup.py
|
sbousseaden/PythonForWindows
|
8d33e0143f8b7735118dde2e13bcc0175db9adcd
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
sbousseaden/PythonForWindows
|
8d33e0143f8b7735118dde2e13bcc0175db9adcd
|
[
"BSD-3-Clause"
] | 1
|
2020-09-21T14:46:44.000Z
|
2020-09-21T14:46:44.000Z
|
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
PKG_NAME = "PythonForWindows"
VERSION = "0.6"
setup(
name = PKG_NAME,
version = VERSION,
author = 'Hakril',
author_email = 'none',
description = 'A codebase aimed to make interaction with Windows and native execution easier',
license = 'BSD',
keywords = 'windows python',
url = 'https://github.com/hakril/PythonForWindows',
packages = ['windows',
'windows.crypto',
'windows.debug',
'windows.generated_def',
'windows.native_exec',
'windows.rpc',
'windows.utils',
'windows.winobject',
'windows.winproxy',
'windows.winproxy.apis'],
classifiers = ['Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7']
)
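# Typical local installation from a source checkout (illustrative, not part of
# the original file):
#   pip install .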
| 30.896552
| 98
| 0.554688
|
fcedd0a353eaffa18bdf8fa44ba5d5a8c4de4d1a
| 269
|
py
|
Python
|
dataset_06/malis/setup.py
|
naibaf7/caffe_neural_models
|
9d372c4bc599029902185e19f89e5c39f842fff7
|
[
"BSD-2-Clause"
] | 28
|
2015-06-11T07:48:29.000Z
|
2019-06-28T01:29:09.000Z
|
dataset_06/malis/setup.py
|
naibaf7/caffe_neural_models
|
9d372c4bc599029902185e19f89e5c39f842fff7
|
[
"BSD-2-Clause"
] | 2
|
2015-10-01T13:14:46.000Z
|
2016-09-28T16:25:35.000Z
|
dataset_06/malis/setup.py
|
naibaf7/caffe_neural_models
|
9d372c4bc599029902185e19f89e5c39f842fff7
|
[
"BSD-2-Clause"
] | 16
|
2015-07-08T18:47:59.000Z
|
2020-03-26T13:48:48.000Z
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("malis", ["malis.pyx", "malis_cpp.cpp"], language='c++',)]
setup(cmdclass = {'build_ext': build_ext}, ext_modules = ext_modules)
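# Typical build step for a Cython extension module like this (illustrative):
#   python setup.py build_ext --inplace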
| 38.428571
| 83
| 0.765799
|
8ab35bccc122e46362187084443f32d25ddaa048
| 2,360
|
py
|
Python
|
duplication_reversal_task/lsnn/toolbox/matplotlib_extension.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | 1
|
2021-07-17T10:34:15.000Z
|
2021-07-17T10:34:15.000Z
|
duplication_reversal_task/lsnn/toolbox/matplotlib_extension.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | null | null | null |
duplication_reversal_task/lsnn/toolbox/matplotlib_extension.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) 2019 the LSNN team, TU Graz
"""
__author__ = 'guillaume'
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import patches
from matplotlib import cm
from collections import OrderedDict
from matplotlib.colors import LinearSegmentedColormap
import scipy as si
def raster_plot(ax,spikes,linewidth=0.8,**kwargs):
n_t,n_n = spikes.shape
event_times,event_ids = np.where(spikes)
max_spike = 10000
event_times = event_times[:max_spike]
event_ids = event_ids[:max_spike]
for n,t in zip(event_ids,event_times):
ax.vlines(t, n + 0., n + 1., linewidth=linewidth, **kwargs)
ax.set_ylim([0 + .5, n_n + .5])
ax.set_xlim([0, n_t])
ax.set_yticks([0, n_n])
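# Minimal usage sketch (illustrative, not part of the original module):
#   spikes = np.random.rand(500, 64) < 0.02  # boolean (time x neuron) array
#   fig, ax = plt.subplots()
#   raster_plot(ax, spikes, color='k')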
def strip_right_top_axis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def arrow_trajectory(ax,data,epsi=0,hdw=.03,lab='',fact=.8,color=(1.,1.,1.,1.),arrow_tick_steps=[],**kwargs):
fc = tuple(np.clip(np.array(color) * fact,0,1.))
ploted_lab = False
X = data[:-1,:]
dX = data[1:,:] - data[:-1,:]
t0 = 0
T = data.shape[0]-1
if epsi > 0:
while sum(dX[T-1]**2) / np.mean( np.sum(dX**2,axis=1)) < epsi: T = T-1
while sum(dX[t0]**2) / np.mean(np.sum(dX**2,axis=1)) < epsi: t0 = t0+1
ax.scatter(data[t0,0],data[t0,1],s=50,facecolor=fc,color=color,**kwargs)
for t in np.arange(t0+1,T):
x,y = X[t-1,:]
dx,dy = dX[t-1,:]
if t == T-1:
headwidth = hdw
head_length = hdw * 1.5
elif t in arrow_tick_steps:
headwidth = hdw
head_length = hdw * 0.15
else:
headwidth = 0.
head_length = 0.
if dx != 0 or dy != 0:
if ploted_lab:
p = patches.FancyArrow(x, y, dx, dy,facecolor=color,edgecolor=fc,head_width=headwidth,head_length=head_length,**kwargs)
else:
ploted_lab = True
p = patches.FancyArrow(x, y, dx, dy,facecolor=color,edgecolor=fc,head_width=headwidth,head_length=head_length,label=lab,**kwargs)
ax.add_patch(p)
def hide_bottom_axis(ax):
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels([])
ax.get_xaxis().set_visible(False)
| 27.44186
| 145
| 0.608898
|
4e3b4e1f5ab08a1a154b819ffbe3424f853569f3
| 15,430
|
py
|
Python
|
scenarios/application_performance_scalabilty.py
|
jpavlav/cbtool
|
ed1e65dd50aa76fc6388999b36f1305c23b708d8
|
[
"Apache-2.0"
] | 69
|
2015-01-29T14:19:57.000Z
|
2021-12-05T13:21:37.000Z
|
scenarios/application_performance_scalabilty.py
|
jpavlav/cbtool
|
ed1e65dd50aa76fc6388999b36f1305c23b708d8
|
[
"Apache-2.0"
] | 132
|
2015-01-07T21:39:11.000Z
|
2022-01-28T19:12:18.000Z
|
scenarios/application_performance_scalabilty.py
|
jpavlav/cbtool
|
ed1e65dd50aa76fc6388999b36f1305c23b708d8
|
[
"Apache-2.0"
] | 50
|
2015-02-10T15:53:22.000Z
|
2022-01-13T06:22:10.000Z
|
#!/usr/bin/env python
#/*******************************************************************************
# Copyright (c) 2012 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#/*******************************************************************************
'''
This scenario exemplifies how to use CloudBench to ascertain application scalability.
At the moment, it is heavily tailored for a simulated cloud (the main goal of
this code is to exemplify the use of the APIs in long-range (several hours to days)
and large-scale (thousands of VMs) experiments).
This assumes you have already attached to a cloud through the GUI or CLI.
'''
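# (Illustrative assumption, not part of the original script: with the standard
#  cbtool CLI a simulated cloud can be attached with something like
#  "cldattach sim MYSIMCLOUD" before this scenario is executed.)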
import itertools
import fnmatch
import json
import os
import pwd
import redis
import prettytable
from sys import path, argv
from time import sleep, time
from optparse import OptionParser
from common import *
def set_provisioning_sla(options, api, workload_attrs, slaprov) :
'''
    Set the provisioning SLA target (in seconds) on every role of the workload type.
'''
for _role in workload_attrs["role_list"].split(',') :
api.typealter(options.cloud_name, options.workload, _role + "_sla_provisioning_target", str(slaprov))
return True
def set_runtime_sla(options, api, workload_attrs, metric, value) :
'''
    Set a runtime SLA target for the given metric: latency/time metrics get a
    "lt" (less-than) limit, bandwidth/throughput metrics get "gt" (greater-than).
'''
if metric.count("latency") or metric.count("_time") :
_limit = "lt"
elif metric.count("bandwidth") or metric.count("throughput") :
_limit = "gt"
else :
_limit = "gt"
api.typealter(options.cloud_name, options.workload, "sla_runtime_target_" + metric, str(value) + '-' + _limit)
return True
def subscribe(options, api, channel) :
'''
    Subscribe to the Redis notification channel and return the experiment
    statistics as soon as a VM publishes a message.
'''
_obj_stor_attr = api.waiton(options.cloud_name,"VM",channel,"getsubscription",1)
redis_conn = redis.Redis(host = _obj_stor_attr["host"], port = 6379, db = _obj_stor_attr["dbid"], decode_responses=True)
redis_conn_pubsub = redis_conn.pubsub()
redis_conn_pubsub.subscribe(_obj_stor_attr["subscription"])
_msg = "Will wait for messages published by VMs..."
print _msg
for message in redis_conn_pubsub.listen() :
if isinstance(message["data"], str) :
_msg = "Message detected, getting statistics from CB"
print _msg
_stats = api.stats(options.cloud_name)
return _stats
def check_stopping_conditions(options, \
api, \
stats, \
minimum_ais,
failed_vms_pct, \
sla_provisioning_violated_vms_pct, \
sla_runtime_violated_vms_pct, \
app_errors_vms_pct, \
cumulative_run_errors_pct) :
'''
    Evaluate the stopping conditions (failed VMs, SLA provisioning/runtime
    violations and application errors) against the current experiment counters.
'''
_stopping_condition = False
print "Checking stopping conditions"
_exp_counters = stats["experiment_counters"]
print "TOTAL AIs: " + _exp_counters["AI"]["arrived"]
if int(_exp_counters["AI"]["arrived"]) < minimum_ais:
print "Do not check stopping conditions with less than " + str(minimum_ais) + " AIs."
return _stopping_condition
print "TOTAL VMs: " + _exp_counters["VM"]["arrived"]
print "FAILED VMs: " + _exp_counters["VM"]["failed"]
print "FAILED VMs stopping condition: ",
_failed_vms_pct = float(_exp_counters["VM"]["failed"])/float(_exp_counters["VM"]["arrived"])
if _failed_vms_pct >= failed_vms_pct :
print " YES ",
_stopping_condition = True
else :
print " NO ",
print " (target " + str(failed_vms_pct*100) + "%, actual " + str(_failed_vms_pct * 100) + "%)"
print "SLA Provisioning violated VMs stopping condition: ",
_sla_provisioning_violated_vms_pct = float(_exp_counters["VM"]["sla_provisioning_violated"]) / float(_exp_counters["VM"]["arrived"])
if _sla_provisioning_violated_vms_pct >= sla_provisioning_violated_vms_pct :
print " YES ",
_stopping_condition = True
else :
print " NO ",
print " (target " + str(sla_provisioning_violated_vms_pct*100) + "%, actual " + str(_sla_provisioning_violated_vms_pct * 100) + "%)"
print "SLA Runtime violated VMs stopping condition: ",
# EXTREMELY IMPORTANT. Here we are relying heavily on the fact that (typically)
# only one VM (the load generator VM) computes SLA RUNTIME violations
_sla_runtime_violated_vms_pct = float(_exp_counters["VM"]["sla_runtime_violated"]) / float(_exp_counters["AI"]["arrived"])
if _sla_runtime_violated_vms_pct >= sla_runtime_violated_vms_pct :
print " YES ",
_stopping_condition = True
else :
print " NO ",
print " (target " + str(sla_runtime_violated_vms_pct*100) + "%, actual " + str(_sla_runtime_violated_vms_pct * 100) + "%)"
print "App Errors VMs stopping condition: ",
# EXTREMELY IMPORTANT. Here we are relying heavily on the fact that (typically)
# only one VM (the load generator VM) computes (APP)ERRORS violations
_app_errors_vms_pct = float(_exp_counters["VM"]["app_errors"]) / float(_exp_counters["AI"]["arrived"])
if _app_errors_vms_pct >= app_errors_vms_pct :
print " YES ",
_stopping_condition = True
else :
print " NO ",
print " (target " + str(app_errors_vms_pct * 100) + "%, actual " + str(_app_errors_vms_pct * 100) + "%)"
print "Cumulative App Errors stopping condition: ",
_total_runs = 0
_total_run_errors = 0
# Get data for ALL AIs currently running with a single API call.
for _ai_metrics in api.get_performance_data(options.cloud_name, None, metric_class = "runtime", object_type = "VM", metric_type = "app", latest = True) :
if "app_load_id" in _ai_metrics :
if "val" in _ai_metrics["app_load_id"] :
_total_runs += int(_ai_metrics["app_load_id"]["val"])
if "app_errors" in _ai_metrics :
if "acc" in _ai_metrics["app_errors"] :
_total_run_errors += int(_ai_metrics["app_errors"]["acc"])
if float(_total_runs):
_cumulative_run_errors_pct = float(_total_run_errors) / float(_total_runs)
else :
_cumulative_run_errors_pct = 0
if _cumulative_run_errors_pct >= cumulative_run_errors_pct :
print " YES ",
_stopping_condition = True
else :
print " NO ",
print " (target " + str(cumulative_run_errors_pct*100) + "%, actual " + str(_cumulative_run_errors_pct * 100) + "%)"
return _stopping_condition
def cli_postional_argument_parser() :
'''
    Parse the positional command-line arguments: cloud name, experiment id,
    workload type and VApp submitter pattern.
'''
_usage = "./" + argv[0] + " <cloud_name> <experiment id> <vapp type> <vapps pattern>"
options, args = cli_named_option_parser()
if len(argv) < 4 :
print _usage
exit(1)
options.cloud_name = argv[1]
options.experiment_id = argv[2]
options.workload = argv[3]
options.pattern = argv[4]
print '#' * 5 + " cloud name: " + str(options.cloud_name)
print '#' * 5 + " experiment id: " + str(options.experiment_id)
print '#' * 5 + " workload: " + str(options.workload)
print '#' * 5 + " pattern: " + str(options.pattern)
return options
def main() :
'''
    Run the application performance and scalability scenario end to end.
'''
error = False
_channel = "EXPERIMENT"
_start = int(time())
options = cli_postional_argument_parser()
api = connect_to_cb(options.cloud_name)
_msg = "Setting expid \"" + options.experiment_id + "\""
print _msg
api.expid(options.cloud_name, options.experiment_id)
_msg = "Obtaining Virtual Application attributes"
print _msg
workload_attrs = api.typeshow(options.cloud_name, options.workload)
_msg = "Setting SLA (provisioning and runtime) targets"
print _msg
# Lets assume that these target SLAs were already obtained by any other mean
set_provisioning_sla(options, api, workload_attrs, 5)
set_runtime_sla(options, api, workload_attrs, "bandwidth", 1000)
set_runtime_sla(options, api, workload_attrs, "throughput", 150)
set_runtime_sla(options, api, workload_attrs, "latency", 210)
_msg = "Setting application status stickyness (i.e., once an AI reports an "
_msg += "error, permanently add this AI to the \"AI in Error\" list)"
print _msg
api.cldalter(options.cloud_name, "vm_defaults", "sticky_app_status", "true")
_msg = "Instructing VMs to publish messages whenever they arrive, update app"
_msg += " metrics or depart)"
print _msg
api.cldalter(options.cloud_name, "vm_defaults", "notification", "true")
api.cldalter(options.cloud_name, "vm_defaults", "notification_channel", _channel)
_iait = "unformIxIxI300I600"
# ------------------ START SIMULATED CLOUD ONLY ----------------------------
# Since we are focusing on a simulated cloud, lets make sure the each new
# simulated AI will create its own performance emitter.
api.cldalter(options.cloud_name, "ai_defaults", "create_performance_emitter", "true")
# ------------------- END SIMULATED CLOUD ONLY -----------------------------
# We will now attach a new Virtual Application Submitter (VAppS).
# Important, the "pattern name" must refer to an EXISTING VAppS. If needed,
# please add on to your private configuration file befor executing the
# cloud attachment through the CLI. See below an illustrative example,
# which assumes that Virtual Applications of the type "nullworkload" will
# be created.
# [AIDRS_TEMPLATES : SIMPLENW]
# TYPE = nullworkload
# MAX_AIS = 8000
# IAIT = uniformIXIXI60I180
# LOAD_LEVEL = uniformIXIXI1I3
# LOAD_DURATION = uniformIXIXI40I60
# LIFETIME = uniformIXIXI200I300
# ------------------ START SIMULATED CLOUD ONLY ----------------------------
# Again, given our focus on simulated clouds, lets "accelerate" both the
# VApp (by producing more performance samples per unit of time) and the
    # VApp Submitter (by dispatching a new VApp every 20 seconds)
api.typealter(options.cloud_name, options.workload, "load_duration", "5")
_iait="20"
# ------------------- END SIMULATED CLOUD ONLY -----------------------------
_msg = "Setting Virtual Application Submmiter inter-arrival time to " + str(_iait)
_msg += " seconds."
print _msg
api.patternalter(options.cloud_name, options.pattern, "iait", _iait)
_msg = "Setting Virtual Application Submmiter (AI) lifetime time to 10000000000000"
_msg += " seconds."
print _msg
api.patternalter(options.cloud_name, options.pattern, "lifetime", "10000000000000")
# ------------------ START SIMULATED CLOUD ONLY ----------------------------
# In the case of simulated clouds (and ONLY for simulated clouds) the time
# to boot is controlled by the parameter "CHECK_BOOT_COMPLETE". Lets make
# sure that the deployment time for these VMs stay well within the defined
# SLA for provisioning (defined in the function set_provisioning_sla).
api.cldalter(options.cloud_name, "vm_defaults", "check_boot_complete", "wait_for_0")
# Lets also set "good" (i.e., well within SLA, as defined in the "set_runtime_sla"
# functions. Again, keep in mind that these are needed for "Simulated" clouds
# only.
api.typealter(options.cloud_name, options.workload, "bandwidth_value", "uniformIXIXI1200I1500")
api.typealter(options.cloud_name, options.workload, "throughput_value", "uniformIXIXI200I330")
api.typealter(options.cloud_name, options.workload, "latency_value", "uniformIXIXI100I150")
api.typealter(options.cloud_name, options.workload, "errors_value", "0")
_change_ai_template = True
# ------------------- END SIMULATED CLOUD ONLY -----------------------------
_msg = "\nAttaching Virtual Application submmiter\n"
print _msg
api.appdrsattach(options.cloud_name, options.pattern)
_stop = False
while not _stop:
# Now we wait, and check whenever the number of AIs in "ARRIVING" state
# is equal zero. The counter state is updated every 10 seconds, waiting
# at the most 1 minute.
_check_interval = 10
_max_check = 60
'''
_msg = "Waiting until the counter \"AI ARRIVING\" is equal zero, waiting "
_msg += str(_check_interval) + " seconds between updates, up to "
_msg += str(_max_check) + " seconds."
print _msg
_counters = api.waituntil(options.cloud_name, \
"AI", \
"ARRIVING", \
0, \
"decreasing", \
_check_interval, \
_max_check)
'''
_counters = subscribe(options, api, _channel)
_min_ais = 3
# ------------------ START SIMULATED CLOUD ONLY ----------------------------
# We want the cloud to misbehave after a certain number of AIs are present.
# There are multiple possibilities for errors.
if int(_counters["experiment_counters"]["AI"]["arrived"]) > _min_ais + 2 and _change_ai_template :
_msg = "\nChanging Virtual Application defaults in order to force the"
_msg += " the reaching of an stopping condition\n"
print _msg
# SLA provisioning violation
#api.cldalter(options.cloud_name, "vm_defaults", "check_boot_complete", "wait_for_7")
# SLA runtime violation
api.typealter(options.cloud_name, options.workload, "bandwidth_value", "uniformIXIXI500I600")
# App Errors VMs
#api.typealter(options.cloud_name, workload, "errors_value", "1")
_change_ai_template = False
# ------------------- END SIMULATED CLOUD ONLY -----------------------------
_stop = check_stopping_conditions(options, \
api, \
_counters, \
_min_ais,
0.05, \
0.06, \
0.02, \
0.04, \
0.1)
_msg = "Total experiment time is " + str(int(time()) - _start) + " seconds... \n\n"
print _msg
_msg = "\nDetaching Virtual Application submmiter\n"
print _msg
api.appdrsdetach(options.cloud_name, "all")
_msg = "\nDetaching all Virtual Applications\n"
print _msg
api.appdetach(options.cloud_name, "all")
if __name__ == '__main__':
main()
| 39.66581
| 157
| 0.607129
|
aedc647aff36e47ca57f42620fd2dd1123b74bc0
| 1,004
|
py
|
Python
|
server.py
|
objectc/smart_remote
|
86a71c76fd04461e3071b285eb3683f643902aaf
|
[
"MIT"
] | 1
|
2020-08-16T15:41:04.000Z
|
2020-08-16T15:41:04.000Z
|
server.py
|
objectc/smart_remote
|
86a71c76fd04461e3071b285eb3683f643902aaf
|
[
"MIT"
] | null | null | null |
server.py
|
objectc/smart_remote
|
86a71c76fd04461e3071b285eb3683f643902aaf
|
[
"MIT"
] | 1
|
2021-03-01T11:13:25.000Z
|
2021-03-01T11:13:25.000Z
|
import logging
import os
from flask import Flask
from flask import request
from flask import make_response
from flask import abort, jsonify
app = Flask(__name__)
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
COMMANDS = {
'POWER': 'POWER',
'COOL': 'COOL',
'SPEED_UP': 'SPEED_UP',
'SPEED_DOWN': 'SPEED_DOWN',
'TEMP_UP': 'TEMP_UP',
'TEMP_DOWN': 'TEMP_DOWN',
'TIMER': 'TIMER',
'ROTATE': 'ROTATE',
'NARROW': 'NARROW',
'WIDE': 'WIDE'
}
@app.route("/dyson", methods=['GET'])
def getDyson():
command = request.args.get('command')
if command:
if command in COMMANDS.keys():
command_str = 'irsend send_once Dyson ' + COMMANDS[command]
os.system(command_str)
resData = {'msg': 'success'}
response = jsonify(resData)
return response
return jsonify({'msg': 'no commands found'})
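# Example request once the server is running (illustrative; the address is a
# placeholder):
#   curl "http://<device-ip>:5000/dyson?command=POWER"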
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| 23.904762
| 71
| 0.624502
|
9760fdeba3e5299a32a8bb669cbd50617014b41e
| 1,789
|
py
|
Python
|
src/svs/assert_hint.py
|
inacademia-development/svs
|
a6c91d42b5abd09a7781a417cd26127fd2fb6b01
|
[
"Apache-2.0"
] | 1
|
2018-01-30T07:36:02.000Z
|
2018-01-30T07:36:02.000Z
|
src/svs/assert_hint.py
|
InAcademia/svs
|
a6c91d42b5abd09a7781a417cd26127fd2fb6b01
|
[
"Apache-2.0"
] | 10
|
2020-01-21T15:02:49.000Z
|
2020-10-15T08:26:27.000Z
|
src/svs/assert_hint.py
|
inacademia-development/svs
|
a6c91d42b5abd09a7781a417cd26127fd2fb6b01
|
[
"Apache-2.0"
] | 1
|
2018-08-02T12:31:22.000Z
|
2018-08-02T12:31:22.000Z
|
"""
Micro Service that asserts the requested idp_hint
"""
import hashlib
import logging
from satosa.internal_data import InternalResponse
from satosa.micro_services.base import ResponseMicroService
logger = logging.getLogger('satosa')
def inacademia_hinting_hash(data):
"""
Hash data the same way this is done in the inacademia-hinting code.
This code should not be changed on its own - if needed, it should be
changed in-sync with the inacademia-hinting code.
"""
raw = data.encode("utf-8") if isinstance(data, str) else data
hash = hashlib.sha1(raw).hexdigest()
return hash
class AssertHint(ResponseMicroService):
"""
idp_hint asserting micro_service
"""
def __init__(self, config, internal_attributes, *args, **kwargs):
super().__init__(*args, **kwargs)
self.internal_attribute = config.get('internal_attribute', 'idp_used')
logger.info(f"AssertHint micro_service is active {self.internal_attribute}")
def process(self, context, internal_response):
idp_hint_key = context.state['InAcademia'].get('idp_hint_key', None)
fresh_idp_hint_key = context.state['InAcademia'].get('fresh_idp_hint_key', None)
logger.debug(f"AssertHint requested idp_hint: {idp_hint_key}, fresh_idp_hint: {fresh_idp_hint_key}")
if fresh_idp_hint_key is not None:
issuer = internal_response.auth_info.issuer
logger.info(f"AssertHint issuer: {issuer}")
issuer_hash = inacademia_hinting_hash(issuer)
#logger.info(f"AssertHint issuer hash: {issuer_hash}")
if issuer_hash == fresh_idp_hint_key:
internal_response.attributes[self.internal_attribute] = [idp_hint_key]
return super().process(context, internal_response)
| 37.270833
| 108
| 0.708776
|
866a91a2a61280a89bd4dbe72f158da89c1622c4
| 6,040
|
py
|
Python
|
instagrapi/types.py
|
jhd3197/instagrapi
|
cf05591ba8397856871ce7daae5458cfc4f527da
|
[
"MIT"
] | 1
|
2020-12-29T12:24:03.000Z
|
2020-12-29T12:24:03.000Z
|
instagrapi/types.py
|
jhd3197/instagrapi
|
cf05591ba8397856871ce7daae5458cfc4f527da
|
[
"MIT"
] | null | null | null |
instagrapi/types.py
|
jhd3197/instagrapi
|
cf05591ba8397856871ce7daae5458cfc4f527da
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel, FilePath, HttpUrl
class Resource(BaseModel):
pk: int
video_url: Optional[HttpUrl] # for Video and IGTV
thumbnail_url: HttpUrl
media_type: int
class User(BaseModel):
pk: int
username: str
full_name: str
is_private: bool
profile_pic_url: HttpUrl
is_verified: bool
media_count: int
follower_count: int
following_count: int
biography: Optional[str] = ""
external_url: Optional[HttpUrl]
is_business: bool
class Account(BaseModel):
pk: int
username: str
full_name: str
is_private: bool
profile_pic_url: HttpUrl
is_verified: bool
biography: Optional[str] = ""
external_url: Optional[HttpUrl]
is_business: bool
birthday: Optional[str]
phone_number: Optional[str]
gender: Optional[int]
email: Optional[str]
class UserShort(BaseModel):
pk: int
username: Optional[str]
full_name: Optional[str] = ""
profile_pic_url: Optional[HttpUrl]
# is_private: bool
# is_verified: bool
stories: List = []
class Usertag(BaseModel):
user: UserShort
x: float
y: float
class Location(BaseModel):
pk: Optional[int]
name: str
address: Optional[str] = ""
lng: Optional[float]
lat: Optional[float]
external_id: Optional[int]
external_id_source: Optional[str]
precision: Optional[int]
# address_json: Optional[dict] = {}
# profile_pic_url: Optional[HttpUrl]
# directory: Optional[dict] = {}
class Media(BaseModel):
pk: int
id: str
code: str
taken_at: datetime
media_type: int
product_type: Optional[str] = "" # igtv or feed
thumbnail_url: Optional[HttpUrl]
location: Optional[Location] = None
user: UserShort
comment_count: int
like_count: int
has_liked: Optional[bool]
caption_text: str
usertags: List[Usertag]
video_url: Optional[HttpUrl] # for Video and IGTV
view_count: Optional[int] = 0 # for Video and IGTV
video_duration: Optional[float] = 0.0 # for Video and IGTV
title: Optional[str] = ""
resources: List[Resource] = []
class MediaOembed(BaseModel):
title: str
author_name: str
author_url: str
author_id: int
media_id: str
provider_name: str
provider_url: HttpUrl
type: str
width: Optional[int] = None
height: Optional[int] = None
html: str
thumbnail_url: HttpUrl
thumbnail_width: int
thumbnail_height: int
can_view: bool
class Collection(BaseModel):
id: str
name: str
type: str
media_count: int
class Comment(BaseModel):
pk: int
text: str
user: UserShort
created_at_utc: datetime
content_type: str
status: str
has_liked: Optional[bool]
like_count: Optional[int]
class Hashtag(BaseModel):
id: int
name: str
media_count: Optional[int]
profile_pic_url: Optional[HttpUrl]
class StoryMention(BaseModel):
user: UserShort
x: Optional[float]
y: Optional[float]
width: Optional[float]
height: Optional[float]
class StoryHashtag(BaseModel):
hashtag: Hashtag
x: Optional[float]
y: Optional[float]
width: Optional[float]
height: Optional[float]
class StoryLocation(BaseModel):
location: Location
x: Optional[float]
y: Optional[float]
width: Optional[float]
height: Optional[float]
class StorySticker(BaseModel):
id: str
type: Optional[str] = 'gif'
x: float
y: float
z: Optional[int] = 1000005
width: float
height: float
rotation: Optional[float] = 0.0
class StoryBuild(BaseModel):
mentions: List[StoryMention]
path: FilePath
class StoryLink(BaseModel):
webUri: HttpUrl
class Story(BaseModel):
pk: int
id: str
code: str
taken_at: datetime
media_type: int
product_type: Optional[str] = ""
thumbnail_url: Optional[HttpUrl]
user: UserShort
video_url: Optional[HttpUrl] # for Video and IGTV
video_duration: Optional[float] = 0.0 # for Video and IGTV
mentions: List[StoryMention]
links: List[StoryLink]
hashtags: List[StoryHashtag]
locations: List[StoryLocation]
stickers: List[StorySticker]
class DirectMessage(BaseModel):
id: int # e.g. 28597946203914980615241927545176064
user_id: Optional[int]
thread_id: Optional[int]
timestamp: datetime
item_type: Optional[str]
is_shh_mode: Optional[bool]
reactions: Optional[dict]
text: Optional[str]
media_share: Optional[Media]
reel_share: Optional[dict]
story_share: Optional[dict]
felix_share: Optional[dict]
placeholder: Optional[dict]
class DirectResponse(BaseModel):
unseen_count: Optional[int]
unseen_count_ts: Optional[int]
status: Optional[str]
class DirectThread(BaseModel):
pk: int # thread_v2_id, e.g. 17898572618026348
id: int # thread_id, e.g. 340282366841510300949128268610842297468
messages: List[DirectMessage]
users: List[UserShort]
inviter: UserShort
left_users: List[UserShort]
admin_user_ids: list
last_activity_at: datetime
muted: bool
is_pin: bool
named: bool
canonical: bool
pending: bool
archived: bool
thread_type: str
thread_title: str
folder: int
vc_muted: bool
is_group: bool
mentions_muted: bool
approval_required_for_new_members: bool
input_mode: int
business_thread_folder: int
read_state: int
is_close_friend_thread: bool
assigned_admin_id: int
shh_mode_enabled: bool
last_seen_at: dict
def is_seen(self, user_id: int):
"""Have I seen this thread?
:param user_id: You account user_id
"""
user_id = str(user_id)
own_timestamp = int(self.last_seen_at[user_id]["timestamp"])
timestamps = [
(int(v["timestamp"]) - own_timestamp) > 0
for k, v in self.last_seen_at.items()
if k != user_id
]
return not any(timestamps)
| 22.706767
| 70
| 0.671026
|
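The types above are plain pydantic models, so validating a raw API payload is just a matter of constructing the model. Below is a minimal, self-contained sketch: the field values are invented, the two classes are re-declared locally so the snippet runs without installing instagrapi, and pydantic v1-style behaviour (which this file targets) is assumed.

from typing import List, Optional

from pydantic import BaseModel, HttpUrl, ValidationError


class UserShort(BaseModel):
    # Stand-alone copy of the UserShort model above, for demonstration only.
    pk: int
    username: Optional[str]
    full_name: Optional[str] = ""
    profile_pic_url: Optional[HttpUrl]
    stories: List = []


class Usertag(BaseModel):
    # Stand-alone copy of the Usertag model above.
    user: UserShort
    x: float
    y: float


# Only "pk" has no default; the URL string is parsed into an HttpUrl,
# and full_name / stories fall back to their declared defaults.
user = UserShort(pk=123456789, username="example_user",
                 profile_pic_url="https://example.com/pic.jpg")
tag = Usertag(user=user, x=0.5, y=0.5)
print(user.username, tag.x, tag.y)

# Malformed input raises pydantic.ValidationError instead of passing through silently.
try:
    UserShort(pk="not-a-number", profile_pic_url="not-a-url")
except ValidationError as exc:
    print(exc.errors()[0]["loc"], exc.errors()[0]["type"])

The larger models such as Media, Story and DirectThread work the same way; they simply declare more fields, and DirectThread adds the small is_seen() helper shown above on top of the validated data.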