| hexsha (stringlengths 40-40) | size (int64, 4-1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4506aa49b0d0ef8fc0b5286ac4ac7e1abf6f84cf | 23,526 | py | Python | CIG96-HI-cal/cal.cig96_28jul13b.py | AMIGA-IAA/deep-obs | 1526647f9949fbdfd437e68d037c8ea0ce5053fe | ["MIT"] | null | null | null | CIG96-HI-cal/cal.cig96_28jul13b.py | AMIGA-IAA/deep-obs | 1526647f9949fbdfd437e68d037c8ea0ce5053fe | ["MIT"] | null | null | null | CIG96-HI-cal/cal.cig96_28jul13b.py | AMIGA-IAA/deep-obs | 1526647f9949fbdfd437e68d037c8ea0ce5053fe | ["MIT"] | null | null | null |
# -*- coding: iso-8859-1 -*-
### EVLA DATA REDUCTION
### Project: 13A-341
### Dataset date: 28jul13b
### Original dataset name: 13A-341.sb24028522.eb24167262.56501.354832060184.ms
### Renamed as: cig96_11.ms
###
### Configuration: C (6/10)
# ===============================================================================
### CALIBRATION ###
# ===============================================================================
### Import of EVLA data from SDM format:
########################################
# importevla(asdm='cig96_11', vis='cig96_11.ms')
# The original data file is converted to CASA format and renamed: "cig96_11.ms"
# ===============================================================================
### Listobs inspection:
#######################
listobs(vis='cig96_11.ms')
# Listobs output summary: 27 antennae, 9 spectral windows, 2 polarizations (RR, LL), 1 dummy scan, 3 sources (2 calibrators + 1 target), 2048 channels of 7.8 kHz each for spw=0
# Spectral Window 0 (line):
# SpwID Name #Chans Frame Ch0(MHz) ChanWid(kHz) TotBW(kHz) BBC Num Corrs
# 0 A0C0#0 2048 TOPO 1404.995 7.812 16000.0 12 RR LL
# Sources: 3
# ID Code Name RA Decl Epoch SrcId nRows
# 0 NONE 0137+331=3C48 01:37:41.299431 +33.09.35.13299 J2000 0 28431
# 1 K 0137+331=3C48 01:37:41.299431 +33.09.35.13299 J2000 1 338013
# 2 D J0238+1636 02:38:38.930108 +16.36.59.27470 J2000 2 202176
# 3 NONE CIG 96 02:15:27.600000 +06.00.09.00001 J2000 3 1658475
### BANDPASS and FLUX calibrator: 1 0137+331 = 3C48
### PHASE calibrator: 2 J0238+1636
### TARGET: 3 CIG 96
# ===============================================================================
### Data inspection via plotms:
###############################
# Prior to anything else, there is a dummy scan, field=0, so we remove it:
# flagdata(vis='cig96_11.ms', mode='manual', field='0')
# We run plotms to inspect the general aspect of the data:
#plotms(vis='cig96_11.ms',xaxis='channel',yaxis='amp',field='1',spw='0',coloraxis='field',avgtime='1e9')
#plotms(vis='cig96_11.ms',xaxis='time',yaxis='amp',field='1',spw='0',coloraxis='field',avgchannel='1e9')
# ===============================================================================
# Antenna position correction:
##############################
gencal(vis='cig96_11.ms', caltable='cig96_11.ms.antpos', caltype='antpos')
# ===============================================================================
### Plot with the spatial distribution of the antennae:
#######################################################
plotants(vis='cig96_11.ms',figfile='cig96_11.ms.plotants.png')
# ===============================================================================
### Flagging:
#############
# The log file provides some insight into the antennae status, corruption, etc.:
# Antenna(s) 19 (Data: Corrupted):
# L-band visibility amplitudes below normal due to receiver LNA problem.
# so we flag it.
flagdata(vis='cig96_11.ms', mode='manual', field='', antenna='ea19', flagbackup=True)
### Shadowing correction over the whole .ms:
flagdata(vis='cig96_11.ms', mode='shadow', flagbackup=True)
# Percentage of data flagged in table selection: %
### Zero clipping correction over the whole .ms:
flagdata(vis='cig96_11.ms', mode='clip', clipzeros=True, flagbackup=True)
# Percentage of data flagged in table selection: 0.0145235%
### Field 1 (bandpass and flux calibrator): quacking of the first minutes (all antennas showing low amplitudes):
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='3&5', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='3&13', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='3&15', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='5&15', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='5&13', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='13&15', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='11&22', correlation='LL', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='6&10', correlation='RR', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='5&6', correlation='RR', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='6&11', correlation='RR', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='6&19', correlation='RR', flagbackup=True)
flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='7&23', correlation='RR', flagbackup=True)
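# (A sketch, not executed here: the twelve per-baseline flags above could equivalently be issued as
# two flagdata calls, one per correlation, using ';'-separated baseline selections, in the same way
# the interference baselines are flagged further below.)
# flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='3&5;3&13;3&15;5&15;5&13;13&15;11&22', correlation='LL', flagbackup=True)
# flagdata(vis='cig96_11.ms', mode='manual', scan='2', antenna='6&10;5&6;6&11;6&19;7&23', correlation='RR', flagbackup=True)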
# There are 2 points around 0.5 that could be clipped, but the command does not do what we expect it to:
# flagdata(vis='cig96_11.ms', mode='clip', field='1', spw='0:354~355', clipminmax=[0.00, 0.40], clipoutside=True, clipzeros=False, flagbackup=True)
# Decision: we let them be.
### Field 2 (phase calib.): needs quacking; antennae 3 and 5 in correlation LL show some high values and might need removal:
# We could remove the upper amplitude values:
# flagdata(vis='cig96_11.ms', mode='manual', field='2', antenna='3&5', correlation='LL', flagbackup=True)
# Decision: we let them be.
# Quacking needed:
flagdata(vis='cig96_11.ms', mode='quack', field='2', quackinterval=5.0, quackmode='beg')
# It has RFI around channel ~350:
flagdata(vis='cig96_11.ms', mode='manual', field='2', spw = '0:330~380')
# and a small one in ~1250:
flagdata(vis='cig96_11.ms', mode='manual', field='2', antenna='4&8', spw = '0:1253')
# and the MW HI emission in ~1960:
flagdata(vis='cig96_11.ms', mode='manual', field='2', spw = '0:1940~1975')
### Field 3 (CIG96) flagging:
# quacking of the first 10 seconds of the rest of the scans:
flagdata(vis='cig96_11.ms', mode='quack', field='3', quackinterval=10.0, quackmode='beg')
# same RFI in ~350 and MW emission as in field 2; no RFI in ~1250:
flagdata(vis='cig96_11.ms', mode='manual', field='3', spw = '0:330~380')
flagdata(vis='cig96_11.ms', mode='manual', field='3', spw = '0:1940~1975')
# Enough flagging for now.
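# Optional check (a sketch, not part of the original run): flagdata in mode='summary' returns a
# dictionary with the flagged and total visibility counts, overall and per field/antenna/spw:
# flagsummary = flagdata(vis='cig96_11.ms', mode='summary')
# print(100.0 * flagsummary['flagged'] / flagsummary['total'])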
# ===============================================================================
### Calibration:
### ============
### Reference antenna selection:
################################
# After checking plotants, we select this one because it is quite central:
refant='ea10'
# ===============================================================================
### Opacity and gaincurve corrections:
######################################
# Should we do opacities correction?
# myTau = plotweather(vis='cig96_11.ms', doPlot=T)
# display cig96_11.ms.plotweather.png
# gencal(vis='cig96_11.ms', caltype='opac', caltable='cig96_11.ms.opcac', parameter=myTau)
# ANSWER: no, not needed for HI observations; the atmosphere barely has any influence at HI frequencies. Only the ionosphere matters, at very low elevations and around dawn/sunset.
# Should we do gaincurve correction?
# It takes into account the elevation of each antenna, see the elevations plot: CIG96 and the phase calibrator cover a large elevation range.
# gencal(vis='cig96_11.ms', caltype='gceff', caltable='cig96_11.ms.gaincurve')
# ANSWER: no, it might even harm the observations due to the possible crosstalk at low elevations and the model it is based on.
# ===============================================================================
### Delays correction:
######################
# Prior to the bandpass correction, we need to correct the phase variations with time (they are large).
# We use the bandpass calibrator in field 1 with the antenna position correction table:
gaincal(vis='cig96_11.ms', caltable='cig96_11.ms.delays', field='1', refant='ea10', gaintype='K', gaintable=['cig96_11.ms.antpos'])
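# Optional check (sketch): inspect the derived delays per antenna; they should be at most a few ns:
# plotcal(caltable='cig96_11.ms.delays', xaxis='antenna', yaxis='delay')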
# ===============================================================================
### Flux density:
#################
# Our flux calibrator is 3C48, in field = 1.
#
# First, we check the list of available models:
setjy(vis='cig96_11.ms', listmodels=T)
# Candidate modimages (*) at pepino (dae66) in path:
# /Applications/CASA.app/Contents/data/nrao/VLA/CalModels/
#
# Candidate modimages (*) at NRAO local workstations in path:
# /home/casa/packages/RHEL5/release/casapy-42.1.29047-001-1-64b/data/nrao/VLA/CalModels/
#
# The model chosen has to be in accordance with the calibrator and band selected: 3C48 in L band:
setjy(vis='cig96_11.ms', field='1', modimage='/mnt/scops/data/data/paramimo/casapy-42.2.30986-1-64b/data/nrao/VLA/CalModels/3C48_L.im')
# ===============================================================================
### Bandpass calibration:
#########################
# For the bandpass and flux (field=1) and phase (field=2) calibrators we use solution interval
# time of solint='5s':
gaincal(vis='cig96_11.ms', caltable='cig96_11.ms.bpphase5s', field='1', refant='ea10', calmode='p', solint='5s', minsnr=5.0, gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays'])
# The solution interval of 5 seconds has been chosen following the VLA exposure time
# calculator, using the parameters:
#
# Freq. = 1.42 GHz
# Medium elevation (summer time)
# Bandwidth = 16 MHz (see listobs)
# RMS noise = 1.5 mJy
#
# The calculator estimates that less than one second (0.2 s) is enough to reach such an SNR or even higher,
# so we set solint='5s', since 5 s is the shortest integration time of our data. This means that
# there should be a solution for every interval.
# We check the phase vs. time figures to confirm that the phase is now much flatter:
#plotcal(caltable='cig96_11.ms.bpphase5s', xaxis='time', yaxis='phase')
# Apply phase solutions on the fly:
bandpass(vis='cig96_11.ms', caltable='cig96_11.ms.bandpass5s', field='1', refant='ea10', solint='inf', solnorm=T, minsnr=3.0, minblperant=3, gaintable=['cig96_11.ms.bpphase5s', 'cig96_11.ms.antpos', 'cig96_11.ms.delays'], interp=['nearest'])
# We check again the solutions:
#plotcal(caltable='cig96_11.ms.bpphase5s', field='1', xaxis='time', yaxis='phase')
# The phase scatter is smaller; the phase shows a flatter behaviour now.
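# Optional check (sketch): inspect the bandpass solutions themselves, amplitude and phase vs. channel:
# plotcal(caltable='cig96_11.ms.bandpass5s', xaxis='chan', yaxis='amp', iteration='antenna', subplot=331)
# plotcal(caltable='cig96_11.ms.bandpass5s', xaxis='chan', yaxis='phase', iteration='antenna', subplot=331)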
# ===============================================================================
### Phase and amplitude calibration:
####################################
# Phase calibration for the calibrators, fields 1 and 2 (spw 0, where the line is):
gaincal(vis='cig96_11.ms',caltable='cig96_11.ms.intphase', field='1,2', refant='ea10', calmode='p', solint='5s', minsnr=5.0, gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s'])
# We create another calibration table, using solint='inf', i.e., finding one solution over the whole scan,
# to use FOR THE TARGET later on:
# time.sleep(5)
gaincal(vis='cig96_11.ms', caltable='cig96_11.ms.scanphase', field='1,2', refant='ea10', calmode='p', solint='inf', minsnr=5.0, gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s'])
# Derive amplitude solutions:
# time.sleep(5)
gaincal(vis='cig96_11.ms', caltable='cig96_11.ms.amp', field='1', refant='ea10', calmode='ap', solint='inf', minsnr=5.0, gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s', 'cig96_11.ms.intphase'], gainfield=['','','','1'],append=True)
gaincal(vis='cig96_11.ms', caltable='cig96_11.ms.amp', field='2', refant='ea10', calmode='ap', solint='inf', minsnr=5.0, gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s', 'cig96_11.ms.intphase'], gainfield=['','','','2'],append=True)
# I check the tables with plotcal and some things do not look as expected: the first data points show very
# low intensity compared to the rest, and some plots show only one point:
# #plotcal(caltable='cig96_11.ms.amp', xaxis='time', yaxis='amp', iteration='antenna', subplot=331)
# #plotcal(caltable='cig96_11.ms.amp', xaxis='time', yaxis='amp', iteration='baseline', subplot=331)
# Now I derive the flux for the rest of the sources. Note that the flux table REPLACES the amp.gcal
# table for any future application of the calibration to the data (i.e., it is not an incremental table)
# UNLESS we set incremental=T (as of CASA 4.0):
myflux = fluxscale(vis='cig96_11.ms', caltable='cig96_11.ms.amp', fluxtable='cig96_11.ms.flux', reference='1', transfer='2', incremental=False)
# Result:
# Flux density for J0238+1636 in SpW=0 (freq=1.40523e+09 Hz) is: 0.736132 +/- 0.00357112 (SNR = 206.135, N = 52)
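# fluxscale also returns its results as a Python dictionary (stored in myflux above). A sketch of how
# the derived value could be pulled out; the exact keys vary between CASA versions, here assuming a
# layout with field and spw ids as nested string keys:
# print(myflux['2']['0']['fluxd'][0])   # derived flux density (Jy) of J0238+1636 in spw 0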
# ===============================================================================
### Application of the calibration to the .ms file:
###################################################
# Note: In all applycal steps we set calwt=F. It is very important to turn off this parameter which
# determines if the weights are calibrated along with the data. Data from antennae with better receiver
# performance and/or longer integration times should have higher weights, and it can be advantageous to
# factor this information into the calibration. During the VLA era, meaningful weights were available for
# each visibility. However, at the time of this observation, the VLA was not yet recording the information
# necessary to calculate meaningful weights. Since these data weights are used at the imaging stage you
# can get strange results from having calwt=T when the input weights are themselves not meaningful,
# especially for self-calibration on resolved sources (your flux calibrator and target, for example).
applycal(vis='cig96_11.ms', field='1', gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s', 'cig96_11.ms.intphase', 'cig96_11.ms.flux'], gainfield=['','','','1',''], calwt=F)
# time.sleep(5)
applycal(vis='cig96_11.ms', field='2', gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s', 'cig96_11.ms.intphase', 'cig96_11.ms.flux'], gainfield=['','','','2',''], calwt=F)
# time.sleep(5)
# For the target sources we use the scanphase.gcal table:
applycal(vis='cig96_11.ms', field='3', gaintable=['cig96_11.ms.antpos', 'cig96_11.ms.delays', 'cig96_11.ms.bandpass5s', 'cig96_11.ms.scanphase', 'cig96_11.ms.flux'], gainfield=['','','','2',''], calwt=F)
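# Optional check (sketch): inspect the corrected column to confirm the calibration behaves as expected:
# plotms(vis='cig96_11.ms', xaxis='channel', yaxis='amp', ydatacolumn='corrected', field='2', spw='0', coloraxis='antenna1', avgtime='1e9')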
# ===============================================================================
### Regridding of the .ms to a new frame:
#########################################
# cvel(vis='cig96_11.ms', outputvis='cig96_11.ms.cvel', mode='velocity', field='', spw='0', restfreq='1.42040575177GHz', outframe='LSRK', veltype='radio')
#
#
# # ===============================================================================
#
# ### Splitting of CIG96:
# #######################
#
# # Splitting of field = 3 (CIG96) calibrated data, in the spw=0:
#
# split(vis='cig96_11.ms.cvel', datacolumn='corrected', outputvis='cig96_11.cvel.corr.ms', field='3', spw='0:400~1700')
#
# # Now, for the new corr.ms file:
# # field = 3 is stored as: field = 0
# # spw = 0 keeps being spw = 0
#
# # Split of the whole spw:
#
# # split(vis='cig96_11.ms', datacolumn='corrected', outputvis='cig96_11.corr.spw0.ms', field='3', spw='0')
#
# # Emission in the higher frequency:
# #
# # The emission line is seen in 1.4203 GHz. We split the fields in spw=0 from channel 780 to 1600:
#
# # split(vis='cig96_11.ms', datacolumn='corrected', outputvis='cig96_11.corr.emission.ms', field='3', spw='0:780~1600')
#
# # Splitting CIG96 fields in spw=0 with time binning of 20s and channel width of 10 channels:
#
# split(vis='cig96_11.ms.cvel', datacolumn='corrected', outputvis='cig96_11.corr.20s.10chan.ms', width=10, timebin='20s', field='3', spw='0:451~1700')
#
# # ===============================================================================
#
# ### UV continuum subtraction:
# #############################
#
# # Since we have chopped out the first 390 channels where the RFI region was (see spw definition in the
# # split command) and the last 350 channels, the total number of channels has changed: from 2048 we
# # now have ~1350.
# #
# # Now, we remove the continuum from the source by fitting it over the channel ranges indicated below:
#
# uvcontsub(vis='cig96_11.cvel.corr.ms', field='0', fitspw='0:50~350;900~1050', fitorder=1)
# uvcontsub(vis='cig96_11.corr.20s.10chan.ms', field='0', fitspw='0:15~30;90~105', fitorder=1)
#
# # Also, for the second emission at 1.4203 GHz, the emission lies in a very noisy region, so the continuum
# # subtraction won't be perfect and will likely be noisier at higher frequencies, since the continuum is
# # selected in channels 780~1000:
#
# # uvcontsub(vis='cig96_11.corr.emission.ms', field='0', fitspw='0:0~510;710~1300')
#
# # For the time-channel binned file, we subtract as follows:
#
# # uvcontsub(vis='cig96_11.corr.20s.10chan.ms', field='0', fitspw='0:45~93;110~181')
#
# # For the whole spw 0 we use as extended a continuum as possible:
#
# # uvcontsub(vis='cig96_11.corr.spw0.ms', field='0', fitspw='0:300~365;396~460;490~920;1110~1520')
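# # Helper sketch (plain Python; the numbers come from the listobs summary above: Ch0 = 1404.995 MHz
# # and 16 MHz / 2048 channels = 7.8125 kHz per channel) to translate the fitspw channel ranges of the
# # original, unregridded spw 0 into topocentric frequencies:
# #
# # def spw0_chan_to_freq_mhz(chan, ch0_mhz=1404.995, width_mhz=0.0078125):
# #     return ch0_mhz + chan * width_mhz
# #
# # e.g. the HI rest frequency 1420.4058 MHz falls near channel (1420.4058 - 1404.995) / 0.0078125 ~ 1973,
# # consistent with the MW HI emission flagged around channels 1940~1975 above.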
#
# # ===============================================================================
#
# ### Clean of the continuum subtracted data:
# ###########################################
#
# # Natural weighting, with a factor of 6.25064 for the channel smoothing that will be necessary to combine with VLA data:
#
# clean(vis='cig96_11.cvel.corr.ms.contsub', imagename='cig96_11.cvel.corr.contsub.natural.line', field='0', spw='0:450~800', mode='frequency', start=450, nchan=351, niter=10000, width='48.830kHz', threshold='1.5mJy', interactive=T, npercycle=100, imsize=[200,200], phasecenter='', cell='8.0arcsec', restfreq='1.42040575177GHz', weighting='natural', usescratch=T)
#
# # Beam size:
#
# # WARN MFCleanImageSkyModel Clean not converging
# # Successfully deconvolved image
# # Beam used in restoration: 99.5838 by 47.044 (arcsec) at pa -56.6383 (deg)
#
# # Weighting uniform: /usr/local/casapy-42.1.29047-001-1-64b/casapy
#
# clean(vis='cig96_11.corr.ms.contsub', imagename='cig96_11.corr.v3.contsub.uniform.line', field='0', spw='0:510~730', mode='channel', start=0, niter=10000, threshold='1.2mJy', interactive=T, npercycle=100, imsize=[200,200], phasecenter='', cell='15.0arcsec', restfreq='1.4204GHz', weighting='uniform', usescratch=T)
#
# # Beam size:
#
# # Weighting Briggs rob=0.0:
#
# clean(vis='cig96_11.corr.ms.contsub', imagename='cig96_11.corr.v3.contsub.rob0.0.line', field='0', spw='0:510~730', mode='channel', start=0, niter=10000, threshold='1.2mJy', interactive=T, npercycle=100, imsize=[200,200], phasecenter='', cell='15.0arcsec', restfreq='1.4204GHz', weighting='briggs', robust='0.0', usescratch=T)
#
# # Beam size:
#
#
# # Collapse of cube (moments):
#
# immoments(imagename='cig96_11.cvel.corr.contsub.natural.line.image', axis='spectral', moments=[0,1], outfile='cig96_11.cvel.corr.contsub.natural.line.image.mom')
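# # Optional (a sketch; it assumes the usual immoments naming, where moment 0 gets an '.integrated'
# # suffix and moment 1 a '.weighted_coord' suffix): export the collapsed maps to FITS for
# # inspection outside CASA:
# # exportfits(imagename='cig96_11.cvel.corr.contsub.natural.line.image.mom.integrated', fitsimage='cig96_11.mom0.fits')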
#
#
# ### Noise level via viewer task:
# ################################
#
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# (cig96_11.corr.contsub.line.image)
# Frequency Velocity Stokes BrightnessUnit BeamArea
# 1.41305e+09Hz 1553.49km/s I Jy/beam 4.44663
# Npts Sum FluxDensity Mean Rms
# 1155 2.966822e-02 6.672063e-03 2.568677e-05 4.262555e-04
# Std dev Minimum Maximum region count
# 4.256651e-04 -1.101821e-03 1.550763e-03 1
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
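# # The same statistics can also be obtained programmatically (a sketch; imstat returns a dictionary
# # with 'rms', 'sigma', 'min', 'max', etc., evaluated over the whole image unless a region is given):
# # stats = imstat(imagename='cig96_11.corr.contsub.line.image')
# # print(stats['rms'], stats['sigma'])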
#
# # For the time-channel binned file, we clean as follows:
#
# # Weighting natural:
#
# clean(vis='cig96_11.corr.20s.10chan.ms.contsub', imagename='cig96_11.corr.20s.10chan.ms.contsub.natural.line.image', field='0', spw='0:93~110', mode='channel', start=0, niter=1000, threshold='1.2mJy', interactive=T, npercycle=100, imsize=[200,200], phasecenter='', cell='15.0arcsec', restfreq='1.4204GHz', weighting='natural', usescratch=T)
#
# # For the spw 0 split dataset:
#
# clean(vis='cig96_11.corr.spw0.ms.contsub', imagename='cig96_11.corr.spw0.ms.contsub.natural.line', field='0', spw='0:920~1110', mode='channel', start=920, nchan=190, niter=10000, threshold='1.2mJy', interactive=T, npercycle=100, imsize=[200,200], phasecenter='', cell='10.0arcsec', restfreq='1.4204GHz', weighting='natural', usescratch=T)
#
# # ===============================================================================
#
# ### Corrected data inspection via msview:
# #########################################
#
# # We inspect the baselines that show solar interference via msview:
#
# # Field 1 shows interference patterns as well as some noisy baselines:
# #
# # 2-8, 5-26, 5-20, 8-13, 8-12, 2-12, 2-25, 2-20, 22-24, 0-26, 12-13, 0-1, 12-25, 6-8, 3-15, 1-7, 20-25, 7-14,
# # 3-19, 2-24, 2-6, 2-5, 11-19, 0-5, 12-22, 9-25, 4-21, 11-16, 24-25, 5-25, 1-26, 5-8, 2-26, 2-9, 5-24, 5-6,
# # 25-26, 1-5, 20-22, 0-2, 10-24, 5-9, 7-26, 0-14, 5-7, 1-2, 5-15, 1-25, 8-17, 17-22, 12-17, 13-17, 17-25, 6-17,
# # 17-26, 18-24, 3-17,
# #
# # Antennas 0=ea01, 1=ea02, 4=ea05, 6=ea07 and 18=ea20 always show very large amplitude differences, so we flag them:
#
# flagdata(vis='cig96_11.ms', mode='manual', field='1', spw='0', antenna='ea07;ea01;ea02;ea05;ea20')
#
# # We flag the baselines that have shown interferences:
#
# flagdata(vis='cig96_11.ms', mode='manual', field='1', spw='0', antenna='2&8;5&26;5&20;8&13;8&12;2&12;2&25;2&20;22&24;0&26;12&13;0&1;12&25;6&8;3&15;1&7;20&25;7&14;3&19;2&24;2&6;2&5;11&19;0&5;12&22;9&25;4&21;11&16;24&25;5&25;1&26;5&8;2&26;2&9;5&24;5&6;25&26;1&5;20&22;0&2;10&24;5&9;7&26;0&14;5&7;1&2;5&15;1&25;8&17;17&22;12&17;13&17;17&25;6&17;17&26;18&24;3&17', flagbackup=True)
#
# # We now delete the calibration to perform it again with the newly flagged data:
# #
# # To do so, we restart from the beginning, right after the flagging.
# #
# # New flux value:
# #
# # Flux density for J0238+1636 in SpW=0 (freq=1.40499e+09 Hz) is: 0.818094 +/- 0.0143805 (SNR = 56.8892, N = 44)
# #
# # After applying the whole calibration again, we split field = 3 (CIG96) re-calibrated data, in the spw=0:
#
# split(vis='cig96_11.ms', datacolumn='corrected', outputvis='cig96_11.corr.v2.ms', field='3', spw='0:399~2047')
# split(vis='cig96_11.ms', datacolumn='corrected', outputvis='cig96_11.corr.allfields.ms', field='1~3', spw='0')
#
# # and redo uvcontsub:
#
# # Since we have chopped out the first 400 channels where the RFI region was (see spw definition in the
# # split command), the total number of channels has changed: from 2048 we now have 1648.
# # Now, we remove the continuum from the source using the channels from 500 to 910 and from 730 to 1300.
#
# uvcontsub(vis='cig96_11.corr.v2.ms', field='0', fitspw='0:0~510;730~1300')
#
# # Cleaning: weighting natural:
#
# clean(vis='cig96_11.corr.v2.ms.contsub', imagename='cig96_11.corr.v2.contsub.natural.line', field='0', spw='0:510~730', mode='channel', start=0, niter=10000, threshold='1.2mJy', interactive=T, npercycle=100, imsize=[300,300], phasecenter='', cell='15.0arcsec', restfreq='1.4204GHz', weighting='natural', usescratch=T)
################ END
#
#
# IN CASE YOU WISH TO RESET THE WHOLE CALIBRATION, TAKE THE FOLLOWING STEPS:
#
# clearcal(vis='xxx')
#
# flagdata(vis='xxx', mode='unflag')
#
# Ready to redo calibration!
| 47.240964 | 379 | 0.622163 |
| 1ef4d50732db33b1f5c73636d5c289fd3df7916f | 8,862 | py | Python | rally_openstack/task/contexts/glance/images.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | ["Apache-2.0"] | null | null | null | rally_openstack/task/contexts/glance/images.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | ["Apache-2.0"] | null | null | null | rally_openstack/task/contexts/glance/images.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | ["Apache-2.0"] | 1 | 2021-08-10T03:11:51.000Z | 2021-08-10T03:11:51.000Z |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import logging
from rally.common import utils as rutils
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.image import image
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="images", platform="openstack", order=410)
class ImageGenerator(context.OpenStackContext):
"""Uploads specified Glance images to every tenant."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image_url": {
"type": "string",
"description": "Location of the source to create image from."
},
"disk_format": {
"description": "The format of the disk.",
"enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
"ari", "ami"]
},
"container_format": {
"description": "Format of the image container.",
"enum": ["aki", "ami", "ari", "bare", "docker", "ova", "ovf"]
},
"image_name": {
"type": "string",
"description": "The name of image to create. NOTE: it will be "
"ignored in case when `images_per_tenant` is "
"bigger then 1."
},
"min_ram": {
"description": "Amount of RAM in MB",
"type": "integer",
"minimum": 0
},
"min_disk": {
"description": "Amount of disk space in GB",
"type": "integer",
"minimum": 0
},
"visibility": {
"description": "Visibility for this image ('shared' and "
"'community' are available only in case of "
"Glance V2).",
"enum": ["public", "private", "shared", "community"]
},
"images_per_tenant": {
"description": "The number of images to create per one single "
"tenant.",
"type": "integer",
"minimum": 1
},
"image_args": {
"description": "This param is deprecated since Rally-0.10.0, "
"specify exact arguments in a root section of "
"context instead.",
"type": "object",
"additionalProperties": True
},
"image_container": {
"description": "This param is deprecated since Rally-0.10.0, "
"use `container_format` instead.",
"type": "string",
},
"image_type": {
"description": "This param is deprecated since Rally-0.10.0, "
"use `disk_format` instead.",
"enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
"ari", "ami"],
},
},
"oneOf": [{"description": "It is been used since Rally 0.10.0",
"required": ["image_url", "disk_format",
"container_format"]},
{"description": "One of backward compatible way",
"required": ["image_url", "image_type",
"container_format"]},
{"description": "One of backward compatible way",
"required": ["image_url", "disk_format",
"image_container"]},
{"description": "One of backward compatible way",
"required": ["image_url", "image_type",
"image_container"]}],
"additionalProperties": False
}
DEFAULT_CONFIG = {"images_per_tenant": 1}
def setup(self):
image_url = self.config.get("image_url")
disk_format = self.config.get("disk_format")
container_format = self.config.get("container_format")
images_per_tenant = self.config.get("images_per_tenant")
visibility = self.config.get("visibility", "private")
min_disk = self.config.get("min_disk", 0)
min_ram = self.config.get("min_ram", 0)
image_args = self.config.get("image_args", {})
if "image_type" in self.config:
LOG.warning("The 'image_type' argument is deprecated since "
"Rally 0.10.0, use disk_format argument instead")
if not disk_format:
disk_format = self.config["image_type"]
if "image_container" in self.config:
LOG.warning("The 'image_container' argument is deprecated since "
"Rally 0.10.0; use container_format argument instead")
if not container_format:
container_format = self.config["image_container"]
if image_args:
LOG.warning(
"The 'image_args' argument is deprecated since Rally 0.10.0; "
"specify arguments in a root section of context instead")
if "is_public" in image_args:
if "visibility" not in self.config:
visibility = ("public" if image_args["is_public"]
else "private")
if "min_ram" in image_args:
if "min_ram" not in self.config:
min_ram = image_args["min_ram"]
if "min_disk" in image_args:
if "min_disk" not in self.config:
min_disk = image_args["min_disk"]
# None image_name means that image.Image will generate a random name
image_name = None
if "image_name" in self.config and images_per_tenant == 1:
image_name = self.config["image_name"]
for user, tenant_id in self._iterate_per_tenants():
current_images = []
clients = osclients.Clients(user["credential"])
image_service = image.Image(
clients, name_generator=self.generate_random_name)
for i in range(images_per_tenant):
image_obj = image_service.create_image(
image_name=image_name,
container_format=container_format,
image_location=image_url,
disk_format=disk_format,
visibility=visibility,
min_disk=min_disk,
min_ram=min_ram)
current_images.append(image_obj.id)
self.context["tenants"][tenant_id]["images"] = current_images
def cleanup(self):
if self.context.get("admin", {}):
# NOTE(andreykurilin): Glance does not require the admin for
# listing tenant images, but the admin is required for
# discovering Cinder volumes which might be created for the
            # purpose of caching. Removing such volumes is an optional step,
            # since Cinder should have its own mechanism like a garbage collector,
# but if we can, let's remove everything and make the cloud as
# close as possible to the original state.
admin = self.context["admin"]
admin_required = None
else:
admin = None
admin_required = False
if "image_name" in self.config:
matcher = rutils.make_name_matcher(self.config["image_name"])
else:
matcher = self.__class__
resource_manager.cleanup(names=["glance.images",
"cinder.image_volumes_cache"],
admin=admin,
admin_required=admin_required,
users=self.context.get("users", []),
superclass=matcher,
task_id=self.get_owner_id())
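# Usage sketch (not part of this module; the URL and values below are placeholders): a Rally task
# would enable this context with a configuration that satisfies CONFIG_SCHEMA, for example:
#
#     {"context": {"images": {"image_url": "http://example.com/images/cirros.qcow2",
#                             "disk_format": "qcow2",
#                             "container_format": "bare",
#                             "images_per_tenant": 2,
#                             "visibility": "private"}}}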
| 42.605769 | 79 | 0.534191 |
| 3dd2704de60d4e904792ef52785256ef5ef713b7 | 1,898 | py | Python | setup.py | zen-juen/AutoCalendar | 05cf9cec07dd0bceb2965970d9513faf8fec461c | ["MIT"] | 3 | 2020-10-18T05:14:12.000Z | 2021-12-05T10:22:53.000Z | setup.py | zen-juen/AutoCalendar | 05cf9cec07dd0bceb2965970d9513faf8fec461c | ["MIT"] | null | null | null | setup.py | zen-juen/AutoCalendar | 05cf9cec07dd0bceb2965970d9513faf8fec461c | ["MIT"] | 1 | 2021-12-28T19:02:45.000Z | 2021-12-28T19:02:45.000Z |
# -*- coding: utf-8 -*-
import re
from setuptools import find_packages, setup
# Utilities
with open("README.md") as readme_file:
readme = readme_file.read()
#
#with open("NEWS.rst") as history_file:
# history = history_file.read()
#history = history.replace("\n-------------------", "\n^^^^^^^^^^^^^^^^^^^").replace("\n=====", "\n-----")
def find_version():
result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), open("autocalendar/__init__.py").read())
return result.group(1)
# Dependencies
requirements = ["numpy", "pandas", "pickle-mixin", "google", "google_auth_oauthlib"]
# Setup
setup(
# Info
name="autocalendar",
keywords="automation, calendar events, google calendar api, automatic scheduling, Python",
url="https://github.com/zen-juen/AutoCalendar",
download_url = 'https://github.com/zen-juen/AutoCalendar/tree/main/zipball',
version=find_version(),
description="A Python automation scheduling system based on the Google Calendar API.",
long_description=readme + "\n\n",
long_description_content_type="text/markdown",
license="MIT license",
# The name and contact of a maintainer
author="Zen Juen Lau",
author_email="lauzenjuen@gmail.com",
# Dependencies
install_requires=requirements,
# setup_requires=setup_requirements,
# extras_require={"test": test_requirements},
# test_suite="pytest",
# tests_require=test_requirements,
# Misc
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
)
| 30.612903 | 118 | 0.645416 |
| 3b3c3438ec7dcfdd2ede19e28b63ec2eec93dc55 | 1,811 | py | Python | tests/integration/questionnaire/test_questionnaire_question_definition.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | ["MIT"] | null | null | null | tests/integration/questionnaire/test_questionnaire_question_definition.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | ["MIT"] | null | null | null | tests/integration/questionnaire/test_questionnaire_question_definition.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | ["MIT"] | null | null | null |
from tests.integration.integration_test_case import IntegrationTestCase
class TestQuestionnaireQuestionDefinition(IntegrationTestCase):
def test_question_definition(self):
# Given I launch a questionnaire with definitions
self.launchSurvey("test_question_definition")
# When I start the survey I am presented with the definitions title and content correctly
self.assertInBody(
"Do you connect a LiFePO4 battery to your <em>photovoltaic system</em> to store surplus energy?"
)
self.assertInBody("What is a photovoltaic system?")
self.assertInBody(
"A typical photovoltaic system employs solar panels, each comprising a number of solar cells, "
"which generate electrical power. PV installations may be ground-mounted, rooftop mounted or wall mounted. "
"The mount may be fixed, or use a solar tracker to follow the sun across the sky."
)
self.assertInBody("Why use LiFePO4 batteries?")
self.assertInBody("3 Benefits of LifePO4 batteries.")
self.assertInBody(
"LifePO4 batteries have a life span 10 times longer than that of traditional lead acid batteries. "
"This dramatically reduces the need for battery changes."
)
self.assertInBody(
"Lithium iron phosphate batteries operate with much lower resistance and consequently recharge at a faster rate."
)
self.assertInBody(
"LifeP04 lightweight batteries are lighter than lead acid batteries, usually weighing about 1/4 less."
)
# When we continue we go to the summary page
self.post()
self.assertInUrl("summary")
# And Submit my answers
self.post()
self.assertInUrl("thank-you")
| 44.170732 | 125 | 0.680839 |
| 25c012809813698c5d9942d60d4996edc71a5e93 | 17,086 | py | Python | imagegen/gan.py | kostaleonard/mlops-image-generation-example | c5360be4361a9843466469a74b94dba997dbe778 | ["MIT"] | 1 | 2021-12-20T22:03:56.000Z | 2021-12-20T22:03:56.000Z | imagegen/gan.py | kostaleonard/mlops-image-generation-example | c5360be4361a9843466469a74b94dba997dbe778 | ["MIT"] | 14 | 2021-12-22T15:52:39.000Z | 2022-01-27T12:58:56.000Z | imagegen/gan.py | kostaleonard/mlops-image-generation-example | c5360be4361a9843466469a74b94dba997dbe778 | ["MIT"] | null | null | null |
"""Demonstrates a generative adversarial network architecture on the pokemon
dataset. The model will generate images of never-before-seen pokemon. The
positive class is used to denote real images.
This model syncs with WandB.
"""
# pylint: disable=no-name-in-module
from typing import Optional
import time
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model, load_model as tf_load_model
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import History
from mlops.dataset.versioned_dataset import VersionedDataset
from mlops.model.training_config import TrainingConfig
import wandb
from imagegen.errors import GANShapeError, GANHasNoOptimizerError
DEFAULT_EPOCHS = 5
DEFAULT_BATCH_SIZE = 32
DEFAULT_BATCHES_PER_EPOCH = 100
DEFAULT_CKPT_PREFIX = 'models/checkpoints/gan'
CROSS_ENTROPY_LOSS = BinaryCrossentropy(from_logits=False)
WANDB_PROJECT_TITLE = 'gan_pokemon'
MAX_NUM_WANDB_IMAGES = 50
WANDB_IMAGE_ROWS = 4
WANDB_IMAGE_COLS = 4
ROTATION_RANGE_DEGREES = 15
WIDTH_SHIFT_RANGE = 0.1
HEIGHT_SHIFT_RANGE = 0.1
ZOOM_RANGE = 0.2
class GAN:
"""Represents a generative adversarial network model."""
def __init__(self, generator: Model, discriminator: Model) -> None:
"""Instantiates the GAN.
:param generator: The compiled generator model. Generates new images.
The input shape must be m x n, where m is the number of examples
and n is the length of the noise vector that the generator uses as
input. The output shape must be m x h x w x c, where m is the number
of examples, h is the image height, w is the image width, and c is
the image channels. The output must be in the range [0, 1].
:param discriminator: The compiled discriminator model. Classifies
images as either real or fake. The input shape must be
m x h x w x c, the same shape as the output of the generator. The
output shape must be m x 1, where the output represents the
probability that each example is real. This output must be in the
range [0, 1].
"""
if len(generator.input_shape) != 2:
raise GANShapeError('Generator input must be of shape (m, n)')
if len(generator.output_shape) != 4:
raise GANShapeError('Generator output must be of shape '
'(m, h, w, c)')
if generator.output_shape != discriminator.input_shape:
raise GANShapeError('Generator output shape must match '
'discriminator input shape')
if discriminator.output_shape[1:] != (1,):
raise GANShapeError('Discriminator output must be of shape (m, 1)')
if not generator.optimizer:
raise GANHasNoOptimizerError('Generator is missing optimizer')
if not discriminator.optimizer:
raise GANHasNoOptimizerError('Discriminator is missing optimizer')
self.gen_output_shape = generator.output_shape[1:]
self.gen_input_dim = generator.input_shape[1]
self.model_hyperparams = {
'gen_input_dim': self.gen_input_dim,
'gen_output_shape': self.gen_output_shape
}
self.generator = generator
self.discriminator = discriminator
def save(self,
generator_filename: str,
discriminator_filename: str) -> None:
"""Saves the generator and discriminator networks to the given paths.
:param generator_filename: The path to which to save the generator.
:param discriminator_filename: The path to which to save the
discriminator.
"""
self.generator.save(generator_filename)
self.discriminator.save(discriminator_filename)
@staticmethod
def load(generator_filename: str, discriminator_filename: str) -> 'GAN':
"""Returns the GAN loaded from the generator and discriminator files.
:param generator_filename: The path to the saved generator.
:param discriminator_filename: The path to the saved discriminator.
:return: The GAN loaded from the generator and discriminator files.
"""
generator = tf_load_model(generator_filename)
discriminator = tf_load_model(discriminator_filename)
return GAN(generator, discriminator)
@staticmethod
def _generator_loss(fake_output: np.ndarray) -> float:
"""Returns the generator's loss based on the discriminator's predictions
on generated images.
:param fake_output: The discriminator's predictions on fake images
output by the generator. If the discriminator can spot the fakes,
these predictions will be close to 0; if the generator can fool the
discriminator, these predictions will be close to 1. The predictions
are a tensor of shape m x 1, where m is the batch size.
:return: The cross-entropy loss of fake_output against an array of ones;
the generator loss is minimized when it completely fools the
discriminator.
"""
return CROSS_ENTROPY_LOSS(tf.ones_like(fake_output), fake_output)
@staticmethod
def _discriminator_loss(real_output: np.ndarray,
fake_output: np.ndarray) -> float:
"""Returns the discriminator's loss on batches of real and fake images.
:param real_output: The discriminator's predictions on real images. If
the discriminator can spot real images, these predictions will be
close to 1. The predictions are a tensor of shape m x 1, where m is
the batch size.
:param fake_output: The discriminator's predictions on fake images
output by the generator. If the discriminator can spot the fakes,
these predictions will be close to 0; if the generator can fool the
discriminator, these predictions will be close to 1. The predictions
are a tensor of shape m x 1, where m is the batch size.
:return: The cross-entropy loss of real_output against an array of ones
and fake_output against an array of zeros; the discriminator loss is
minimized when it correctly differentiates between real and fake
images.
"""
real_loss = CROSS_ENTROPY_LOSS(tf.ones_like(real_output), real_output)
fake_loss = CROSS_ENTROPY_LOSS(tf.zeros_like(fake_output), fake_output)
return real_loss + fake_loss
@tf.function
def _train_step(self, X_batch: np.ndarray) -> tuple[float, float]:
"""Runs one batch of images through the model, computes the loss, and
applies the gradients to the model; returns the generator and
discriminator losses on the batch.
:param X_batch: A batch of input images; a tensor of shape m x h x w x
c, where m is the batch size, h is the image height, w is the image
width, and c is the number of channels.
:return: A 2-tuple of the generator and discriminator losses on the
batch.
"""
noise = tf.random.normal((X_batch.shape[0], self.gen_input_dim))
with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
generated_images = self.generator(noise, training=True)
real_output = self.discriminator(X_batch, training=True)
fake_output = self.discriminator(generated_images, training=True)
gen_loss = GAN._generator_loss(fake_output)
dis_loss = GAN._discriminator_loss(real_output, fake_output)
gen_gradients = gen_tape.gradient(
gen_loss, self.generator.trainable_variables)
dis_gradients = dis_tape.gradient(
dis_loss, self.discriminator.trainable_variables)
self.generator.optimizer.apply_gradients(
zip(gen_gradients, self.generator.trainable_variables))
self.discriminator.optimizer.apply_gradients(
zip(dis_gradients, self.discriminator.trainable_variables))
return gen_loss, dis_loss
def train(self,
dataset: VersionedDataset,
epochs: int = DEFAULT_EPOCHS,
batch_size: int = DEFAULT_BATCH_SIZE,
batches_per_epoch: int = DEFAULT_BATCHES_PER_EPOCH,
model_checkpoint_prefix: Optional[str] = DEFAULT_CKPT_PREFIX,
use_wandb: bool = False) -> TrainingConfig:
"""Trains the model on the training data.
:param dataset: The dataset on which to train the model.
:param epochs: The number of complete passes over the dataset to run
training.
:param batch_size: The size of the batches used in mini-batch gradient
descent.
:param batches_per_epoch: The number of batches to use per epoch.
:param model_checkpoint_prefix: If specified, the prefix of the path to
which to save the generator and discriminator. The generator
file will have the suffix '_generator.h5' and the discriminator
will have the suffix '_discriminator.h5'.
:param use_wandb: If True, sync the run with WandB.
:return: The training History object.
"""
# pylint: disable=too-many-locals, too-many-statements
# pylint: disable=too-many-arguments
train_hyperparams = locals()
train_hyperparams.pop('self')
train_hyperparams.pop('dataset')
# These files will only be created if a prefix was supplied.
generator_checkpoint_filename = \
f'{model_checkpoint_prefix}_generator.h5'
discriminator_checkpoint_filename = \
f'{model_checkpoint_prefix}_discriminator.h5'
if use_wandb:
all_hyperparams = {**self.model_hyperparams, **train_hyperparams}
wandb_run = wandb.init(project=WANDB_PROJECT_TITLE,
dir='.',
config=all_hyperparams,
reinit=True)
wandb.run.summary['generator_graph'] = wandb.Graph.from_keras(
self.generator)
wandb.run.summary['discriminator_graph'] = wandb.Graph.from_keras(
self.discriminator)
image_generator = ImageDataGenerator(
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=ROTATION_RANGE_DEGREES,
width_shift_range=WIDTH_SHIFT_RANGE,
height_shift_range=HEIGHT_SHIFT_RANGE,
zoom_range=ZOOM_RANGE,
horizontal_flip=True
)
image_generator.fit(dataset.X_train)
train_dataset = tf.data.Dataset.from_generator(
lambda: image_generator.flow(dataset.X_train,
shuffle=True,
batch_size=batch_size),
output_signature=tf.TensorSpec(
shape=(None, *dataset.X_train[0].shape),
dtype=tf.float32)).as_numpy_iterator()
generate_images_epochs = {
int(num * (epochs - 1) / MAX_NUM_WANDB_IMAGES)
for num in range(1, MAX_NUM_WANDB_IMAGES + 1)
}
history = History()
history.history['epoch'] = []
history.history['loss'] = []
history.history['gen_loss'] = []
history.history['dis_loss'] = []
for epoch in range(epochs):
gen_loss = 0
dis_loss = 0
num_batches = 0
start_time = time.time()
for _ in tqdm(range(batches_per_epoch)):
train_batch = next(train_dataset)
gen_loss_batch, dis_loss_batch = self._train_step(train_batch)
gen_loss += gen_loss_batch
dis_loss += dis_loss_batch
num_batches += 1
end_time = time.time()
gen_loss /= num_batches
dis_loss /= num_batches
loss = gen_loss + dis_loss
print(f'Epoch {epoch}/{epochs} ({end_time - start_time:.1f}s): '
f'loss={loss:.3f}, gen_loss={gen_loss:.3f}')
if model_checkpoint_prefix:
self.save(generator_checkpoint_filename,
discriminator_checkpoint_filename)
print(f'Generator loss={gen_loss:.3f}; saving model.')
if use_wandb:
logged_items = {
'epoch': epoch,
'loss': loss,
'generator_loss': gen_loss,
'discriminator_loss': dis_loss
}
if epoch in generate_images_epochs:
generated_batch = self.generate(
WANDB_IMAGE_ROWS * WANDB_IMAGE_COLS)
concatenated_images = GAN.concatenate_images(
generated_batch, WANDB_IMAGE_ROWS, WANDB_IMAGE_COLS)
images = wandb.Image(
concatenated_images,
caption=f'Generated images at epoch {epoch}')
logged_items['generated_images'] = images
wandb.log(logged_items)
tmp_generator_filename = '/tmp/gan_generator.h5'
tmp_discriminator_filename = '/tmp/gan_discriminator.h5'
self.save(tmp_generator_filename,
tmp_discriminator_filename)
# pylint: disable=unexpected-keyword-arg
wandb.save(tmp_generator_filename, base_path='/tmp')
wandb.save(tmp_discriminator_filename, base_path='/tmp')
history.history['epoch'].append(epoch)
history.history['loss'].append(float(loss))
history.history['gen_loss'].append(float(gen_loss))
history.history['dis_loss'].append(float(dis_loss))
if use_wandb:
best_gen_epoch = min(
history.history['epoch'],
key=lambda gen_epoch: history.history['gen_loss'][gen_epoch])
best_gen_loss = history.history['gen_loss'][best_gen_epoch]
best_dis_epoch = min(
history.history['epoch'],
key=lambda dis_epoch: history.history['dis_loss'][dis_epoch])
best_dis_loss = history.history['dis_loss'][best_dis_epoch]
wandb.run.summary['best_gen_epoch'] = best_gen_epoch
wandb.run.summary['best_gen_loss'] = best_gen_loss
wandb.run.summary['best_dis_epoch'] = best_dis_epoch
wandb.run.summary['best_dis_loss'] = best_dis_loss
wandb_run.finish()
return TrainingConfig(history, train_hyperparams)
def generate(self, num_samples: int) -> np.ndarray:
"""Returns a batch of images generated by the (trained) model. The batch
is generated based on random noise vectors.
:param num_samples: The number of images to generate.
:return: A batch of generated images; a tensor of shape num_samples x
h x w x c, where h is the image height, w is the image width, and c
is the number of channels. All values are in the range [0, 1].
"""
noise = tf.random.normal((num_samples, self.gen_input_dim))
return self.generator(noise).numpy()
@staticmethod
def concatenate_images(images: np.ndarray,
rows: int,
cols: int) -> np.ndarray:
"""Returns a single image that is the concatenation of the (rows * cols)
images into the specified number of rows and columns; a tensor of shape
(h * rows) x (w * cols) x c, where images is of shape
(rows * cols) x h x w x c.
:param images: An array of (rows * cols) images; a tensor of shape
(rows * cols) x h x w x c, where h is the image height, w is the
image width, and c is the number of channels.
:param rows: The number of rows in which to display the images.
:param cols: The number of cols in which to display the images.
:return: A single image that is the concatenation of the (rows * cols)
images into the specified number of rows and columns; a tensor of
shape (h * rows) x (w * cols) x c, where images is of shape
(rows * cols) x h x w x c.
"""
result = np.zeros((rows * images.shape[1],
cols * images.shape[2],
images.shape[3]))
for row in range(rows):
for col in range(cols):
image_num = (row * cols) + col
row_start = row * images.shape[1]
row_end = (row + 1) * images.shape[1]
col_start = col * images.shape[2]
col_end = (col + 1) * images.shape[2]
result[row_start:row_end,
col_start:col_end,
:] = images[image_num]
return result
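# Usage sketch (illustrative only, not part of the module; the layer sizes are placeholders). It builds
# a minimal generator/discriminator pair that satisfies the shape contract documented in __init__
# (generator: (m, n) -> (m, h, w, c) with outputs in [0, 1]; discriminator: (m, h, w, c) -> (m, 1)),
# compiles both with an optimizer, and wraps them in a GAN:
#
#     from tensorflow.keras import Sequential
#     from tensorflow.keras.layers import Dense, Reshape, Flatten
#
#     generator = Sequential([Dense(4 * 4 * 3, activation='sigmoid', input_shape=(16,)),
#                             Reshape((4, 4, 3))])
#     generator.compile(optimizer='adam', loss='binary_crossentropy')
#     discriminator = Sequential([Flatten(input_shape=(4, 4, 3)),
#                                 Dense(1, activation='sigmoid')])
#     discriminator.compile(optimizer='adam', loss='binary_crossentropy')
#     gan = GAN(generator, discriminator)
#     fake_images = gan.generate(num_samples=8)   # shape (8, 4, 4, 3), values in [0, 1]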
| 48.817143 | 80 | 0.62876 |
| 220f6d4221eef59174bf21ed23735ecd99898c56 | 2,390 | py | Python | swcms_social/blog/migrations/0001_initial.py | ivanff/swcms | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | ["MIT"] | null | null | null | swcms_social/blog/migrations/0001_initial.py | ivanff/swcms | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | ["MIT"] | 1 | 2019-06-25T11:17:35.000Z | 2019-06-25T11:17:54.000Z | swcms_social/blog/migrations/0001_initial.py | ivanff/swcms-social | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | ["MIT"] | null | null | null |
# Generated by Django 2.0.2 on 2018-02-14 14:01
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('h1', models.CharField(blank=True, default='', max_length=250, verbose_name='H1')),
('text', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Текст')),
('is_active', models.BooleanField(default=True, verbose_name='Активен')),
('title', models.CharField(blank=True, default='', max_length=250, verbose_name='<title>')),
('description', models.TextField(blank=True, default='', verbose_name='<meta name="description">')),
('keywords', models.TextField(blank=True, default='', verbose_name='<meta name="keywords">')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='создан')),
('changed', models.DateTimeField(auto_now=True, verbose_name='изменен')),
('slug', models.SlugField()),
('anons', models.TextField(blank=True, default='', verbose_name='Анонс')),
],
options={
'verbose_name': 'Пост',
'verbose_name_plural': 'Посты',
},
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, default='', max_length=250, verbose_name='Заголовок')),
('h1', models.CharField(blank=True, default='', max_length=250, verbose_name='H1')),
('name', models.CharField(blank=True, default='', max_length=250, verbose_name='Имя')),
],
options={
'verbose_name': 'Тег',
'verbose_name_plural': 'Теги',
'ordering': ('-id',),
},
),
migrations.AddField(
model_name='posts',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tags'),
),
]
| 43.454545 | 116 | 0.562762 |
| 470664548485c444218b1d12222c6b393eec2dab | 350 | py | Python | tests/admin_scripts/management/commands/app_command.py | trught007/django | d55d21dbb8b307941c2d26b95be46bf83015d868 | ["BSD-3-Clause"] | 1 | 2021-11-11T04:13:11.000Z | 2021-11-11T04:13:11.000Z | tests/admin_scripts/management/commands/app_command.py | trught007/django | d55d21dbb8b307941c2d26b95be46bf83015d868 | ["BSD-3-Clause"] | null | null | null | tests/admin_scripts/management/commands/app_command.py | trught007/django | d55d21dbb8b307941c2d26b95be46bf83015d868 | ["BSD-3-Clause"] | 1 | 2020-10-01T08:23:34.000Z | 2020-10-01T08:23:34.000Z |
from django.core.management.base import AppCommand
class Command(AppCommand):
help = 'Test Application-based commands'
requires_model_validation = False
args = '[app_label ...]'
def handle_app_config(self, app_config, **options):
print('EXECUTE:AppCommand name=%s, options=%s' % (app_config.name, sorted(options.items())))
| 31.818182 | 100 | 0.711429 |
| be66a5815fdd5f8f3e26277a239935c81b8efddc | 3,959 | py | Python | setup.py | jspenc72/hummingbot | 8e9129a77b157eea4cee2c7e497b93a2704f1197 | ["Apache-2.0"] | null | null | null | setup.py | jspenc72/hummingbot | 8e9129a77b157eea4cee2c7e497b93a2704f1197 | ["Apache-2.0"] | null | null | null | setup.py | jspenc72/hummingbot | 8e9129a77b157eea4cee2c7e497b93a2704f1197 | ["Apache-2.0"] | null | null | null |
import numpy as np
import os
import subprocess
import sys
from setuptools import find_packages, setup
from setuptools.command.build_ext import build_ext
from Cython.Build import cythonize
is_posix = (os.name == "posix")
if is_posix:
os_name = subprocess.check_output("uname").decode("utf8")
if "Darwin" in os_name:
os.environ["CFLAGS"] = "-stdlib=libc++ -std=c++11"
else:
os.environ["CFLAGS"] = "-std=c++11"
if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'):
os.environ["CFLAGS"] += " -O0"
# Avoid a gcc warning below:
# cc1plus: warning: command line option '-Wstrict-prototypes' is valid
# for C/ObjC but not for C++
class BuildExt(build_ext):
def build_extensions(self):
if os.name != "nt" and '-Wstrict-prototypes' in self.compiler.compiler_so:
self.compiler.compiler_so.remove('-Wstrict-prototypes')
super().build_extensions()
def main():
cpu_count = os.cpu_count() or 8
version = "20220210"
packages = find_packages(include=["hummingbot", "hummingbot.*"])
package_data = {
"hummingbot": [
"core/cpp/*",
"VERSION",
"templates/*TEMPLATE.yml"
],
}
install_requires = [
"0x-contract-addresses",
"0x-contract-wrappers",
"0x-order-utils",
"aioconsole",
"aiohttp",
"aiokafka",
"appdirs",
"appnope",
"bidict",
"cachetools",
"certifi",
"cryptography",
"cython",
"cytoolz",
"diff-cover",
"dydx-python",
"dydx-v3-python",
"eth-abi",
"eth-account",
"eth-bloom",
"eth-keyfile",
"eth-typing",
"eth-utils",
"ethsnarks-loopring",
"flake8",
"hexbytes",
"importlib-metadata",
"mypy-extensions",
"numpy",
"pandas",
"pip",
"pre-commit",
"prompt-toolkit",
"psutil",
"pyjwt",
"pyperclip",
"python-dateutil",
"python-telegram-bot",
"requests",
"rsa",
"ruamel-yaml",
"scipy",
"signalr-client-aio",
"simplejson",
"six",
"sqlalchemy",
"sync-timeout",
"tzlocal",
"ujson",
"web3",
"websockets",
"yarl",
"terra_sdk"
]
cython_kwargs = {
"language": "c++",
"language_level": 3,
}
cython_sources = ["hummingbot/**/*.pyx"]
if os.path.exists('test'):
cython_sources.append("test/**/*.pyx")
if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'):
compiler_directives = {
"optimize.use_switch": False,
"optimize.unpack_method_calls": False,
}
else:
compiler_directives = {}
if is_posix:
cython_kwargs["nthreads"] = cpu_count
if "DEV_MODE" in os.environ:
version += ".dev1"
package_data[""] = [
"*.pxd", "*.pyx", "*.h"
]
package_data["hummingbot"].append("core/cpp/*.cpp")
if len(sys.argv) > 1 and sys.argv[1] == "build_ext" and is_posix:
sys.argv.append(f"--parallel={cpu_count}")
setup(name="hummingbot",
version=version,
description="Hummingbot",
url="https://github.com/CoinAlpha/hummingbot",
author="CoinAlpha, Inc.",
author_email="dev@hummingbot.io",
license="Apache 2.0",
packages=packages,
package_data=package_data,
install_requires=install_requires,
ext_modules=cythonize(cython_sources, compiler_directives=compiler_directives, **cython_kwargs),
include_dirs=[
np.get_include()
],
scripts=[
"bin/hummingbot.py",
"bin/hummingbot_quickstart.py"
],
cmdclass={'build_ext': BuildExt},
)
if __name__ == "__main__":
main()
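# Hedged usage sketch (illustrative addition): a typical source build with the options
# handled above might look like this; the exact flags are assumptions, not project policy.
#     WITHOUT_CYTHON_OPTIMIZATIONS=1 python setup.py build_ext --inplace
# On POSIX systems the __main__ block appends --parallel=<cpu_count> automatically, and
# WITHOUT_CYTHON_OPTIMIZATIONS disables both the C optimizer (-O0) and the Cython
# optimize.* compiler directives.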
| 25.541935
| 106
| 0.543824
|
cb1b58205f666d779ab940ea5df54d58886f49c2
| 6,792
|
py
|
Python
|
PyDAQmx/Task.py
|
MMathisLab/PyDAQmx
|
0b6e3dbdac15882c8cc46575ce4e33bf1f090747
|
[
"BSD-3-Clause"
] | 1
|
2017-12-28T19:17:40.000Z
|
2017-12-28T19:17:40.000Z
|
PyDAQmx/Task.py
|
automaticjack1/PyDAQmx
|
0b6e3dbdac15882c8cc46575ce4e33bf1f090747
|
[
"BSD-3-Clause"
] | null | null | null |
PyDAQmx/Task.py
|
automaticjack1/PyDAQmx
|
0b6e3dbdac15882c8cc46575ce4e33bf1f090747
|
[
"BSD-3-Clause"
] | 1
|
2020-01-30T20:59:48.000Z
|
2020-01-30T20:59:48.000Z
|
from DAQmxTypes import TaskHandle
import DAQmxFunctions
from DAQmxFunctions import *
import ctypes
# Create a list of the names of the functions that take a TaskHandle as their first argument.
# Every function in this list will be converted into a method of the Task object.
# Each method keeps the same name as the corresponding DAQmx function, minus the
# leading "DAQmx" prefix.
task_function_list = [name for name in function_dict.keys() if \
len(function_dict[name]['arg_type'])>0 and \
(function_dict[name]['arg_type'][0] is TaskHandle) and\
'task' in function_dict[name]['arg_name'][0]]
# Remove DAQmxClearTask from task_function_list
task_function_list = [name for name in task_function_list if name not in ['DAQmxClearTask']]
try :
from DAQmxCallBack import *
_callback = True
except NotImplementedError:
_callback = False
if _callback:
class CallbackParent(object):
_EveryNSamplesEvent_already_register = False
def AutoRegisterEveryNSamplesEvent(self, everyNsamplesEventType,nSamples,options, name='EveryNCallback'):
"""Register the method named name as the callback function for EveryNSamplesEvent
With this method you can register a method of the class Task as a callback function.
The parameters everyNsamplesEventType, nSamples and options are the same
as the DAQmxRegisterEveryNSamplesEvent parameters
No parameters are passed to the method
If an event was already registered, the UnregisterEveryNSamplesEvent is automatically called
"""
if self._EveryNSamplesEvent_already_register:
self.UnregisterEveryNSamplesEvent()
self_id = create_callbackdata_id(self)
# Define the python function
def EveryNCallback_py(taskHandle, everyNsamplesEventType, nSamples, self_id):
self = get_callbackdata_from_id(self_id)
getattr(self,name)()
return 0
# Transform the python function to a CFunction
self.EveryNCallback_C = DAQmxEveryNSamplesEventCallbackPtr(EveryNCallback_py)
# Register the function
self.RegisterEveryNSamplesEvent(everyNsamplesEventType,nSamples,options,self.EveryNCallback_C,self_id)
self._EveryNSamplesEvent_already_register = True
def UnregisterEveryNSamplesEvent(self):
self.RegisterEveryNSamplesEvent(1,0,0,ctypes.cast(None, DAQmxEveryNSamplesEventCallbackPtr),0)
self._EveryNSamplesEvent_already_register = False
def AutoRegisterDoneEvent(self, options, name='DoneCallback'):
"""Register the method named name as the callback function for DoneEvent
With this method you can register a method of the class Task as a callback function.
The parameter options is the same as the DAQmxRegisterDoneEvent parameters
The method registered has one parameter : status
"""
self_id = create_callbackdata_id(self)
# Define the python function
def DoneCallback_py(taskHandle, status, self_id):
getattr(get_callbackdata_from_id(self_id),name)(status)
return 0
# Transform the python function to a CFunction
self.DoneCallback_C = DAQmxDoneEventCallbackPtr(DoneCallback_py)
# Register the function
self.RegisterDoneEvent(options,self.DoneCallback_C,self_id)
def AutoRegisterSignalEvent(self, signalID, options, name='SignalCallback'):
"""Register the method named name as the callback function for RegisterSignalEvent
With this method you can register a method of the class Task as a callback function.
The parameters signalID, options are the same
as the DAQmxRegisterSignalEvent parameters
            No parameters are passed to the method
"""
self_id = create_callbackdata_id(self)
# Define the python function
def SignalCallback_py(taskHandle, signalID, self_id):
self = get_callbackdata_from_id(self_id)
getattr(self,name)()
return 0
# Transform the python function to a CFunction
self.SignalCallback_C = DAQmxSignalEventCallbackPtr(SignalCallback_py)
# Register the function
self.RegisterSignalEvent(signalID, options, self.SignalCallback_C, self_id)
else:
class CallbackParent(object):
def __getattr__(self, name):
if name in ['AutoRegisterEveryNSamplesEvent', 'AutoRegisterDoneEvent', 'AutoRegisterSignalEvent']:
                raise NotImplementedError('Callback methods are not available')
return super(CallbackParent, self).__getattr__(name)
class Task(CallbackParent):
def __init__(self, name=""):
self.taskHandle = TaskHandle(0)
DAQmxCreateTask(name,byref(self.taskHandle))
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.ClearTask()
def __del__(self):
""" Clear automatically the task to be able to reallocate resources """
# Clear the task before deleting the object
# If the task as already been manually cleared, nothing is done
# This prevent to clear a task that has a Handle attributed to a new task
# See this example
# a = Task(), ..., a.ClearTask(), b = Task(), del(a)
# b has the same taskHandle as a, and deleting a will clear the task of b
try:
self.ClearTask()
except Exception:
pass
def ClearTask(self):
if self.taskHandle:
try:
DAQmxClearTask(self.taskHandle)
finally:
self.taskHandle.value = 0
def __repr__(self):
if self.taskHandle:
return "Task number %d"%self.taskHandle.value
else:
return "Invalid or cleared Task"
# Dynamically creates the method from the task_function_list
for function_name in task_function_list:
name = function_name[5:] # remove the DAQmx in front of the name
func = getattr(DAQmxFunctions, function_name)
arg_names = function_dict[function_name]['arg_name']
doc = 'T.%s(%s) -> error.' %(name, ', '.join(arg_names[1:]))
cmd = """def {0}(self, {1}):
"{3}"
{2}(self.taskHandle, {1})"""
exec(cmd.format(name, ', '.join(arg_names[1:]), function_name, doc))
del function_name, name, func, arg_names, doc
del task_function_list
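# Hedged usage sketch (illustrative addition): the constants and channel name below are
# placeholders taken from typical PyDAQmx examples, and any DAQmx call needs real hardware.
#     from PyDAQmx import Task, DAQmx_Val_RSE, DAQmx_Val_Volts, DAQmx_Val_Acquired_Into_Buffer
#     class AcquisitionTask(Task):
#         def __init__(self):
#             Task.__init__(self)
#             self.CreateAIVoltageChan("Dev1/ai0", "", DAQmx_Val_RSE, -10.0, 10.0,
#                                      DAQmx_Val_Volts, None)
#             self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, 1000, 0)
#         def EveryNCallback(self):
#             pass  # read and process the buffered samples here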
| 44.392157
| 114
| 0.650766
|
9b4f15fa9df3a6c66d3fa58d980980ed7bce896e
| 18,579
|
py
|
Python
|
tests/unit/states/iptables_test.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | 1
|
2020-09-16T21:31:02.000Z
|
2020-09-16T21:31:02.000Z
|
tests/unit/states/iptables_test.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/states/iptables_test.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import iptables
# Globals
iptables.__salt__ = {}
iptables.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class IptablesTestCase(TestCase):
'''
Validate the iptables state
'''
def test_chain_present(self):
'''
        Test to verify that the chain exists.
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''
}
mock = MagicMock(side_effect=[True, False, False, False])
with patch.dict(iptables.__salt__, {'iptables.check_chain': mock}):
ret.update({'comment': 'iptables salt chain is already'
' exist in filter table for ipv4'})
self.assertDictEqual(iptables.chain_present("salt"), ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'comment': 'iptables salt chain in filter'
' table needs to be set for ipv4',
'result': None})
self.assertDictEqual(iptables.chain_present("salt"), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[True, ''])
with patch.dict(iptables.__salt__,
{'iptables.new_chain': mock}):
ret.update({'result': True,
'comment': 'iptables salt chain in filter'
' table create success for ipv4',
'changes': {'locale': 'salt'}})
self.assertDictEqual(iptables.chain_present('salt'), ret)
ret.update({'changes': {}, 'result': False,
'comment': 'Failed to create salt chain'
' in filter table: for ipv4'})
self.assertDictEqual(iptables.chain_present('salt'), ret)
def test_chain_absent(self):
'''
Test to verify the chain is absent.
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''
}
mock = MagicMock(side_effect=[False, True, True, True])
with patch.dict(iptables.__salt__, {'iptables.check_chain': mock}):
ret.update({'comment': 'iptables salt chain is already'
' absent in filter table for ipv4'})
self.assertDictEqual(iptables.chain_absent("salt"), ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'comment': 'iptables salt chain in filter'
' table needs to be removed ipv4',
'result': None})
self.assertDictEqual(iptables.chain_absent("salt"), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[False, 'a'])
with patch.dict(iptables.__salt__, {'iptables.flush': mock}):
mock = MagicMock(return_value=True)
with patch.dict(iptables.__salt__,
{'iptables.delete_chain': mock}):
ret.update({'changes': {'locale': 'salt'},
'comment': 'iptables salt chain in filter'
' table delete success for ipv4',
'result': True})
self.assertDictEqual(iptables.chain_absent("salt"),
ret)
ret.update({'changes': {}, 'result': False,
'comment': 'Failed to flush salt chain'
' in filter table: a for ipv4'})
self.assertDictEqual(iptables.chain_absent("salt"), ret)
def test_append(self):
'''
Test to append a rule to a chain
'''
ret = {'name': 'salt',
'changes': {},
'result': None,
'comment': ''
}
self.assertDictEqual(iptables.append('salt', rules=[]), ret)
mock = MagicMock(return_value=[])
with patch.object(iptables, '_STATE_INTERNAL_KEYWORDS', mock):
mock = MagicMock(return_value='a')
with patch.dict(iptables.__salt__, {'iptables.build_rule': mock}):
mock = MagicMock(side_effect=[True, False, False, False])
with patch.dict(iptables.__salt__, {'iptables.check': mock}):
ret.update({'comment': 'iptables rule for salt'
' already set (a) for ipv4',
'result': True})
self.assertDictEqual(iptables.append('salt',
table='', chain=''),
ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'result': None,
'comment': 'iptables rule for salt'
' needs to be set (a) for ipv4'})
self.assertDictEqual(iptables.append('salt',
table='',
chain=''), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[True, False])
with patch.dict(iptables.__salt__,
{'iptables.append': mock}):
ret.update({'changes': {'locale': 'salt'},
'result': True,
'comment': 'Set iptables rule'
' for salt to: a for ipv4'})
self.assertDictEqual(iptables.append('salt',
table='',
chain=''),
ret)
ret.update({'changes': {},
'result': False,
'comment': 'Failed to set iptables'
' rule for salt.\nAttempted rule was'
' a for ipv4'})
self.assertDictEqual(iptables.append('salt',
table='',
chain=''),
ret)
def test_insert(self):
'''
Test to insert a rule into a chain
'''
ret = {'name': 'salt',
'changes': {},
'result': None,
'comment': ''}
self.assertDictEqual(iptables.insert('salt', rules=[]), ret)
mock = MagicMock(return_value=[])
with patch.object(iptables, '_STATE_INTERNAL_KEYWORDS', mock):
mock = MagicMock(return_value='a')
with patch.dict(iptables.__salt__, {'iptables.build_rule': mock}):
mock = MagicMock(side_effect=[True, False, False, False])
with patch.dict(iptables.__salt__, {'iptables.check': mock}):
ret.update({'comment': 'iptables rule for salt'
' already set for ipv4 (a)',
'result': True})
self.assertDictEqual(iptables.insert('salt',
table='', chain=''),
ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'result': None,
'comment': 'iptables rule for salt'
' needs to be set for ipv4 (a)'})
self.assertDictEqual(iptables.insert('salt',
table='',
chain=''), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[False, True])
with patch.dict(iptables.__salt__,
{'iptables.insert': mock}):
ret.update({'changes': {'locale': 'salt'},
'result': True,
'comment': 'Set iptables rule'
' for salt to: a for ipv4'})
self.assertDictEqual(iptables.insert('salt',
table='',
chain='',
position=''),
ret)
ret.update({'changes': {},
'result': False,
'comment': 'Failed to set iptables'
' rule for salt.\nAttempted rule was a'
})
self.assertDictEqual(iptables.insert('salt',
table='',
chain='',
position=''),
ret)
def test_delete(self):
'''
        Test to delete a rule from a chain
'''
ret = {'name': 'salt',
'changes': {},
'result': None,
'comment': ''}
self.assertDictEqual(iptables.delete('salt', rules=[]), ret)
mock = MagicMock(return_value=[])
with patch.object(iptables, '_STATE_INTERNAL_KEYWORDS', mock):
mock = MagicMock(return_value='a')
with patch.dict(iptables.__salt__, {'iptables.build_rule': mock}):
mock = MagicMock(side_effect=[False, True, True, True])
with patch.dict(iptables.__salt__, {'iptables.check': mock}):
ret.update({'comment': 'iptables rule for salt'
' already absent for ipv4 (a)',
'result': True})
self.assertDictEqual(iptables.delete('salt',
table='', chain=''),
ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'result': None,
'comment': 'iptables rule for salt needs'
' to be deleted for ipv4 (a)'})
self.assertDictEqual(iptables.delete('salt',
table='',
chain=''), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[False, True])
with patch.dict(iptables.__salt__,
{'iptables.delete': mock}):
ret.update({'result': True,
'changes': {'locale': 'salt'},
'comment': 'Delete iptables rule'
' for salt a'})
self.assertDictEqual(iptables.delete('salt',
table='',
chain='',
position=''),
ret)
ret.update({'result': False,
'changes': {},
'comment': 'Failed to delete iptables'
' rule for salt.\nAttempted rule was a'
})
self.assertDictEqual(iptables.delete('salt',
table='',
chain='',
position=''),
ret)
def test_set_policy(self):
'''
        Test to set the default policy for iptables firewall tables
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''}
mock = MagicMock(return_value=[])
with patch.object(iptables, '_STATE_INTERNAL_KEYWORDS', mock):
mock = MagicMock(return_value='stack')
with patch.dict(iptables.__salt__, {'iptables.get_policy': mock}):
ret.update({'comment': 'iptables default policy for chain'
' on table for ipv4 already set to stack'})
self.assertDictEqual(iptables.set_policy('salt',
table='',
chain='',
policy='stack'), ret)
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'comment': 'iptables default policy for chain'
' on table for ipv4 needs to be set'
' to sal', 'result': None})
self.assertDictEqual(iptables.set_policy('salt',
table='',
chain='',
policy='sal'),
ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[False, True])
with patch.dict(iptables.__salt__,
{'iptables.set_policy': mock}):
ret.update({'changes': {'locale': 'salt'},
'comment': 'Set default policy for'
' to sal family ipv4',
'result': True})
self.assertDictEqual(iptables.set_policy('salt',
table='',
chain='',
policy='sal'),
ret)
ret.update({'comment': 'Failed to set iptables'
' default policy',
'result': False,
'changes': {}})
self.assertDictEqual(iptables.set_policy('salt',
table='',
chain='',
policy='sal'),
ret)
def test_flush(self):
'''
Test to flush current iptables state
'''
ret = {'name': 'salt',
'changes': {},
'result': None,
'comment': ''}
mock = MagicMock(return_value=[])
with patch.object(iptables, '_STATE_INTERNAL_KEYWORDS', mock):
with patch.dict(iptables.__opts__, {'test': True}):
ret.update({'comment': 'iptables rules in salt table filter'
' chain ipv4 family needs to be flushed'})
self.assertDictEqual(iptables.flush('salt'), ret)
with patch.dict(iptables.__opts__, {'test': False}):
mock = MagicMock(side_effect=[False, True])
with patch.dict(iptables.__salt__,
{'iptables.flush': mock}):
ret.update({'changes': {'locale': 'salt'},
'comment': 'Flush iptables rules in'
' table chain ipv4 family',
'result': True})
self.assertDictEqual(iptables.flush('salt',
table='', chain=''),
ret)
ret.update({'changes': {},
'comment': 'Failed to flush iptables rules',
'result': False})
self.assertDictEqual(iptables.flush('salt',
table='', chain=''),
ret)
def test_mod_aggregate(self):
'''
        Test the mod_aggregate function
'''
self.assertDictEqual(iptables.mod_aggregate({'fun': 'salt'}, [], []),
{'fun': 'salt'})
self.assertDictEqual(iptables.mod_aggregate({'fun': 'append'}, [], []),
{'fun': 'append'})
if __name__ == '__main__':
from integration import run_tests
run_tests(IptablesTestCase, needs_daemon=False)
| 48.382813
| 79
| 0.385166
|
0f04261294a82edc634740208a6a5daaee673f82
| 1,911
|
py
|
Python
|
pynonymizer/strategy/database.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 40
|
2020-10-19T14:08:05.000Z
|
2021-11-19T10:44:52.000Z
|
pynonymizer/strategy/database.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 51
|
2020-09-21T19:59:03.000Z
|
2021-11-12T09:19:00.000Z
|
pynonymizer/strategy/database.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 19
|
2020-10-20T13:18:41.000Z
|
2021-11-11T13:22:00.000Z
|
from pynonymizer.strategy.table import TableStrategyTypes
from pynonymizer.strategy.update_column import UpdateColumnStrategyTypes
class DatabaseStrategy:
def __init__(self, table_strategies=None, before_scripts=None, after_scripts=None):
self.table_strategies = []
self.before_scripts = []
self.after_scripts = []
for table_strat in table_strategies:
self.table_strategies.append(table_strat)
if before_scripts:
for script in before_scripts:
self.before_scripts.append(script)
if after_scripts:
for script in after_scripts:
self.after_scripts.append(script)
@property
def scripts(self):
"""
Deprecated - use before/after vars
:return:
"""
return {"before": self.before_scripts, "after": self.after_scripts}
@property
def fake_update_qualifier_map(self):
column_strategies = {}
for table_strategy in self.table_strategies:
if table_strategy.strategy_type == TableStrategyTypes.UPDATE_COLUMNS:
for column_strategy in table_strategy.column_strategies:
if (
column_strategy.strategy_type
== UpdateColumnStrategyTypes.FAKE_UPDATE
):
column_strategies[column_strategy.qualifier] = column_strategy
return column_strategies
@property
def all_column_strategies(self):
        # list, not dict: += below concatenates the per-table lists of column strategies
        column_strategies = []
        for table_strategy in self.table_strategies:
            if table_strategy.strategy_type == TableStrategyTypes.UPDATE_COLUMNS:
                column_strategies += table_strategy.column_strategies
return column_strategies
def get_all_column_strategies(self):
"""LEGACY: move to property"""
return self.all_column_strategies
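# Hedged usage sketch (illustrative addition): the concrete table strategies are
# project-specific and omitted here; the example only shows how DatabaseStrategy
# collects them together with before/after scripts.
#     strategy = DatabaseStrategy(
#         table_strategies=[],                          # e.g. parsed from the config file
#         before_scripts=["SET FOREIGN_KEY_CHECKS=0;"],
#         after_scripts=["SET FOREIGN_KEY_CHECKS=1;"],
#     )
#     strategy.scripts  # {'before': [...], 'after': [...]}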
| 33.526316
| 87
| 0.649398
|
b05b1e97c84a5dcf934f3f1cd7019f221df8a019
| 5,067
|
py
|
Python
|
Examples/es_helpers.py
|
julian-risch/CHIIR2021-Resource
|
99d626a4aa7ef8ed1476dc5c0ee087ed1857bafc
|
[
"MIT"
] | 11
|
2021-01-04T19:52:13.000Z
|
2022-03-22T17:34:54.000Z
|
Examples/es_helpers.py
|
julian-risch/CHIIR2021-Resource
|
99d626a4aa7ef8ed1476dc5c0ee087ed1857bafc
|
[
"MIT"
] | 3
|
2021-07-02T18:38:34.000Z
|
2021-08-09T20:46:33.000Z
|
Examples/es_helpers.py
|
julian-risch/PatentMatch
|
99d626a4aa7ef8ed1476dc5c0ee087ed1857bafc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 13:45:09 2017
@author: Samuele Garda
"""
import json
import glob
import logging
import argparse
import requests
import elasticsearch
from elasticsearch import Elasticsearch,helpers
from datetime import datetime
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(module)s: %(message)s', level = 'INFO')
def parse_arguments():
"""
Parse options for functions.
"""
parser = argparse.ArgumentParser(description='Tool for managing Elasticsearch indices')
subparsers = parser.add_subparsers()
create = subparsers.add_parser('create', help = 'Create Elasticsearch index')
create.add_argument('-i','--index', required = True,help = 'Name of the new index')
  create.add_argument('-m','--mappings',type=argparse.FileType('r'),required = True,help = 'File where the mappings configuration is stored for the index')
delete = subparsers.add_parser('delete', help = 'Delete one or more Elasticsearch indices')
delete.add_argument('-e','--erase', nargs = '*', required = True,help = 'Name of index to delete')
index = subparsers.add_parser('index', help = 'Index JSON files')
  index.add_argument('-d','--dir',required = True, help = 'Directory where the JSON files are stored. Be sure that this path points to the Spider folder!')
  index.add_argument('-l','--location',required = True, help = 'Name of the index where the JSON files are indexed. If the index does not exist it is created' )
index.add_argument('-t','--item-type',required = True, help = 'Name of type to be stored in ES index' )
  index.add_argument('-c','--chunk-size', type = int, nargs='?', default = 500 , help ='Number of JSON lines to load into memory before indexing')
reindex = subparsers.add_parser('reindex', help='Reindex index')
reindex.add_argument('-s','--source', required = True, help='Source index where documents are stored')
reindex.add_argument('-t','--target',required = True, help = 'Target index where to move documents')
args = parser.parse_args()
return args
def create_index(es,index_name, body):
if not es.indices.exists(index_name):
es.indices.create(index = index_name, body = body)
def delete_indices(es,indices_name):
for index in indices_name:
if es.indices.exists(index):
es.indices.delete(index = index)
else:
logger.info('Index `{}` not found'.format(index))
def reindex(es,source_index, target_index):
helpers.reindex(es, source_index = source_index, target_index = target_index )
def lazy_indexing(es,path,chunck,index,item_type):
def serialize_json(json_line):
to_null = ['author', 'article_tag','list_of_tags','keywords','news_keywords']
for tag in to_null:
if json_line[tag] == '---':
json_line[tag] = None
if json_line['publication_date'] == '---':
json_line['publication_date'] = datetime.strptime('1900-01-01','%Y-%m-%d')
else:
try:
json_line['publication_date'] = datetime.strptime(json_line['publication_date'], '%d %B %Y').date()
except ValueError:
try:
json_line['publication_date'] = datetime.strptime(json_line['publication_date'].replace('T',' '), '%Y-%m-%d %H:%S')
except ValueError:
pass
return json_line
def lazy_json_load(filename):
with open(filename) as infile:
for line in infile:
json_line = json.loads(line)
formattd_json_line = serialize_json(json_line)
index_action = {
'_index': index,
'_type': item_type,
'_id' : formattd_json_line['url'],
'_source': formattd_json_line
}
yield index_action
files = [file for file in glob.glob(path + '/**/*.json', recursive=True) if not "active.json" in file.split('/')]
logger.info("Fond {0} documents to index".format(len(files)))
for filename in files:
logger.info("Indexing : {}".format(filename))
helpers.bulk(client = es,chunk_size = chunck, actions=lazy_json_load(filename), index= index,doc_type='news_article', stats_only = True)
if __name__ == "__main__":
elasticsearch.connection.http_urllib3.warnings.filterwarnings('ignore')
requests.packages.urllib3.disable_warnings()
args = parse_arguments()
# ES = Elasticsearch(hosts='localhost',verify_certs=False,use_ssl = True, http_auth= "admin:admin")
ES = Elasticsearch(['localhost'])
try:
if args.index:
create_index(es = ES, index_name = args.index, body = args.mappings.read())
except AttributeError:
pass
try:
if args.erase:
delete_indices(es = ES, indices_name = args.erase)
except AttributeError:
pass
try:
if args.source:
reindex(es = ES, source_index= args.source , target_index= args.target)
except AttributeError:
pass
try:
if args.dir:
lazy_indexing(es = ES, path = args.dir, index = args.location, item_type= args.item_type, chunck = args.chunk_size)
except AttributeError:
pass
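# Hedged usage sketch (illustrative addition): index, file and directory names below
# are placeholders for the sub-commands defined in parse_arguments().
#     python es_helpers.py create -i news -m mappings.json
#     python es_helpers.py index -d ./crawls/spider -l news -t news_article -c 1000
#     python es_helpers.py reindex -s news -t news_v2
#     python es_helpers.py delete -e news news_v2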
| 34.944828
| 153
| 0.674561
|
e1da1fd179eaf67063dd723d4f9a795ee13bffd8
| 551
|
py
|
Python
|
appengine/flexible/hello_world/responses.py
|
pgiteam/pythonteaching
|
04102879a05f5d98f4238525d4f2e224b40ce581
|
[
"Apache-2.0"
] | null | null | null |
appengine/flexible/hello_world/responses.py
|
pgiteam/pythonteaching
|
04102879a05f5d98f4238525d4f2e224b40ce581
|
[
"Apache-2.0"
] | null | null | null |
appengine/flexible/hello_world/responses.py
|
pgiteam/pythonteaching
|
04102879a05f5d98f4238525d4f2e224b40ce581
|
[
"Apache-2.0"
] | null | null | null |
from pywebio import *
from pywebio.output import *
from pywebio.input import *
def main():
'''
    An interactive web app that takes the user's name
    and outputs hello <username> on the webpage
'''
name = input("Hello! We look forward to working with you. What’s your name?")
print("Nice to meet you " + name + "!")
appdescription = input("Please describe the nature of the mobile app you would like to have built? ")
print("OK, thank you " + "!")
appname = input("Have you thought about an app name? Respond Yes or No ")
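# Hedged usage sketch (illustrative addition): the module defines main() but never
# serves it; one way to run it with PyWebIO would be:
#     from pywebio import start_server
#     start_server(main, port=8080)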
| 36.733333
| 105
| 0.664247
|
5125f8685f077910c0ce563c8327692cb35387d4
| 4,980
|
py
|
Python
|
main.py
|
Heath123/switch-joycon-animation-linux
|
8dbc7240b9dd55b4a7c7796a2a919546f3a9f44b
|
[
"MIT"
] | 1
|
2021-02-21T03:30:03.000Z
|
2021-02-21T03:30:03.000Z
|
main.py
|
Heath123/switch-joycon-animation-linux
|
8dbc7240b9dd55b4a7c7796a2a919546f3a9f44b
|
[
"MIT"
] | null | null | null |
main.py
|
Heath123/switch-joycon-animation-linux
|
8dbc7240b9dd55b4a7c7796a2a919546f3a9f44b
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication
import simpleaudio as sa
# https://stackoverflow.com/questions/25950049/creating-a-transparent-overlay-with-qt
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
# https://stackoverflow.com/questions/17968267/how-to-make-click-through-windows-pyqt
self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents, True)
self.setAttribute(QtCore.Qt.WA_NoChildEventsForParent, True)
self.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.WindowStaysOnTopHint | # Puts the animation on top of everything
QtCore.Qt.FramelessWindowHint | # Removes the frame
QtCore.Qt.X11BypassWindowManagerHint # Make it fill the screen and not show up in the taskbar/dock
)
# Transparent background to draw over screen
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.geometry = QtWidgets.qApp.desktop().availableGeometry()
self.height = self.geometry.height()
self.width = self.geometry.width()
self.setGeometry(
QtWidgets.QStyle.alignedRect(
QtCore.Qt.LeftToRight, QtCore.Qt.AlignLeft,
QtCore.QSize(self.width, self.height), # Fill screen
QtWidgets.qApp.desktop().availableGeometry()
))
# Move window to top left corner
self.move(0, 0)
# The width of the animation thing
self.bar_width = 50
# How far it gets to the bottom
self.offset_from_bottom = int(self.height * 0.03)
# https://www.learnpyqt.com/tutorials/qpropertyanimation/
self.animationWidgets = {}
self.animationWidgets["left"] = QtWidgets.QWidget(self)
self.animationWidgets["right"] = QtWidgets.QWidget(self)
self.animations = {}
for anim_details in [{"name": "left_attach", "side": "left"}, {"name": "right_attach", "side": "right"}]:
# Create a widget for each animation
# TODO: Hide widgets when not in use instead of moving off screen
widget = self.animationWidgets[anim_details["side"]]
# Create animations
# TODO: Add comments here
first_part = QtCore.QPropertyAnimation(widget, b"pos")
if anim_details["side"] == "left":
first_part.setEndValue(QtCore.QPoint(self.bar_width - 100, -self.offset_from_bottom))
else:
first_part.setEndValue(QtCore.QPoint(self.width - self.bar_width, -self.offset_from_bottom))
first_part.setDuration(233)
first_part.setEasingCurve(QtCore.QEasingCurve.OutCubic)
second_part = QtCore.QPropertyAnimation(widget, b"pos")
if anim_details["side"] == "left":
second_part.setEndValue(QtCore.QPoint(-100, -self.offset_from_bottom))
else:
second_part.setEndValue(QtCore.QPoint(self.width, -self.offset_from_bottom))
second_part.setDuration(1000)
second_part.setEasingCurve(QtCore.QEasingCurve.OutCubic)
anim_group = QtCore.QSequentialAnimationGroup()
anim_group.addAnimation(first_part)
anim_group.addAnimation(second_part)
self.animations[anim_details["name"]] = anim_group
def playAnimation(self, side, anim_name, colour):
# Set styles
self.animationWidgets[side].setStyleSheet("background-color: rgba(255, 255, 255, 50); border: 15px solid " +
colour + "; border-radius: 25px;")
self.animationWidgets[side].resize(100, self.height)
if side == "left":
self.animationWidgets[side].move(self.bar_width - 100, -self.height)
else:
self.animationWidgets[side].move(self.width - self.bar_width, -self.height)
# https://simpleaudio.readthedocs.io/en/latest/
wave_obj = sa.WaveObject.from_wave_file("sounds/" + anim_name + ".wav")
wave_obj.play()
self.animations[anim_name].start()
# Just test functions, not that clean
def afterOneSecond(self):
self.playAnimation("right", "right_attach", "rgb(239, 43, 41)")
def afterThreeSeconds(self):
self.playAnimation("left", "left_attach", "rgb(27, 202, 226)")
def afterFiveSeconds(self):
self.playAnimation("left", "left_attach", "rgb(27, 202, 226)")
self.playAnimation("right", "right_attach", "rgb(239, 43, 41)")
if __name__ == '__main__':
app = QApplication(sys.argv)
mywindow = MainWindow()
mywindow.show()
# https://stackoverflow.com/questions/21897322/pyqt-application-load-complete-event
t = QtCore.QTimer()
t.singleShot(1000, mywindow.afterOneSecond)
t.singleShot(3000, mywindow.afterThreeSeconds)
t.singleShot(5000, mywindow.afterFiveSeconds)
sys.exit(app.exec_())
| 40.16129
| 116
| 0.647791
|
7b409e1067ad8ca8aaf15e78341c7deedcb2e33b
| 1,818
|
py
|
Python
|
backend/app/api/endpoints/results.py
|
cloud-bulldozer/ocp_perf_dashboard
|
d050538e9431b724a1963d73ba63aa78e2428942
|
[
"Apache-2.0"
] | null | null | null |
backend/app/api/endpoints/results.py
|
cloud-bulldozer/ocp_perf_dashboard
|
d050538e9431b724a1963d73ba63aa78e2428942
|
[
"Apache-2.0"
] | 5
|
2020-12-14T14:42:41.000Z
|
2021-06-08T19:38:23.000Z
|
backend/app/api/endpoints/results.py
|
mfleader/ocp_perf_dashboard
|
d050538e9431b724a1963d73ba63aa78e2428942
|
[
"Apache-2.0"
] | 3
|
2020-12-13T18:24:25.000Z
|
2020-12-15T15:52:30.000Z
|
import asyncio
from typing import Dict, Iterable
import httpx
from fastapi import APIRouter, Request
from app.services.airflow import AirflowService
from app.services.search import ElasticService
router = APIRouter()
airflow_service = AirflowService()
@router.get("/")
def root(request: Request):
return {
"url" : str(request.url),
"root_path": request.scope.get('root_path')
}
@router.get('/api/results/{pipeline_id}/{job_id}')
async def results_for_job(pipeline_id: str, job_id: str):
query = {
"query": {
"query_string": {
"query": (
f'upstream_job: "{pipeline_id}" '
f'AND upstream_job_build: "{job_id}"')
}
}
}
es = ElasticService()
response = await es.post(query)
await es.close()
tasks = [item['_source'] for item in response["hits"]["hits"]]
tasks_states = await async_tasks_states(tasks)
for task in tasks:
task['job_status'] = tasks_states[task['build_tag']]
return tasks
async def async_tasks_states(tasks: Iterable) -> Dict[str, str]:
async with airflow_service.httpx_client() as session:
tasks_states = await asyncio.gather(
*[call_url(session, task) for task in tasks]
)
return {
task: state for task_state in tasks_states
for task, state in task_state.items()
}
async def call_url(session: httpx.AsyncClient, task) -> Dict[str, str]:
path = (
f"{airflow_service.base_url}/api/v1"
f"/dags/{task['upstream_job']}"
f"/dagRuns/{task['upstream_job_build']}"
f"/taskInstances/{task['build_tag']}"
)
resp = await session.get(path)
resp.raise_for_status()
return {
task['build_tag']: resp.json()['state']
}
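# Hedged usage sketch (illustrative addition): assumes the router is mounted at the
# application root; the pipeline/job ids below are placeholders.
#     import httpx
#     resp = httpx.get("http://localhost:8000/api/results/my_dag/manual__2021-01-01")
#     for task in resp.json():
#         print(task["build_tag"], task["job_status"])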
| 25.971429
| 71
| 0.617162
|
8fcd2a1a4917ba534e5030c545a33a2b9097eb67
| 4,190
|
py
|
Python
|
indico/modules/rb/operations/admin.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
indico/modules/rb/operations/admin.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
indico/modules/rb/operations/admin.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import datetime, time
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.core.permissions import get_unified_permissions, update_principals_permissions
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.map_areas import MapArea
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
@no_autoflush
def _populate_room(room, properties):
for prop, value in properties.items():
if prop not in ['available_equipment', 'bookable_hours', 'bookable_periods']:
setattr(room, prop, value)
def update_room_equipment(room, available_equipment_ids):
available_equipment = EquipmentType.query.filter(EquipmentType.id.in_(available_equipment_ids)).all()
room.available_equipment = available_equipment
db.session.flush()
def update_room_attributes(room, attributes):
current_attributes = {x.attribute.name for x in room.attributes}
new_attributes = {attribute['name'] for attribute in attributes}
deleted_attributes = current_attributes - new_attributes
for attribute in attributes:
room.set_attribute_value(attribute['name'], attribute['value'])
for deleted_attribute in deleted_attributes:
room.set_attribute_value(deleted_attribute, None)
db.session.flush()
def update_room_availability(room, availability):
if 'bookable_hours' in availability:
room.bookable_hours.order_by(False).delete()
unique_bh = set((hours['start_time'], hours['end_time']) for hours in availability['bookable_hours'])
db.session.add_all(
[BookableHours(room=room, start_time=hours[0], end_time=hours[1]) for hours in unique_bh])
if 'nonbookable_periods' in availability:
room.nonbookable_periods.order_by(False).delete()
unique_nbp = set((period['start_dt'], period['end_dt']) for period in availability['nonbookable_periods'])
db.session.add_all(
[NonBookablePeriod(room=room, start_dt=datetime.combine(period[0], time(0, 0)),
end_dt=datetime.combine(period[1], time(23, 59))) for period in unique_nbp])
def update_room(room, args):
acl_entries = args.pop('acl_entries', None)
if acl_entries:
current = {e.principal: get_unified_permissions(e) for e in room.acl_entries}
update_principals_permissions(room, current, acl_entries)
_populate_room(room, args)
db.session.flush()
def create_area(bounds, name, default=False):
top, bottom = bounds['north_east'], bounds['south_west']
if default:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
new_area = MapArea()
new_area.name = name
new_area.is_default = default
new_area.top_left_latitude = top['lat']
new_area.top_left_longitude = top['lng']
new_area.bottom_right_latitude = bottom['lat']
new_area.bottom_right_longitude = bottom['lng']
db.session.add(new_area)
db.session.flush()
return new_area
def update_area(area_id, area_data):
top = area_data['bounds']['north_east']
bottom = area_data['bounds']['south_west']
map_area = MapArea.get_one(area_id)
if 'name' in area_data:
map_area.name = area_data['name']
if 'default' in area_data:
if area_data['default']:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
map_area.is_default = area_data['default']
map_area.top_left_latitude = top['lat']
map_area.top_left_longitude = top['lng']
map_area.bottom_right_latitude = bottom['lat']
map_area.bottom_right_longitude = bottom['lng']
db.session.flush()
def delete_areas(area_ids):
MapArea.query.filter(MapArea.id.in_(area_ids)).delete(synchronize_session='fetch')
db.session.flush()
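# Hedged usage sketch (illustrative addition): the bounds argument expected by
# create_area()/update_area() holds the two map corners; coordinates are placeholders.
#     bounds = {'north_east': {'lat': 46.235, 'lng': 6.055},
#               'south_west': {'lat': 46.231, 'lng': 6.042}}
#     area = create_area(bounds, 'Meyrin site', default=True)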
| 39.528302
| 114
| 0.730072
|
634e877667dde0868ce41a1bd212037392438cb1
| 1,552
|
py
|
Python
|
src/forest/git_tools/__init__.py
|
ADVRHumanoids/forest
|
22995b7bebf9809d49b0887dcb4a35c907fb3e13
|
[
"MIT"
] | null | null | null |
src/forest/git_tools/__init__.py
|
ADVRHumanoids/forest
|
22995b7bebf9809d49b0887dcb4a35c907fb3e13
|
[
"MIT"
] | 6
|
2022-02-24T14:00:39.000Z
|
2022-03-31T14:35:18.000Z
|
src/forest/git_tools/__init__.py
|
ADVRHumanoids/forest
|
22995b7bebf9809d49b0887dcb4a35c907fb3e13
|
[
"MIT"
] | null | null | null |
import typing
from forest.common import proc_utils
import shutil
class GitTools:
def __init__(self, srcdir) -> None:
self.srcdir = srcdir
def clone(self,
server: str,
repository: str,
tag: str,
proto='ssh',
recursive=False,
depth=None,
single_branch=False):
if proto == 'ssh':
addr = f'git@{server}:{repository}'
elif proto == 'https':
addr = f'https://{server}/{repository}'
else:
# TODO more specific exception
raise ValueError(f'unsupported protocol "{proto}"')
# create command
cmd = ['git', 'clone', '--branch', tag]
if single_branch:
cmd.append('--single-branch')
if recursive:
cmd.append('--recursive')
if depth is not None:
            cmd.extend(['--depth', str(depth)])  # git expects string arguments
cmd.extend([addr, self.srcdir])
# clone, and delete the source folder on failure
# (either exception or git returns != 0)
try:
clone_ok = proc_utils.call_process(args=cmd)
if not clone_ok:
self.rm()
except BaseException as e:
# remove src and re-raise exception
self.rm()
raise e
return clone_ok
def checkout(self, tag):
return proc_utils.call_process(['git', 'checkout', tag], cwd=self.srcdir)
def rm(self):
shutil.rmtree(self.srcdir, ignore_errors=True)
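# Hedged usage sketch (illustrative addition): server and repository names are
# placeholders; clone() builds the ssh or https address from them and removes the
# source directory again if the clone fails.
#     git = GitTools('/tmp/forest_src/my_pkg')
#     if git.clone(server='github.com', repository='example/my_pkg.git',
#                  tag='master', proto='https', depth=1):
#         git.checkout('v1.0.0')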
| 25.442623
| 81
| 0.528351
|
a1287b29f9b150a1a62cc043358f54b1a48871a8
| 3,595
|
py
|
Python
|
qsdsan/__init__.py
|
philthestone/QSDsan
|
50a9f7ba5f24aca653c999fe0a8c52f940287fa7
|
[
"Unlicense"
] | null | null | null |
qsdsan/__init__.py
|
philthestone/QSDsan
|
50a9f7ba5f24aca653c999fe0a8c52f940287fa7
|
[
"Unlicense"
] | null | null | null |
qsdsan/__init__.py
|
philthestone/QSDsan
|
50a9f7ba5f24aca653c999fe0a8c52f940287fa7
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Yalin Li <zoe.yalin.li@gmail.com>
Joy Zhang <joycheung1994@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
# Check system environment, Python 3.7 and below will have issues unpickling saved results
import sys
py_version = sys.version.split('.')
_PY_MAJOR, _PY_MINOR = int(py_version[0]), int(py_version[1])
if (_PY_MAJOR, _PY_MINOR) <= (3, 7): # pragma: no cover
from warnings import warn
if (_PY_MAJOR, _PY_MINOR) >= (3, 5):
try: import pickle5 as _pk
except ModuleNotFoundError:
warn(f'Python version {_PY_MAJOR}.{_PY_MINOR} does not support Pickle Protocol 5, '
'installing `pickle5` by running `pip install pickle5` in your '
'command/Anaconda prompt or terminal can reduce the loading time.\n'
'For further information, check https://pypi.org/project/pickle5/.')
_pk = None
else:
warn(f'Python version {_PY_MAJOR}.{_PY_MINOR} does not support Pickle Protocol 5, '
             'and will be slower when loading the default processes.')
_pk = None
del warn
else:
import pickle as _pk
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('qsdsan').version
except pkg_resources.DistributionNotFound: # pragma: no cover
__version__ = None
del sys, py_version, pkg_resources
import thermosteam as tmo
import biosteam as bst
Chemical = tmo.Chemical
Chemicals = tmo.Chemicals
CompiledChemicals = tmo.CompiledChemicals
Stream = tmo.Stream
MultiStream = tmo.MultiStream
set_thermo = tmo.settings.set_thermo
get_components = tmo.settings.get_chemicals
get_thermo = tmo.settings.get_thermo
PowerUtility = bst.PowerUtility
Unit = bst.Unit
System = bst.System
Scope = bst.utils.Scope
Model = bst.Model
Flowsheet = bst.Flowsheet
main_flowsheet = bst.main_flowsheet
CEPCI = bst.CE # Chemical Engineering Plant Cost Index
CEPCI_by_year = bst.units.design_tools.CEPCI_by_year
del tmo, bst
currency = 'USD'
from . import utils
from ._component import *
from ._components import *
from ._sanstream import *
from ._waste_stream import *
from ._process import *
from ._impact_indicator import *
from ._impact_item import *
from ._construction import *
from ._equipment import *
from ._transportation import *
from ._sanunit import *
from ._simple_tea import *
from ._lca import *
from . import (
_component,
_components,
_sanstream,
_waste_stream,
_process,
_impact_indicator,
_impact_item,
_construction,
_equipment,
_transportation,
_sanunit,
_simple_tea,
_lca,
processes,
equipments,
sanunits,
stats,
)
utils._secondary_importing()
for _slot in utils.doc_examples.__all__:
setattr(utils, _slot, getattr(utils.doc_examples, _slot))
# Add the `pump` decorator to the util module
from .sanunits import wwtpump
utils.__all__ = (*utils.__all__, 'wwtpump')
setattr(utils, 'wwtpump', wwtpump)
__all__ = (
*_component.__all__,
*_components.__all__,
*_sanstream.__all__,
*_waste_stream.__all__,
*_process.__all__,
*_impact_indicator.__all__,
*_impact_item.__all__,
*_construction.__all__,
*_transportation.__all__,
*_equipment.__all__,
*_sanunit.__all__,
*_simple_tea.__all__,
*_lca.__all__,
)
| 27.030075
| 95
| 0.719054
|
5bc168093bf440319c99ed135a699dd0608e8df4
| 1,758
|
py
|
Python
|
Tarea 9/src/rosembrock.py
|
EsauPR/CIMAT-Numerical-Optimization
|
d7e932d4f1a6fe275492c4bc28044ef101ee69cf
|
[
"MIT"
] | null | null | null |
Tarea 9/src/rosembrock.py
|
EsauPR/CIMAT-Numerical-Optimization
|
d7e932d4f1a6fe275492c4bc28044ef101ee69cf
|
[
"MIT"
] | null | null | null |
Tarea 9/src/rosembrock.py
|
EsauPR/CIMAT-Numerical-Optimization
|
d7e932d4f1a6fe275492c4bc28044ef101ee69cf
|
[
"MIT"
] | null | null | null |
""" Rosembrock function """
import numpy as np
def function(x: np.array, n: int = 100) -> float:
""" Compute the evaluation for Extended Rosembrock function with n=100
Args:
x: Array of length=n with x's parameters
n: Rosembrock, n = 100
Returns:
Evaluation of f(X)
"""
ans = 0.0
for i in range(n-1):
ans += 100 * (x[i+1] - x[i]**2)**2 + (1 - x[i])**2
return ans
def gradient(x: np.array, n: int = 100) -> np.array:
""" Compute the gradient evaluation for Extended Rosembrock function with n=2
Args:
x: Array of length=n with x's parameters
n: Rosembrock, n = 100
Returns:
Gradient of f(x1, ..., xn), array with lenght=n
"""
# grad = np.zeros(n, dtype=np.float64)
# for i in range(n-1):
# grad[i] = -400 * x[i+1] * x[i] + 400 * x[i]**3 + 2 * x[i] -2
# grad[n-1] = 200 * (x[n-1] - x[n-2]**2)
# return grad
grad = np.array([-400*(x[1]-x[0]**2)*x[0]-2*(1-x[0])])
for i in range(1, n-1):
grad = np.append(grad, [200*(x[i]-x[i-1]**2)-400*(x[i+1]-x[i]**2)*x[i]-2*(1-x[i])])
    grad = np.append(grad, [200*(x[n-1] - x[n-2]**2)])  # last component; do not hard-code n=100
return grad
def hessian(x: np.array, n: int = 100) -> np.array:
""" Compute the Hessian evaluation for Extended Rosembrock function with n=2
Args:
x: Array of length=n with x's parameters
Returns:
Hessian of f(x1, ..., xn), Matrix with size=nxn
"""
hess = np.zeros((n, n), dtype=np.float64)
for i in range(n-1):
hess[i][i] = -400 * x[i+1] + 1200 * x[i]**2 + 2
hess[i][i] += 200 if i != 0 else 0
hess[i][i+1] = hess[i+1][i] = -400 * x[i]
hess[n-1][n-1] = 200.0
return hess
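# Hedged sanity check (illustrative addition, not part of the original module): at the
# global minimum x = (1, ..., 1) the extended Rosenbrock function is 0, its gradient
# vanishes and the Hessian is symmetric, which gives a quick consistency test.
if __name__ == "__main__":
    x_star = np.ones(100)
    print(function(x_star))                                  # expected: 0.0
    print(np.linalg.norm(gradient(x_star)))                  # expected: 0.0
    print(np.allclose(hessian(x_star), hessian(x_star).T))   # expected: True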
| 29.3
| 91
| 0.517065
|
6d0bd05733ba5a87d322a1b123a37510b959f406
| 158
|
py
|
Python
|
contrib/wallettools/walletunlock.py
|
BowscoinBSC/Bowscoin
|
f7e1dc4f99ca996a8fbda596fe19fdb07ae7f1fa
|
[
"MIT"
] | 1
|
2019-04-30T19:42:42.000Z
|
2019-04-30T19:42:42.000Z
|
contrib/wallettools/walletunlock.py
|
BowscoinBSC/Bowscoin
|
f7e1dc4f99ca996a8fbda596fe19fdb07ae7f1fa
|
[
"MIT"
] | null | null | null |
contrib/wallettools/walletunlock.py
|
BowscoinBSC/Bowscoin
|
f7e1dc4f99ca996a8fbda596fe19fdb07ae7f1fa
|
[
"MIT"
] | 1
|
2018-07-02T12:18:45.000Z
|
2018-07-02T12:18:45.000Z
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8145")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
| 31.6
| 46
| 0.765823
|
e02b47a25c0d3e9918af721f047ccbff830126d5
| 5,985
|
py
|
Python
|
tools/db/replicateDbs.py
|
zachschultz/openwhisk
|
e9d5c505a391fe47585d706521ad991a9b65465d
|
[
"Apache-2.0"
] | null | null | null |
tools/db/replicateDbs.py
|
zachschultz/openwhisk
|
e9d5c505a391fe47585d706521ad991a9b65465d
|
[
"Apache-2.0"
] | null | null | null |
tools/db/replicateDbs.py
|
zachschultz/openwhisk
|
e9d5c505a391fe47585d706521ad991a9b65465d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Python script to replicate and replay databases.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import argparse
import time
import re
import couchdb.client
def retry(fn, retries):
    try:
        # fn must be called here, otherwise no exception is ever raised and nothing is retried
        return fn()
    except Exception:
        if (retries > 0):
            time.sleep(1)
            return retry(fn, retries - 1)
        else:
            raise
def replicateDatabases(args):
"""Replicate databases."""
sourceDb = couchdb.client.Server(args.sourceDbUrl)
targetDb = couchdb.client.Server(args.targetDbUrl)
excludedDatabases = args.exclude.split(",")
# Create _replicator DB if it does not exist yet.
if "_replicator" not in sourceDb:
sourceDb.create("_replicator")
replicator = sourceDb["_replicator"]
now = int(time.time())
backupPrefix = "backup_%d_" % now
def isExcluded(dbName):
return dbName.replace(args.dbPrefix, "", 1) in excludedDatabases
# Create backup of all databases with given prefix
print("----- Create backups -----")
for db in filter(lambda dbName: dbName.startswith(args.dbPrefix) and not isExcluded(dbName), sourceDb):
backupDb = backupPrefix + db if not args.continuous else 'continuous_' + db
replicateDesignDocument = {
"_id": backupDb,
"source": args.sourceDbUrl + "/" + db,
"target": args.targetDbUrl + "/" + backupDb,
"create_target": True,
"continuous": args.continuous,
}
print("create backup: %s" % backupDb)
filterName = "snapshotFilters"
filterDesignDocument = sourceDb[db].get("_design/%s" % filterName)
if not args.continuous and filterDesignDocument:
replicateDesignDocument["filter"] = "%s/withoutDeletedAndDesignDocuments" % filterName
replicator.save(replicateDesignDocument)
def isBackupDb(dbName):
        return re.match(r"^backup_\d+_" + args.dbPrefix, dbName)
def extractTimestamp(dbName):
return int(dbName.split("_")[1])
def isExpired(timestamp):
return now - args.expires > timestamp
# Delete all documents in the _replicator-database of old backups to avoid that they continue after they are deprecated
print("----- Delete backup-documents older than %d seconds -----" % args.expires)
for doc in filter(lambda doc: isBackupDb(doc.id) and isExpired(extractTimestamp(doc.id)), replicator.view('_all_docs', include_docs=True)):
print("deleting backup document: %s" % doc.id)
# Get again the latest version of the document to delete the right revision and avoid Conflicts
retry(lambda: replicator.delete(replicator[doc.id]), 5)
# Delete all backup-databases, that are older than specified
print("----- Delete backups older than %d seconds -----" % args.expires)
for db in filter(lambda db: isBackupDb(db) and isExpired(extractTimestamp(db)), targetDb):
print("deleting backup: %s" % db)
targetDb.delete(db)
def replayDatabases(args):
"""Replays databases."""
sourceDb = couchdb.client.Server(args.sourceDbUrl)
# Create _replicator DB if it does not exist yet.
if "_replicator" not in sourceDb:
sourceDb.create("_replicator")
for db in filter(lambda dbName: dbName.startswith(args.dbPrefix), sourceDb):
plainDbName = db.replace(args.dbPrefix, "")
(identifier, _) = sourceDb["_replicator"].save({
"source": args.sourceDbUrl + "/" + db,
"target": args.targetDbUrl + "/" + plainDbName,
"create_target": True
})
print("replaying backup: %s -> %s (%s)" % (db, plainDbName, identifier))
parser = argparse.ArgumentParser(description="Utility to create a backup of all databases with the defined prefix.")
parser.add_argument("--sourceDbUrl", required=True, help="Server URL of the source database, that has to be backed up. E.g. 'https://xxx:yyy@domain.couch.com:443'")
parser.add_argument("--targetDbUrl", required=True, help="Server URL of the target database, where the backup is stored. Like sourceDbUrl.")
subparsers = parser.add_subparsers(help='sub-command help')
# Replicate
replicateParser = subparsers.add_parser("replicate", help="Replicates source databases to the target database.")
replicateParser.add_argument("--dbPrefix", required=True, help="Prefix of the databases, that should be backed up.")
replicateParser.add_argument("--expires", required=True, type=int, help="Deletes all backups, that are older than the given value in seconds.")
replicateParser.add_argument("--continuous", action="store_true", help="Wether or not the backup should be continuous")
replicateParser.add_argument("--exclude", default="", help="Comma separated list of database names, that should not be backed up. (Without prefix).")
replicateParser.set_defaults(func=replicateDatabases)
# Replay
replicateParser = subparsers.add_parser("replay", help="Replays source databases to the target database.")
replicateParser.add_argument("--dbPrefix", required=True, help="Prefix of the databases, that should be replayed. Usually 'backup_{TIMESTAMP}_'")
replicateParser.set_defaults(func=replayDatabases)
arguments = parser.parse_args()
arguments.func(arguments)
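# Hedged usage sketch (illustrative addition): URLs, prefixes and retention values are
# placeholders for the replicate and replay sub-commands defined above.
#     python replicateDbs.py \
#         --sourceDbUrl https://user:pass@source.example.org:443 \
#         --targetDbUrl https://user:pass@backup.example.org:443 \
#         replicate --dbPrefix whisk_ --expires 86400 --exclude subjects
#     python replicateDbs.py \
#         --sourceDbUrl https://user:pass@backup.example.org:443 \
#         --targetDbUrl https://user:pass@restore.example.org:443 \
#         replay --dbPrefix backup_1500000000_whisk_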
| 43.686131
| 164
| 0.700418
|
fd53ee81d5f0b1413d6666c6257d229c273d286f
| 4,611
|
py
|
Python
|
single_email_confirmation/models.py
|
ozldmezot/django-single-email-confirmation
|
2b9c771c6399350de7593283bb97004841133bee
|
[
"Unlicense"
] | null | null | null |
single_email_confirmation/models.py
|
ozldmezot/django-single-email-confirmation
|
2b9c771c6399350de7593283bb97004841133bee
|
[
"Unlicense"
] | null | null | null |
single_email_confirmation/models.py
|
ozldmezot/django-single-email-confirmation
|
2b9c771c6399350de7593283bb97004841133bee
|
[
"Unlicense"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .signals import (
email_confirmed,
confirmed_email_change_requested,
unconfirmed_email_change_requested,
)
from time import time
from .exceptions import ConfirmationTokenDoesNotExistException
from django.utils.crypto import get_random_string
class EmailAddressManager(models.Manager):
def generate_key(self):
        # ensure the generated key is unique over time
while True:
allowed_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
t = int( time() )
stamp = ''
while t:
stamp += allowed_chars[t % 62]
t = int( t / 62 )
queryset = self.all()
key = get_random_string(34) + str(stamp)
email_address = queryset.filter(key=key).first()
if not email_address:
break
return key
def confirm(self, key):
"Confirm an email address. Returns the address that was confirmed."
queryset = self.all()
email_address = queryset.filter(key=key).first()
if not email_address:
raise ConfirmationTokenDoesNotExistException(key)
email_address.confirmed_at = timezone.now()
email_address.key = None
owner = email_address.owner
owner.set_current_email(email_address.email)
owner._single_email_confirmation_signal = email_confirmed
owner.save()
return owner
class EmailAddress(models.Model):
"An email address belonging to a User"
email = models.EmailField(max_length=255)
key = models.TextField(unique=True, blank=True, null=True)
set_at = models.DateTimeField(
default=timezone.now,
help_text=_('When the confirmation key expiration was set'),
)
confirmed_at = models.DateTimeField(
blank=True, null=True,
help_text=_('First time this email was confirmed'),
)
objects = EmailAddressManager()
@property
def is_confirmed(self):
return self.confirmed_at is not None
def reset_confirmation(self, email=None):
self.key = EmailAddress._default_manager.generate_key()
self.set_at = timezone.now()
if email:
self.email=email
class EmailConfirmationMixin(models.Model):
"""
Mixin to be used with your django 1.9+ custom User model.
Provides python-level functionality only.
"""
"""
Confirmed Email will always be primary email or None, therefore
supplying get_unconfirmed_email, because this can differentiate from
primary email
"""
_single_email_confirmation_signal = None
email_field_name = 'email'
email_address = models.OneToOneField(EmailAddress, related_name='owner', null=True, blank=True)
class Meta:
abstract = True
@property
def email_is_confirmed(self):
email_address = self.email_address
if not email_address:
email_address = EmailAddress()
return email_address.is_confirmed
def get_current_email(self):
return getattr(self, self.email_field_name)
def set_current_email(self, email):
setattr(self, self.email_field_name, email)
def change_email(self, email, commit=True, force=False):
# email was not changed
if self.email == email:
return
email_address = self.email_address
# no metadata available, mixin might have been added later
if not email_address:
email_address = EmailAddress()
# this email is already requested for change
if email == email_address.email and not force:
return
if self.email_is_confirmed:
self._single_email_confirmation_signal = confirmed_email_change_requested
else:
self.email_address = email_address
self.email = email
self._single_email_confirmation_signal = unconfirmed_email_change_requested
email_address.reset_confirmation(email)
if commit:
self.save()
def save(self, *args, **kwargs):
if self.email_address:
self.email_address.save()
self.email_address = self.email_address
saved = super().save(*args, **kwargs)
if self._single_email_confirmation_signal:
self._single_email_confirmation_signal.send(sender=self, email_address=self.email_address)
self._single_email_confirmation_signal = None
return saved
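# Hedged usage sketch (illustrative addition): assumes a custom User model that mixes
# in EmailConfirmationMixin; the address and variable names are placeholders.
#     user.change_email('new@example.com')        # stores the new address unconfirmed
#     key = user.email_address.key                # token that would normally be e-mailed
#     owner = EmailAddress.objects.confirm(key)   # sets confirmed_at and promotes the address
#     owner.email_is_confirmed                    # True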
| 29.941558
| 102
| 0.66298
|
7a39f19b4060c906607dc30d4414bf469cf5b226
| 2,032
|
py
|
Python
|
galaxy_utils/sequence/scripts/fastq_paired_end_interlacer.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 5
|
2015-10-31T11:28:50.000Z
|
2020-09-08T20:13:48.000Z
|
galaxy_utils/sequence/scripts/fastq_paired_end_interlacer.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 22
|
2015-12-09T00:13:48.000Z
|
2020-02-18T12:25:38.000Z
|
galaxy_utils/sequence/scripts/fastq_paired_end_interlacer.py
|
galaxyproject/sequence_utils
|
f7e8cd163d27cb6c16a86ae63e5912ffe32e92ba
|
[
"CC-BY-3.0"
] | 8
|
2015-10-21T13:22:18.000Z
|
2020-02-07T09:54:00.000Z
|
# Florent Angly
import sys
from galaxy_utils.sequence.fastq import (
fastqJoiner,
fastqNamedReader,
fastqReader,
fastqWriter,
)
def main():
mate1_filename = sys.argv[1]
mate1_type = sys.argv[2] or 'sanger'
mate2_filename = sys.argv[3]
mate2_type = sys.argv[4] or 'sanger'
outfile_pairs = sys.argv[5]
outfile_singles = sys.argv[6]
if mate1_type != mate2_type:
print(f"WARNING: You are trying to interlace files of two different types: {mate1_type} and {mate2_type}.")
return
type = mate1_type
joiner = fastqJoiner(type)
nof_singles = 0
nof_pairs = 0
i = None
j = None
out_pairs = fastqWriter(path=outfile_pairs, format=type)
out_singles = fastqWriter(path=outfile_singles, format=type)
mate2_input = fastqNamedReader(path=mate2_filename, format=type)
mate1_input = fastqNamedReader(path=mate1_filename, format=type)
reader1 = fastqReader(path=mate1_filename, format=type)
reader2 = fastqReader(path=mate2_filename, format=type)
with out_pairs, out_singles, mate2_input, mate1_input, reader1, reader2:
# Pairs + singles present in mate1
for i, mate1 in enumerate(reader1):
mate2 = mate2_input.get(joiner.get_paired_identifier(mate1))
if mate2:
out_pairs.write(mate1)
out_pairs.write(mate2)
nof_pairs += 1
else:
out_singles.write(mate1)
nof_singles += 1
# Singles present in mate2
for j, mate2 in enumerate(reader2):
mate1 = mate1_input.get(joiner.get_paired_identifier(mate2))
if not mate1:
out_singles.write(mate2)
nof_singles += 1
if (i is None) and (j is None):
print("Your input files contained no valid FASTQ sequences.")
else:
print(f'There were {nof_singles} single reads.')
print(f'Interlaced {nof_pairs} pairs of sequences.')
if __name__ == "__main__":
main()
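# --- Illustrative usage (not part of the original file) ----------------------
# main() reads six positional arguments from sys.argv; the file names below are
# placeholders and the two quality-score types must match (see the check above).
#
#   python fastq_paired_end_interlacer.py mate1.fastq sanger \
#          mate2.fastq sanger interlaced_pairs.fastq leftover_singles.fastq
#
# The same call can be driven in-process, e.g. from a test:
def _demo_interlace(mate1, mate2, pairs_out, singles_out, fmt='sanger'):
    """Run the interlacer programmatically by populating sys.argv."""
    sys.argv = [sys.argv[0], mate1, fmt, mate2, fmt, pairs_out, singles_out]
    main()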
| 29.882353
| 115
| 0.639764
|
392982a8b99e3c2d5397722e788f2413752e5a3c
| 2,644
|
py
|
Python
|
observations/r/o_brien_kaiser.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199
|
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
observations/r/o_brien_kaiser.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46
|
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
observations/r/o_brien_kaiser.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45
|
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def o_brien_kaiser(path):
"""O'Brien and Kaiser's Repeated-Measures Data
These contrived repeated-measures data are taken from O'Brien and Kaiser
(1985). The data are from an imaginary study in which 16 female and male
subjects, who are divided into three treatments, are measured at a
  pretest, posttest, and a follow-up session; during each session, they are
measured at five occasions at intervals of one hour. The design,
therefore, has two between-subject and two within-subject factors.
The contrasts for the `treatment` factor are set to *-2, 1, 1* and *0,
-1, 1*. The contrasts for the `gender` factor are set to
`contr.sum`.
A data frame with 16 observations on the following 17 variables.
`treatment`
a factor with levels `control` `A` `B`
`gender`
a factor with levels `F` `M`
`pre.1`
pretest, hour 1
`pre.2`
pretest, hour 2
`pre.3`
pretest, hour 3
`pre.4`
pretest, hour 4
`pre.5`
pretest, hour 5
`post.1`
posttest, hour 1
`post.2`
posttest, hour 2
`post.3`
posttest, hour 3
`post.4`
posttest, hour 4
`post.5`
posttest, hour 5
`fup.1`
follow-up, hour 1
`fup.2`
follow-up, hour 2
`fup.3`
follow-up, hour 3
`fup.4`
follow-up, hour 4
`fup.5`
follow-up, hour 5
O'Brien, R. G., and Kaiser, M. K. (1985) MANOVA method for analyzing
repeated measures designs: An extensive primer. *Psychological Bulletin*
**97**, 316–333, Table 7.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `o_brien_kaiser.csv`.
Returns:
Tuple of np.ndarray `x_train` with 16 rows and 17 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'o_brien_kaiser.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/car/OBrienKaiser.csv'
maybe_download_and_extract(path, url,
save_file_name='o_brien_kaiser.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
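# --- Illustrative usage (not part of the original file) ----------------------
# A minimal sketch of calling the loader; "~/data" is a placeholder directory
# and the CSV is downloaded there on first use if it is not already cached.
def _demo_o_brien_kaiser():
  x_train, metadata = o_brien_kaiser('~/data')
  print(x_train.shape)                  # (16, 17) according to the docstring
  print(list(metadata['columns'])[:3])  # first few column headers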
| 23.81982
| 74
| 0.660741
|
4a63ac2f38cd14fdf0c9d66fba05c871e12d325c
| 972
|
py
|
Python
|
mtaani/urls.py
|
TonyKioko/NeighbourHood
|
617af13967f5dfbff70073475d69d9e7b82479ba
|
[
"MIT"
] | null | null | null |
mtaani/urls.py
|
TonyKioko/NeighbourHood
|
617af13967f5dfbff70073475d69d9e7b82479ba
|
[
"MIT"
] | null | null | null |
mtaani/urls.py
|
TonyKioko/NeighbourHood
|
617af13967f5dfbff70073475d69d9e7b82479ba
|
[
"MIT"
] | null | null | null |
"""mtaani URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('app.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
]
| 36
| 79
| 0.692387
|
58e860899faff9f2918db19780a91d56203275cd
| 1,273
|
py
|
Python
|
tests/normalization_layers_test.py
|
dynastes-team/dynastes
|
931b6d9ac83862eb39c2f5144c95b952e9efcd8e
|
[
"MIT"
] | 7
|
2020-01-18T14:28:04.000Z
|
2021-11-10T16:46:34.000Z
|
tests/normalization_layers_test.py
|
veqtor/dynastes
|
931b6d9ac83862eb39c2f5144c95b952e9efcd8e
|
[
"MIT"
] | null | null | null |
tests/normalization_layers_test.py
|
veqtor/dynastes
|
931b6d9ac83862eb39c2f5144c95b952e9efcd8e
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow_addons.layers.normalizations import GroupNormalization
from dynastes.layers.normalization_layers import AdaptiveMultiNormalization, PoolNormalization2D
def _test_grads(testCase: tf.test.TestCase, func, input):
_, grads = tf.test.compute_gradient(func, input)
for grad in grads:
testCase.assertNotAllClose(grad, np.zeros_like(grad))
to_tensor = tf.convert_to_tensor
normal = np.random.normal
class AdaptiveMultiNormalizationTest(tf.test.TestCase):
def test_simple(self):
normalizers = [
GroupNormalization(groups=1, center=False, scale=False),
GroupNormalization(groups=-1, center=False, scale=False),
PoolNormalization2D(pool_size=(-1, 3))
]
layer = AdaptiveMultiNormalization(layers=normalizers)
x = tf.convert_to_tensor(normal(size=(1, 8, 8, 8)).astype(np.float16))
y = tf.convert_to_tensor(normal(size=(1, 2, 3, 4)).astype(np.float16))
res = layer([x, y])
self.assertShapeEqual(x.numpy(), res)
y = tf.convert_to_tensor(normal(size=(1, 4)).astype(np.float16))
res = layer([x, y])
self.assertShapeEqual(x.numpy(), res)
| 36.371429
| 96
| 0.701493
|
c8171b689ba40cd2d7c00610372ddc8a19475c60
| 23,603
|
py
|
Python
|
tensorflow/python/kernel_tests/relu_op_test.py
|
jnorwood/tensorflow
|
67ab6c9cebc4cbb2103246a1523d04261bef22d2
|
[
"Apache-2.0"
] | 2
|
2020-03-15T12:18:42.000Z
|
2020-03-16T05:28:45.000Z
|
tensorflow/python/kernel_tests/relu_op_test.py
|
jnorwood/tensorflow
|
67ab6c9cebc4cbb2103246a1523d04261bef22d2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/kernel_tests/relu_op_test.py
|
jnorwood/tensorflow
|
67ab6c9cebc4cbb2103246a1523d04261bef22d2
|
[
"Apache-2.0"
] | 1
|
2020-01-09T19:28:49.000Z
|
2020-01-09T19:28:49.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relu and ReluGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def _elu_grad_grad(activation):
  """Analytic second derivative of ELU, used as reference in testGradGrad."""
  if activation < 0:
    return np.exp(activation)
  return 0
class ReluTest(test.TestCase):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape))
def testNpRelu(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
self._npRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testRelu(self, np_features):
np_relu = self._npRelu(np_features)
tf_relu = nn_ops.relu(np_features)
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, tf_relu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testReluInt8x4GoodShape(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest("No GPU available")
inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]])
np_relu = self._npRelu(inputs)
tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8))
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, tf_relu)
@test_util.disable_xla("b/123338077") # Passes with XLA
def testReluInt8x4BadShape(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest("No GPU available")
inputs = constant_op.constant(
np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
self.evaluate(nn_ops.relu(inputs))
inputs = constant_op.constant(
np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
dtypes.qint8)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
self.evaluate(nn_ops.relu(inputs))
def testNoElement(self):
self._testRelu(np.array([[], []], dtype=np.float32))
# The gradient test for ReLU is a bit tricky as the derivative is not well
# defined at around zero and we want to avoid that in terms of input values.
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
print("relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
# The gradient for fp16 is inaccurate due to the low-precision.
# We compare the fp16 analytical gradient against their fp32 counterpart.
def testGradientFloat16(self):
def grad(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.l2_loss(nn_ops.relu(x))
return tape.gradient(y, x)
def f():
with test_util.use_gpu():
# Randomly construct a 1D shape from [1, 40)
shape = random_ops.random_uniform([1],
minval=1,
maxval=40,
dtype=dtypes.int32)
x32 = random_ops.random_uniform(shape, minval=-1, maxval=1)
x16 = math_ops.cast(x32, dtype=dtypes.float16)
return grad(x32), grad(x16)
# We're going to ensure that the fp16 and fp32 gradients
# are "close" to each other for ~100 random values.
#
# In TensorFlow 1.x, invoking f() (without eager execution enabled)
    # would construct a graph. Instead of constructing a graph with O(100) nodes,
# we construct a single graph to be executed ~100 times in a Session.
if not tf2.enabled():
d32_tensor, d16_tensor = f()
with self.cached_session() as sess:
f = lambda: sess.run([d32_tensor, d16_tensor])
# Repeat the experiment for 100 times. All tensor shapes and its tensor
# values are randomly generated for each run.
for _ in xrange(100):
d32, d16 = f()
self.assertAllClose(d32, d16, atol=3e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
print("relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientScalar(self):
x = variables.Variable(100.)
def loss():
return nn_ops.relu(x)**2
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
self.evaluate(variables.global_variables_initializer())
self.evaluate(optimizer.minimize(loss))
self.assertAllClose(x.read_value(), 50.0)
def testGradientNoElement(self):
with self.cached_session():
def f(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.relu(x)
return tape.gradient(y, x)
x = np.asarray([[], []], dtype=np.float32)
z = list(gradient_checker_v2.compute_gradient(f, [x]))[0][0]
self.assertAllEqual(z, np.reshape(x, (0, 0)))
class Relu6Test(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
return np.minimum(
np.maximum(np_features, np.zeros(np_features.shape)), sixes)
def testNpRelu6(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
self._npRelu6(
np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7,
0.9]])))
def _testRelu6(self, np_features):
np_relu6 = self._npRelu6(np_features)
tf_relu6 = nn_ops.relu6(np_features)
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, tf_relu6)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
# The gradient test for ReLU6 is a bit tricky as the derivative is
# not well defined at around zero and six and we want to avoid that
# in terms of input values.
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
print("relu6 (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
print("relu6 (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
class LeakyReluTest(test.TestCase):
def _npLeakyRelu(self, np_features, alpha=0.1):
return np.maximum(np_features, alpha * np_features)
def testNpLeakyRelu(self):
self.assertAllClose(
np.array([[-0.09, 0.7, -0.05, 0.3, -0.01],
[0.1, -0.03, 0.5, -0.07, 0.9]]),
self._npLeakyRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]]),
alpha=0.1))
def _testLeakyRelu(self, np_features, alpha):
np_leaky_relu = self._npLeakyRelu(np_features, alpha)
tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
self.assertAllClose(np_leaky_relu, tf_leaky_relu)
self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
alpha=0.2)
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
alpha=0.1)
# The gradient test for Leaky ReLU is a bit tricky as the derivative is not
# well defined at around zero and we want to avoid that in terms of input
# values.
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
print("leaky_relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
print("leaky_relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with compat.forward_compatibility_horizon(2018, 11, 2):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.leaky_relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("leaky_relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with compat.forward_compatibility_horizon(2018, 11, 2):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.leaky_relu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("leaky_relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientScalar(self):
x = variables.Variable(-100.)
def loss():
return nn_ops.leaky_relu(x, 0.05)**2
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
self.evaluate(variables.global_variables_initializer())
self.evaluate(optimizer.minimize(loss))
self.assertAllClose(x.read_value(), -99.9)
class EluTest(test.TestCase):
def _npElu(self, np_features):
return np.where(np_features < 0, np.exp(np_features) - 1, np_features)
def testNpElu(self):
self.assertAllClose(
np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
[0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]),
self._npElu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testElu(self, np_features):
np_elu = self._npElu(np_features)
tf_elu = nn_ops.elu(np_features)
self.assertAllClose(np_elu, tf_elu)
self.assertShapeEqual(np_elu, tf_elu)
def testNumbersCPU(self):
for t in [np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testElu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testElu(np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testGradientFloat32(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
print("elu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
print("elu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGrad(self):
with self.cached_session():
def f(x):
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
y = nn_ops.elu(x)
dy = tape.gradient(y, x)
return tape.gradient(dy, x)
for x in [-1., -0.5, 0.5, 1.]:
got = self.evaluate(f(constant_op.constant(x)))
want = _elu_grad_grad(x)
err = np.abs(got - want)
self.assertLess(err, 1e-4)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.elu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("elu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.elu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("elu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
class SeluTest(test.TestCase):
def _npSelu(self, np_features):
scale = 1.0507009873554804934193349852946
scale_alpha = 1.7580993408473768599402175208123
return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
scale * np_features)
def testNpSelu(self):
self.assertAllClose(
np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527],
[0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]]),
self._npSelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testSelu(self, np_features):
np_selu = self._npSelu(np_features)
tf_selu = nn_ops.selu(np_features)
self.assertAllClose(np_selu, tf_selu)
self.assertShapeEqual(np_selu, tf_selu)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
      # Force execution on CPU in case GPU kernels are available.
with ops.device("/device:CPU:0"):
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testGradientFloat32(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
print("selu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
print("selu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGradFloat32(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float32
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.selu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("selu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.cached_session():
def f(x):
assert x.dtype == dtypes.float64
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.selu(x)
return tape.gradient(y, x)
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
print("selu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
class CreluTest(test.TestCase):
def testCreluShape(self):
f = random_ops.random_normal([50, 5, 7, 10])
t = nn_ops.crelu(f)
self.assertEqual([50, 5, 7, 20], t.get_shape())
def _testCrelu(self, np_features):
np_relu = np.maximum(np_features, np.zeros_like(np_features))
np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features))
np_crelu = np.concatenate((np_relu, np_neg_relu),
len(np_features.shape) - 1)
tf_crelu = nn_ops.crelu(np_features)
self.assertAllClose(np_crelu, tf_crelu)
self.assertShapeEqual(np_crelu, tf_crelu)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float32, np.float64]:
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersWithAxis0(self):
tf_crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=0)
np_crelu = np.array([[0, 7, 0, 3, 0], [1, 0, 5, 0, 9], [9, 0, 5, 0, 1],
[0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_crelu)
def testNumbersWithAxis1(self):
tf_crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=1)
np_crelu = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1],
[1, 0, 5, 0, 9, 0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_crelu)
if __name__ == "__main__":
test.main()
| 36.20092
| 80
| 0.600983
|
6b14a64c77e2bed8d04d68d8c414d6a5d7a5ffd9
| 1,830
|
py
|
Python
|
shop/admin/defaults/customer.py
|
taime/django-shop
|
19e2ebd5f7da584fbf2e8bab8984b41e11979fc2
|
[
"BSD-3-Clause"
] | 39
|
2015-02-21T00:45:02.000Z
|
2020-05-18T14:46:09.000Z
|
shop/admin/defaults/customer.py
|
taime/django-shop
|
19e2ebd5f7da584fbf2e8bab8984b41e11979fc2
|
[
"BSD-3-Clause"
] | 46
|
2015-02-03T19:51:37.000Z
|
2017-03-24T23:40:14.000Z
|
shop/admin/defaults/customer.py
|
taime/django-shop
|
19e2ebd5f7da584fbf2e8bab8984b41e11979fc2
|
[
"BSD-3-Clause"
] | 23
|
2015-04-12T09:03:41.000Z
|
2020-04-14T16:38:35.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.html import format_html_join
from django.utils.translation import ugettext_lazy as _
from shop.admin.customer import CustomerProxy, CustomerInlineAdminBase, CustomerAdminBase
class CustomerInlineAdmin(CustomerInlineAdminBase):
fieldsets = (
(None, {'fields': ('get_number', 'salutation')}),
(_("Addresses"), {'fields': ('get_shipping_addresses', 'get_billing_addresses')})
)
readonly_fields = ('get_number', 'get_shipping_addresses', 'get_billing_addresses')
def get_number(self, customer):
return customer.get_number() or '–'
get_number.short_description = _("Customer Number")
def get_shipping_addresses(self, customer):
addresses = [(a.as_text(),) for a in customer.shippingaddress_set.all()]
return format_html_join('', '<address>{0}</address>', addresses)
get_shipping_addresses.short_description = _("Shipping")
def get_billing_addresses(self, customer):
addresses = [(a.as_text(),) for a in customer.billingaddress_set.all()]
return format_html_join('', '<address>{0}</address>', addresses)
get_billing_addresses.short_description = _("Billing")
@admin.register(CustomerProxy)
class CustomerAdmin(CustomerAdminBase):
inlines = (CustomerInlineAdmin,)
def get_list_display(self, request):
list_display = list(super(CustomerAdmin, self).get_list_display(request))
list_display.insert(1, 'salutation')
return list_display
def salutation(self, user):
if hasattr(user, 'customer'):
return user.customer.get_salutation_display()
return ''
salutation.short_description = _("Salutation")
salutation.admin_order_field = 'customer__salutation'
| 38.125
| 89
| 0.716393
|
0b4b4aeed0f5c123f17f68bbfedda6fa9e10b19a
| 4,228
|
py
|
Python
|
lambda/subscriber.py
|
yuya-mizuki771/bootcamp-2021-sample
|
70d8953723fda494ac309c347d3b05ae13b4164a
|
[
"Apache-2.0"
] | null | null | null |
lambda/subscriber.py
|
yuya-mizuki771/bootcamp-2021-sample
|
70d8953723fda494ac309c347d3b05ae13b4164a
|
[
"Apache-2.0"
] | null | null | null |
lambda/subscriber.py
|
yuya-mizuki771/bootcamp-2021-sample
|
70d8953723fda494ac309c347d3b05ae13b4164a
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Union
import uuid
import boto3
import os
import json
DENIAL_MESSAGE_UPPER = """Thank you for contacting us.
Unfortunately, the product you proposed ({}) is priced at {} yen,
which exceeds our budget of {} yen.
We would appreciate proposals that better match our requirements in the future.
"""
DENIAL_MESSAGE_LOWER = """Thank you for contacting us.
The product you proposed ({}) is priced at {} yen,
which is well below the minimum price we are looking for ({} yen or more).
We would like to see proposals with more added value.
"""
ALLOWANCE_MESSAGE = """Thank you for contacting us.
The product you proposed ({}) matches the price range we have in mind,
and we would like to discuss it in more detail.
"""
def lambda_handler(event: dict, context):
s3 = boto3.resource('s3')
feedback_upload_bucket = os.environ.get("FEEDBACK_UPLOAD_BUCKET")
records = event.get('Records')
for record in records:
try:
body = json.loads(record.get('body'))
message = json.loads(body.get('Message'))
except Exception as e:
print(e)
exit(1)
        try:
            feedback = generate_feedback_message(message)
            obj = s3.Object(feedback_upload_bucket, str(uuid.uuid4()))
            # store the generated feedback text in the feedback bucket
            obj.put(Body=feedback)
        except Exception as e:
            print(e)
            exit(1)
return {
'statusCode': 200
}
def generate_feedback_message(message: Dict[str, Union[int, str]]) -> str:
v = os.environ.get("PRICE_UPPER_LIMIT")
price_upper_limit = int(v) if v is not None else None
v = os.environ.get("PRICE_LOWER_LIMIT")
price_lower_limit = int(v) if v is not None else None
price = int(message.get('price'))
name = message.get('name')
if price_upper_limit is not None and price > price_upper_limit:
return DENIAL_MESSAGE_UPPER.format(name, price, price_upper_limit)
if price_lower_limit is not None and price < price_lower_limit:
return DENIAL_MESSAGE_LOWER.format(name, price, price_lower_limit)
return ALLOWANCE_MESSAGE.format(name)
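# --- Illustrative usage (not part of the original file) ----------------------
# A hedged sketch of exercising the price thresholds directly; the limits and
# the sample product are made-up values, not ones taken from this project.
def _demo_generate_feedback_message():
    os.environ['PRICE_UPPER_LIMIT'] = '20000000'
    os.environ['PRICE_LOWER_LIMIT'] = '5000000'
    sample = {'price': 25000000, 'name': 'Sample Grand Garden'}
    print(generate_feedback_message(sample))   # price exceeds the upper limit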
if __name__ == '__main__':
event = {
'Records': [
{
'messageId': '40886b30-b88d-496a-be40-be9417f9e9b6',
'receiptHandle': 'AQEBa0uGT1hacqAzfBrLKAUicFyIedg5xV5Z074uMpc4lsWfM0xSLhUteh3OvKbKk79tFHz1SjsErEyy5a8KVhpHEwnAJGJ4/B4zTIzsZr8ngFQu1QzrT0iVCWkkYIosGIQL+uk+KQ4hBceO8QXHvhy0wfWS1HeGCqqgmzcEDRfxBqxXL/5xwWjLVBN1z6/dmlVNc58VvPr5f+DtvRoPgPwo1KrwzGrmDHSB1jz5ElBpwd4QSfBPT8E+Rl7JGOk4uvPUKZpmcXoCnxwD1Sw5ZYyR8HCfA+cXPXL+2zwYbappruuTj5H9EYTawoR+4CAMti04spfAhBUOakgMOWC2NqIxVRYhkSvSJoRC/HZBx+Bspgbh3HKgqrVFJDDGyYlsVr8Cy4TQOqaWpExMwOV3CPKopCHroV8D62oAa+DCbuAB9b8=',
'body': '{\n "Type" : "Notification",\n "MessageId" : "198b74d7-8b7e-5d63-a232-2c6c64914c58",\n "TopicArn" : "arn:aws:sns:ap-northeast-1:354384904427:advertisement-destribution-topic",\n "Subject" : "sample-message",\n "Message" : "{\\n \\"price\\": 25000000,\\n \\"name\\": \\"シーガルズグランドガーデン(架空)\\",\\n \\t\\"message\\": \\"適切なマンションポエムをここに入れる\\" \\n}",\n "Timestamp" : "2021-03-05T08:37:03.334Z",\n "SignatureVersion" : "1",\n "Signature" : "kHR94W5bP6HT8N+hM72I4VEKgev0IvtbIJUJl26JBM7EqzhuTb0P8Ba5yrZ/Bo0oyLtjPH4fpiTJ44I3CJoCG3uAsLvx+UvfHwBC+bcG3yt5YjbTbOMxSDGB4g2xCSTKg+um21fzNmCJp46F12NT15oIQy8P7T4JhxhRN3lmed1KXex3Iw/uqnHSYtTTynoAnZsnkfwCKsYU4ho0UChP1gZ8h7QqLAQXwKfoFEUgd3Ruqq30PxDTviUCKJw9k2W/xfKZZCEhPPQDQfbMSLVAt07OZ9/jJzpe3HbYg/DI87cIPyZpLqCX7YD9j+5hM1Hs8lYAXv+Cb5f6nkKhXwa7fg==",\n "SigningCertURL" : "https://sns.ap-northeast-1.amazonaws.com/SimpleNotificationService-010a507c1833636cd94bdb98bd93083a.pem",\n "UnsubscribeURL" : "https://sns.ap-northeast-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:ap-northeast-1:354384904427:advertisement-destribution-topic:c38449ff-9a09-43cc-aa26-0dcc66f45e3d"\n}',
'attributes': {
'ApproximateReceiveCount': '2', 'SentTimestamp': '1614933423396',
'SenderId': 'AIDAIERWYNSNBY7YRB6SY',
'ApproximateFirstReceiveTimestamp': '1614933661442'
},
'messageAttributes': {},
'md5OfBody': '0a1f3f8f25ec8cfc77d92fec505b2704',
'eventSource': 'aws:sqs',
'eventSourceARN': 'arn:aws:sqs:ap-northeast-1:354384904427:subscriber-a-queue',
'awsRegion': 'ap-northeast-1'}
]
}
lambda_handler(event, None)
| 46.461538
| 1,161
| 0.700095
|
93ac61e5eb75eed7a76eac6b48274011d13a502f
| 1,928
|
py
|
Python
|
mpikat/effelsberg/edd/pipeline/test/testHDFpipeline.py
|
TobiasWinchen/mpikat
|
46ea86d6861bcfd924daad7058de8a898ee6c6a1
|
[
"MIT"
] | null | null | null |
mpikat/effelsberg/edd/pipeline/test/testHDFpipeline.py
|
TobiasWinchen/mpikat
|
46ea86d6861bcfd924daad7058de8a898ee6c6a1
|
[
"MIT"
] | null | null | null |
mpikat/effelsberg/edd/pipeline/test/testHDFpipeline.py
|
TobiasWinchen/mpikat
|
46ea86d6861bcfd924daad7058de8a898ee6c6a1
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division, unicode_literals
from mpikat.effelsberg.edd.pipeline.edd_hdf_pipeline import EDDHDF5WriterPipeline
from katcp import FailReply
import unittest
import tornado.testing
import tornado.gen
import tempfile
import shutil
import logging
#
#class TestHDFPipeline(tornado.testing.AsyncTestCase):
#
# #def setUp(self):
# # super(TestHDFPipeline, self).setUp()
# # print(self.datadir)
#
#
# @tornado.testing.gen_test
# def test_sequence(self):
# pipeline = EDDHDF5WriterPipeline("localhost", 1234)
# self.datadir = tempfile.mkdtemp()
#
#
# self.assertEqual(pipeline.state, 'idle')
#
# #js = '{"output_directory":"' + self.datadir + '"}'
# #print(js)
#
# yield pipeline.configure()
# self.assertEqual(pipeline.state, 'configured')
#
# yield pipeline.capture_start()
# self.assertEqual(pipeline.state, 'ready')
#
# yield pipeline.measurement_prepare()
# self.assertEqual(pipeline.state, 'set')
#
#        # Ignore measurement start, stop prepare
# yield pipeline.measurement_start()
# self.assertEqual(pipeline.state, 'measuring')
#
# yield pipeline.measurement_stop()
# self.assertEqual(pipeline.state, 'ready')
#
#        # This test needs to be available, as otherwise on a successful test
#        # pycoverage won't exit
# yield pipeline.deconfigure()
# self.assertEqual(pipeline.state, 'idle')
#
#
## @tornado.testing.gen_test(timeout=120)
## def test_measurement_prepare_prefix(self):
## super(TestHDFPipeline, self).setUp()
## pass
## #shutil.rmtree(self.datadir)
#
#
#
#if __name__ == '__main__':
# logging.basicConfig(filename='debug.log',
# format=("[ %(levelname)s - %(asctime)s - %(name)s "
# "- %(filename)s:%(lineno)s] %(message)s"),
# level=logging.DEBUG)
# unittest.main()
| 29.661538
| 82
| 0.653008
|
6b042c3228cb2d5350cc5e32d4f2f3e348f381f0
| 1,204
|
py
|
Python
|
comp0037_planner_controller/scripts/map_getter.py
|
tharmee99/COMP0037_CW1_Path-Planning-in-a-known-world
|
6c7e56b865743f3250cddf9a7e1f19f9fdcb98e1
|
[
"BSD-3-Clause"
] | null | null | null |
comp0037_planner_controller/scripts/map_getter.py
|
tharmee99/COMP0037_CW1_Path-Planning-in-a-known-world
|
6c7e56b865743f3250cddf9a7e1f19f9fdcb98e1
|
[
"BSD-3-Clause"
] | null | null | null |
comp0037_planner_controller/scripts/map_getter.py
|
tharmee99/COMP0037_CW1_Path-Planning-in-a-known-world
|
6c7e56b865743f3250cddf9a7e1f19f9fdcb98e1
|
[
"BSD-3-Clause"
] | 1
|
2020-02-16T22:56:25.000Z
|
2020-02-16T22:56:25.000Z
|
#!/usr/bin/env python
import sys
import rospy
from nav_msgs.srv import GetMap
from comp0037_planner_controller.occupancy_grid import OccupancyGrid
from comp0037_planner_controller.search_grid import SearchGrid
# This class pings the map server and gets the map.
class MapGetter(object):
def __init__(self):
rospy.loginfo('Waiting for static_map to become available.')
# print "Hello1"
rospy.wait_for_service('static_map')
# print "Hello2"
self.mapServer = rospy.ServiceProxy('static_map', GetMap)
# print "Hello3"
def getMapFromServer(self):
# print "starting"
resp = self.mapServer()
# rospy.logerr(resp)
# print "got from server"
occupancyGrid = OccupancyGrid(resp.map.info.width, resp.map.info.height, resp.map.info.resolution)
occupancyGrid.setScale(rospy.get_param('plan_scale', 5))
# print "make grid"
occupancyGrid.setFromDataArrayFromMapServer(resp.map.data)
# occupancyGrid.expandObstaclesToAccountForCircularRobotOfRadius(0.2)
return occupancyGrid
if __name__ == '__main__':
mapGetter = MapGetter()
mapGetter.getMapFromServer()
| 30.871795
| 106
| 0.696013
|
8c3da3520adac1967cecb90c8de0fe72d4635869
| 8,064
|
py
|
Python
|
libqtile/ipc.py
|
armoutihansen/qtile
|
5bcb2afcd1f5b98b10987a796c4c7d7f36d97340
|
[
"MIT"
] | null | null | null |
libqtile/ipc.py
|
armoutihansen/qtile
|
5bcb2afcd1f5b98b10987a796c4c7d7f36d97340
|
[
"MIT"
] | null | null | null |
libqtile/ipc.py
|
armoutihansen/qtile
|
5bcb2afcd1f5b98b10987a796c4c7d7f36d97340
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A simple IPC mechanism for communicating between two local processes. We
use marshal to serialize data - this means that both client and server must
run the same Python version, and that clients must be trusted (as
un-marshalling untrusted data can result in arbitrary code execution).
"""
import asyncio
import fcntl
import json
import marshal
import os.path
import socket
import struct
from typing import Any, Optional, Tuple
from libqtile.log_utils import logger
from libqtile.utils import get_cache_dir
HDRFORMAT = "!L"
HDRLEN = struct.calcsize(HDRFORMAT)
SOCKBASE = "qtilesocket.%s"
def find_sockfile(display: Optional[str] = None):
"""Finds the appropriate socket file for the given display"""
display = display or os.environ.get("DISPLAY") or ":0.0"
if "." not in display:
display += ".0"
cache_directory = get_cache_dir()
return os.path.join(cache_directory, SOCKBASE % display)
class IPCError(Exception):
pass
class _IPC:
"""A helper class to handle properly packing and unpacking messages"""
@staticmethod
def unpack(data: bytes, *, is_json: Optional[bool] = None) -> Tuple[Any, bool]:
"""Unpack the incoming message
Parameters
----------
data : bytes
The incoming message to unpack
is_json : Optional[bool]
If the message should be unpacked as json. By default, try to
            unpack json and fall back gracefully to marshalled bytes.
Returns
-------
Tuple[Any, bool]
A tuple of the unpacked object and a boolean denoting if the
message was deserialized using json. If True, the return message
should be packed as json.
"""
if is_json is None or is_json:
try:
return json.loads(data.decode()), True
except ValueError as e:
if is_json:
raise IPCError("Unable to decode json data") from e
try:
assert len(data) >= HDRLEN
size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
assert size >= len(data[HDRLEN:])
return marshal.loads(data[HDRLEN:HDRLEN + size]), False
except AssertionError as e:
raise IPCError(
"error reading reply! (probably the socket was disconnected)"
) from e
@staticmethod
def pack(msg: Any, *, is_json: bool = False) -> bytes:
"""Pack the object into a message to pass"""
if is_json:
json_obj = json.dumps(msg)
return json_obj.encode()
msg_bytes = marshal.dumps(msg)
size = struct.pack(HDRFORMAT, len(msg_bytes))
return size + msg_bytes
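# --- Illustrative round trip (not part of the original file) -----------------
# A small sketch of both wire formats produced by _IPC.pack(): json payloads
# travel as plain utf-8, while the marshal path prepends a length header.
def _demo_ipc_roundtrip():
    payload = {"command": "status", "args": []}
    for as_json in (True, False):
        wire = _IPC.pack(payload, is_json=as_json)
        decoded, was_json = _IPC.unpack(wire, is_json=as_json)
        assert decoded == payload and was_json is as_json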
class Client:
def __init__(self, socket_path: str, is_json=False) -> None:
"""Create a new IPC client
Parameters
----------
socket_path : str
The file path to the file that is used to open the connection to
the running IPC server.
is_json : bool
Pack and unpack messages as json
"""
self.socket_path = socket_path
self.is_json = is_json
def call(self, data: Any) -> Any:
return self.send(data)
def send(self, msg: Any) -> Any:
"""Send the message and return the response from the server
        If any exception is raised by the server, that will propagate out of
this call.
"""
return asyncio.run(self.async_send(msg))
async def async_send(self, msg: Any) -> Any:
"""Send the message to the server
Connect to the server, then pack and send the message to the server,
then wait for and return the response from the server.
"""
try:
reader, writer = await asyncio.wait_for(
asyncio.open_unix_connection(path=self.socket_path), timeout=3
)
except (ConnectionRefusedError, FileNotFoundError):
raise IPCError("Could not open {}".format(self.socket_path))
try:
send_data = _IPC.pack(msg, is_json=self.is_json)
writer.write(send_data)
writer.write_eof()
read_data = await asyncio.wait_for(reader.read(), timeout=10)
except asyncio.TimeoutError:
raise IPCError("Server not responding")
finally:
# see the note in Server._server_callback()
writer.close()
await writer.wait_closed()
data, _ = _IPC.unpack(read_data, is_json=self.is_json)
return data
class Server:
def __init__(self, socket_path: str, handler) -> None:
self.socket_path = socket_path
self.handler = handler
self.server = None # type: Optional[asyncio.AbstractServer]
if os.path.exists(socket_path):
os.unlink(socket_path)
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)
fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
self.sock.bind(self.socket_path)
async def _server_callback(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
"""Callback when a connection is made to the server
Read the data sent from the client, execute the requested command, and
send the reply back to the client.
"""
try:
logger.debug("Connection made to server")
data = await reader.read()
logger.debug("EOF received by server")
req, is_json = _IPC.unpack(data)
except IPCError:
logger.warn("Invalid data received, closing connection")
else:
rep = self.handler(req)
result = _IPC.pack(rep, is_json=is_json)
logger.debug("Sending result on receive EOF")
writer.write(result)
logger.debug("Closing connection on receive EOF")
writer.write_eof()
finally:
writer.close()
await writer.wait_closed()
async def __aenter__(self) -> "Server":
"""Start and return the server"""
await self.start()
return self
async def __aexit__(self, _exc_type, _exc_value, _tb) -> None:
"""Close and shutdown the server"""
await self.close()
async def start(self) -> None:
"""Start the server"""
assert self.server is None
logger.debug("Starting server")
server_coroutine = asyncio.start_unix_server(
self._server_callback, sock=self.sock
)
self.server = await server_coroutine
async def close(self) -> None:
"""Close and shutdown the server"""
assert self.server is not None
logger.debug("Stopping server on close")
self.server.close()
await self.server.wait_closed()
self.server = None
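# --- Illustrative usage (not part of the original file) ----------------------
# A hedged sketch wiring the Server and Client classes above together inside a
# single event loop; the handler simply echoes the request back to the caller.
async def _demo_ipc():
    sockfile = find_sockfile()                  # per-$DISPLAY socket path
    async with Server(sockfile, lambda req: {"echo": req}):
        reply = await Client(sockfile, is_json=True).async_send({"cmd": "ping"})
        print(reply)                            # -> {'echo': {'cmd': 'ping'}}
# To try it: asyncio.run(_demo_ipc())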
| 34.025316
| 83
| 0.633929
|
b7e2847ccbd8bde6ee1e6ba9914e22184f735392
| 14,994
|
py
|
Python
|
test/test_basic.py
|
lutraconsulting/mergin-work-packages
|
87850b9ca5069fbaf973a9341dca6aee72912528
|
[
"MIT"
] | 4
|
2021-03-03T09:39:00.000Z
|
2021-08-01T00:00:37.000Z
|
test/test_basic.py
|
lutraconsulting/mergin-workpackages
|
87850b9ca5069fbaf973a9341dca6aee72912528
|
[
"MIT"
] | 33
|
2021-03-03T12:21:51.000Z
|
2022-02-17T10:24:20.000Z
|
test/test_basic.py
|
lutraconsulting/mergin-workpackages
|
87850b9ca5069fbaf973a9341dca6aee72912528
|
[
"MIT"
] | 1
|
2021-04-16T18:28:34.000Z
|
2021-04-16T18:28:34.000Z
|
import glob
import os
import shutil
from tempfile import TemporaryDirectory
import sqlite3
from .init_test_data import (
create_farm_dataset,
open_layer_and_create_feature,
open_layer_and_update_feature,
open_layer_and_delete_feature,
)
from wp import load_config_from_yaml, make_work_packages
from wp_utils import escape_double_quotes
this_dir = os.path.dirname(os.path.realpath(__file__))
def _assert_row_counts(gpkg_filename, expected_farms, expected_trees):
"""Raises assertion errors if tables do not have the right number of rows"""
db = sqlite3.connect(gpkg_filename)
c = db.cursor()
c.execute("SELECT COUNT(*) FROM farms")
assert c.fetchone()[0] == expected_farms
c.execute("SELECT COUNT(*) FROM trees")
assert c.fetchone()[0] == expected_trees
def _assert_value_equals(gpkg_filename, table_name, fid, field_name, expected_value):
"""Raises assertion error if value of a particular field of a given feature
does not equal the expected value"""
db = sqlite3.connect(gpkg_filename)
c = db.cursor()
field_name_escaped = escape_double_quotes(field_name)
table_name_escaped = escape_double_quotes(table_name)
c.execute(f"""SELECT {field_name_escaped} FROM {table_name_escaped} WHERE fid = ?""", (fid,))
row = c.fetchone()
if row is None:
assert False, f"Missing row for fid {fid}"
assert row[0] == expected_value
def _assert_row_missing(gpkg_filename, table_name, fid):
"""Raises assertion error if given feature is present in the table"""
db = sqlite3.connect(gpkg_filename)
c = db.cursor()
table_name_escaped = escape_double_quotes(table_name)
c.execute(f"""SELECT count(*) FROM {table_name_escaped} WHERE fid = ?""", (fid,))
row = c.fetchone()
assert row[0] == 0, f"Row for fid {fid} is present but it should not be"
def _assert_row_exists(gpkg_filename, table_name, fid):
"""Raises assertion error if given feature is NOT present in the table"""
db = sqlite3.connect(gpkg_filename)
c = db.cursor()
table_name_escaped = escape_double_quotes(table_name)
c.execute(f"""SELECT count(*) FROM {table_name_escaped} WHERE fid = ?""", (fid,))
row = c.fetchone()
assert row[0] == 1, f"Row for fid {fid} is not present but it should be"
def _make_initial_farm_work_packages(config_file):
"""
1. create the initial "farms" dataset
2. run the WP algorithm with the initial dataset and given config file
Returns temporary directory object.
"""
tmp_dir_obj = TemporaryDirectory(prefix="test-mergin-work-packages-")
tmp_dir = tmp_dir_obj.name
os.makedirs(os.path.join(tmp_dir, "input"))
# get data
create_farm_dataset(os.path.join(tmp_dir, "input", "master.gpkg"))
# get config
wp_config = load_config_from_yaml(config_file)
# run alg
make_work_packages(tmp_dir, wp_config)
return tmp_dir_obj
def _prepare_next_run_work_packages(tmp_dir_1):
"""Creates a new temp directory with base+input files being output from the first step.
After this, work packaging can be run using the new temp directory, which is returned.
"""
tmp_dir_2 = TemporaryDirectory(prefix="test-mergin-work-packages-")
os.makedirs(os.path.join(tmp_dir_2.name, "base"))
os.makedirs(os.path.join(tmp_dir_2.name, "input"))
shutil.copy(os.path.join(tmp_dir_1.name, "output", "remap.db"), os.path.join(tmp_dir_2.name, "base", "remap.db"))
for file_path in glob.glob(os.path.join(tmp_dir_1.name, "output", "*.gpkg")):
file_name = os.path.basename(file_path)
shutil.copy(file_path, os.path.join(tmp_dir_2.name, "base", file_name))
shutil.copy(file_path, os.path.join(tmp_dir_2.name, "input", file_name))
return tmp_dir_2
def _keep_tmp_dir(tmp_dir, new_dir):
"""Makes a copy of a TemporaryDirectory. This is useful for debugging because
TemporaryDirectory has no way to disable removal if needed"""
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
shutil.copytree(tmp_dir.name, new_dir)
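# Illustrative usage (not part of the original file): call this at the end of a
# failing test to preserve the otherwise auto-deleted TemporaryDirectory, e.g.
#   _keep_tmp_dir(tmp_dir_2, "/tmp/wp-debug")   # destination is a placeholder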
def test_farm_data():
"""Check whether the test data init function returns what we expect"""
tmp_dir_obj = TemporaryDirectory(prefix="test-mergin-work-packages-")
farm_gpkg = os.path.join(tmp_dir_obj.name, "farms.gpkg")
create_farm_dataset(farm_gpkg)
_assert_row_counts(farm_gpkg, expected_farms=4, expected_trees=9)
def test_first_run():
"""Checks whether the first run correctly generates work package data with 'filter-column' method"""
tmp_dir = _make_initial_farm_work_packages(os.path.join(this_dir, "config-farm-basic.yml"))
# run checks
output_dir = os.path.join(tmp_dir.name, "output")
output_files = os.listdir(output_dir)
assert "Emma.gpkg" in output_files
assert "Kyle.gpkg" in output_files
assert "master.gpkg" in output_files
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=2)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
def test_first_run_filtering_geom():
"""Checks whether the first run correctly generates work package data with 'filter-geometry' method"""
tmp_dir = _make_initial_farm_work_packages(os.path.join(this_dir, "config-farm-geom.yml"))
# run checks
output_dir = os.path.join(tmp_dir.name, "output")
output_files = os.listdir(output_dir)
assert "Emma.gpkg" in output_files
assert "Kyle.gpkg" in output_files
assert "master.gpkg" in output_files
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=2, expected_trees=3)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=3, expected_trees=1)
def test_update_row_wp():
"""One row has been updated in WP, no changes in master"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify a WP dataset - update a tree (master fid 8 mapped to 1000000 for Kyle)
open_layer_and_update_feature(
os.path.join(tmp_dir_2.name, "input", "Kyle.gpkg"), "trees", 1000000, {"age_years": 10}
)
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be the same number of rows as initially
# and updated age both in master + kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=2)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_value_equals(os.path.join(output_dir, "master.gpkg"), "trees", 8, "age_years", 10)
_assert_value_equals(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000000, "age_years", 10)
def test_update_row_master():
"""One row has been updated in master, no changes in WP"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify master dataset - update a tree (master fid 9 mapped to 1000001 for Kyle)
open_layer_and_update_feature(os.path.join(tmp_dir_2.name, "input", "master.gpkg"), "trees", 9, {"age_years": 20})
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be the same number of rows as initially
# and updated age both in master + kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=2)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_value_equals(os.path.join(output_dir, "master.gpkg"), "trees", 9, "age_years", 20)
_assert_value_equals(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000001, "age_years", 20)
def test_update_row_master_and_wp():
"""One row updated in master, another row in WP (no conflict)"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify a WP dataset - update a tree (master fid 8 mapped to 1000000 for Kyle)
open_layer_and_update_feature(
os.path.join(tmp_dir_2.name, "input", "Kyle.gpkg"), "trees", 1000000, {"age_years": 30}
)
# modify master dataset - update a tree (master fid 9 mapped to 1000001 for Kyle)
open_layer_and_update_feature(os.path.join(tmp_dir_2.name, "input", "master.gpkg"), "trees", 9, {"age_years": 40})
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be the same number of rows as initially
# and updated age both in master + kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=2)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_value_equals(os.path.join(output_dir, "master.gpkg"), "trees", 8, "age_years", 30)
_assert_value_equals(os.path.join(output_dir, "master.gpkg"), "trees", 9, "age_years", 40)
_assert_value_equals(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000000, "age_years", 30)
_assert_value_equals(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000001, "age_years", 40)
def test_delete_row_wp():
"""One row deleted in WP, no changes in master"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify a WP dataset - delete a tree (master fid 8 mapped to 1000000 for Kyle)
open_layer_and_delete_feature(os.path.join(tmp_dir_2.name, "input", "Kyle.gpkg"), "trees", 1000000)
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be one tree missing for master and for Kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=8)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=1)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_row_missing(os.path.join(output_dir, "master.gpkg"), "trees", 8)
def test_delete_row_master():
"""One row deleted in master, no changes in WP"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify a WP dataset - delete a tree (master fid 9 mapped to 1000001 for Kyle)
open_layer_and_delete_feature(os.path.join(tmp_dir_2.name, "input", "master.gpkg"), "trees", 9)
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be one tree missing for master and for Kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=8)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=1)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_row_missing(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000001)
def test_insert_row_wp():
"""One row has been added in WP, no changes in master"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify a WP dataset - add a row
open_layer_and_create_feature(
os.path.join(tmp_dir_2.name, "input", "Kyle.gpkg"), "trees", "POINT(6 16)", {"tree_species_id": 1, "farm_id": 4}
)
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be one new tree in master and one new tree for Kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=10)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=3)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_row_exists(os.path.join(output_dir, "master.gpkg"), "trees", 10)
def test_insert_row_master():
"""One row has inserted in master, no changes in WP"""
config_file = os.path.join(this_dir, "config-farm-basic.yml")
tmp_dir_1 = _make_initial_farm_work_packages(config_file)
tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
# modify master dataset - add a row
open_layer_and_create_feature(
os.path.join(tmp_dir_2.name, "input", "master.gpkg"),
"trees",
"POINT(9 19)",
{"tree_species_id": 1, "farm_id": 4},
)
# run work packaging
wp_config = load_config_from_yaml(config_file)
make_work_packages(tmp_dir_2.name, wp_config)
output_dir = os.path.join(tmp_dir_2.name, "output")
# there should be one new tree in master and one new tree for Kyle
_assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=10)
_assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=3)
_assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)
_assert_row_exists(os.path.join(output_dir, "Kyle.gpkg"), "trees", 1000002)
# TODO: more test cases
# - delete_master_delete_wp # one row deleted in both master and WP
# - delete_master_update_wp # one row deleted in master while it is updated in WP
# - update_master_delete_wp # one row updated in master while it is deleted in WP
# - insert_row_master_and_wp # one row has been inserted in master, another row in WP
# - update_master_update_wp # one row updated in master and the same row updated in WP (sketched below)
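# A minimal sketch (not part of the original suite) of the update_master_update_wp
# case listed above: it reuses only the helpers already used by the tests here, and
# the assertions deliberately check row counts only, since which side should win the
# conflict is not specified in this file.
def test_update_row_master_and_wp():
    """One row updated in master and the same row updated in WP (sketch)"""
    config_file = os.path.join(this_dir, "config-farm-basic.yml")
    tmp_dir_1 = _make_initial_farm_work_packages(config_file)
    tmp_dir_2 = _prepare_next_run_work_packages(tmp_dir_1)
    # update the same tree on both sides (master fid 9 mapped to 1000001 for Kyle)
    open_layer_and_update_feature(os.path.join(tmp_dir_2.name, "input", "master.gpkg"), "trees", 9, {"age_years": 50})
    open_layer_and_update_feature(
        os.path.join(tmp_dir_2.name, "input", "Kyle.gpkg"), "trees", 1000001, {"age_years": 60}
    )
    # run work packaging
    wp_config = load_config_from_yaml(config_file)
    make_work_packages(tmp_dir_2.name, wp_config)
    output_dir = os.path.join(tmp_dir_2.name, "output")
    # row counts should be unchanged on both sides
    _assert_row_counts(os.path.join(output_dir, "master.gpkg"), expected_farms=4, expected_trees=9)
    _assert_row_counts(os.path.join(output_dir, "Kyle.gpkg"), expected_farms=1, expected_trees=2)
    _assert_row_counts(os.path.join(output_dir, "Emma.gpkg"), expected_farms=2, expected_trees=6)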
| avg_line_length: 46.565217 | max_line_length: 120 | alphanum_fraction: 0.73276 |
| hexsha: fca51bdc337eaab666c14f5beb312f00161152cc | size: 31,246 | ext: py | lang: Python | path: src/ircmsgs.py | repo: atr000/Limnoria | head_hexsha: 1f60a9487ca4114f040135fb14cabc155a041918 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: 1 (2021-01-23T21:20:57.000Z .. 2021-01-23T21:20:57.000Z) |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This module provides the basic IrcMsg object used throughout the bot to
represent the actual messages. It also provides several helper functions to
construct such messages in an easier way than the constructor for the IrcMsg
object (which, as you'll read later, is quite...full-featured :))
"""
import re
import time
import supybot.conf as conf
import supybot.utils as utils
from supybot.utils.iter import all
import supybot.ircutils as ircutils
###
# IrcMsg class -- used for representing IRC messages acquired from a network.
###
class MalformedIrcMsg(ValueError):
pass
class IrcMsg(object):
"""Class to represent an IRC message.
As usual, ignore attributes that begin with an underscore. They simply
don't exist. Instances of this class are *not* to be modified, since they
are hashable. Public attributes of this class are .prefix, .command,
.args, .nick, .user, and .host.
The constructor for this class is pretty intricate. It's designed to take
any of three major (sets of) arguments.
Called with no keyword arguments, it takes a single string that is a raw
IRC message (such as one taken straight from the network).
Called with keyword arguments, it *requires* a command parameter. Args is
optional, but with most commands will be necessary. Prefix is obviously
optional, since clients aren't allowed (well, technically, they are, but
only in a completely useless way) to send prefixes to the server.
Since this class isn't to be modified, the constructor also accepts a 'msg'
keyword argument representing a message from which to take all the
attributes not provided otherwise as keyword arguments. So, for instance,
if a programmer wanted to take a PRIVMSG he'd gotten and simply redirect it
to a different source, he could do this:
IrcMsg(prefix='', args=(newSource, otherMsg.args[1]), msg=otherMsg)
"""
# It's too useful to be able to tag IrcMsg objects with extra, unforeseen
# data. Goodbye, __slots__.
# On second thought, let's use methods for tagging.
__slots__ = ('args', 'command', 'host', 'nick', 'prefix', 'user',
'_hash', '_str', '_repr', '_len', 'tags')
def __init__(self, s='', command='', args=(), prefix='', msg=None):
assert not (msg and s), 'IrcMsg.__init__ cannot accept both s and msg'
if not s and not command and not msg:
raise MalformedIrcMsg, 'IRC messages require a command.'
self._str = None
self._repr = None
self._hash = None
self._len = None
self.tags = {}
if s:
originalString = s
try:
if not s.endswith('\n'):
s += '\n'
self._str = s
if s[0] == ':':
self.prefix, s = s[1:].split(None, 1)
else:
self.prefix = ''
if ' :' in s: # Note the space: IPV6 addresses are bad w/o it.
s, last = s.split(' :', 1)
self.args = s.split()
self.args.append(last.rstrip('\r\n'))
else:
self.args = s.split()
self.command = self.args.pop(0)
except (IndexError, ValueError):
raise MalformedIrcMsg, repr(originalString)
else:
if msg is not None:
if prefix:
self.prefix = prefix
else:
self.prefix = msg.prefix
if command:
self.command = command
else:
self.command = msg.command
if args:
self.args = args
else:
self.args = msg.args
self.tags = msg.tags.copy()
else:
self.prefix = prefix
self.command = command
assert all(ircutils.isValidArgument, args)
self.args = args
self.args = tuple(self.args)
if isUserHostmask(self.prefix):
(self.nick,self.user,self.host)=ircutils.splitHostmask(self.prefix)
else:
(self.nick, self.user, self.host) = (self.prefix,)*3
def __str__(self):
if self._str is not None:
return self._str
if self.prefix:
if len(self.args) > 1:
self._str = ':%s %s %s :%s\r\n' % \
(self.prefix, self.command,
' '.join(self.args[:-1]), self.args[-1])
else:
if self.args:
self._str = ':%s %s :%s\r\n' % \
(self.prefix, self.command, self.args[0])
else:
self._str = ':%s %s\r\n' % (self.prefix, self.command)
else:
if len(self.args) > 1:
self._str = '%s %s :%s\r\n' % \
(self.command,
' '.join(self.args[:-1]), self.args[-1])
else:
if self.args:
self._str = '%s :%s\r\n' % (self.command, self.args[0])
else:
self._str = '%s\r\n' % self.command
return self._str
def __len__(self):
return len(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and \
hash(self) == hash(other) and \
self.command == other.command and \
self.prefix == other.prefix and \
self.args == other.args
__req__ = __eq__ # I don't know exactly what this does, but it can't hurt.
def __ne__(self, other):
return not (self == other)
__rne__ = __ne__ # Likewise as above.
def __hash__(self):
if self._hash is not None:
return self._hash
self._hash = hash(self.command) ^ \
hash(self.prefix) ^ \
hash(repr(self.args))
return self._hash
def __repr__(self):
if self._repr is not None:
return self._repr
self._repr = format('IrcMsg(prefix=%q, command=%q, args=%r)',
self.prefix, self.command, self.args)
return self._repr
def __reduce__(self):
return (self.__class__, (str(self),))
def tag(self, tag, value=True):
self.tags[tag] = value
def tagged(self, tag):
return self.tags.get(tag) # Returns None if it's not there.
def __getattr__(self, attr):
return self.tagged(attr)
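# Illustrative round-trip example (added annotation, not in the original source):
# parsing a raw line populates .prefix/.command/.args/.nick, and keyword
# construction produces wire-format text.
#
#   >>> m = IrcMsg(':nick!user@host PRIVMSG #chan :hello there')
#   >>> (m.command, m.nick, m.args)
#   ('PRIVMSG', 'nick', ('#chan', 'hello there'))
#   >>> str(IrcMsg(command='PRIVMSG', args=('#chan', 'hello there')))
#   'PRIVMSG #chan :hello there\r\n'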
def isCtcp(msg):
"""Returns whether or not msg is a CTCP message."""
return msg.command in ('PRIVMSG', 'NOTICE') and \
msg.args[1].startswith('\x01') and \
msg.args[1].endswith('\x01') and \
len(msg.args[1]) >= 2
def isAction(msg):
"""A predicate returning true if the PRIVMSG in question is an ACTION"""
if isCtcp(msg):
s = msg.args[1]
payload = s[1:-1] # Chop off \x01.
command = payload.split(None, 1)[0]
return command == 'ACTION'
else:
return False
def isSplit(msg):
if msg.command == 'QUIT':
# It's a quit.
quitmsg = msg.args[0]
if not quitmsg.startswith('"') and not quitmsg.endswith('"'):
# It's not a user-generated quitmsg.
servers = quitmsg.split()
if len(servers) == 2:
# We could check if domains match, or if the hostnames actually
# resolve, but we're going to put that off for now.
return True
return False
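# For example (illustrative): a QUIT whose message is exactly two unquoted server
# names, such as ':foo!bar@baz QUIT :irc.example.net hub.example.net', is reported
# as a netsplit by isSplit(), while quoted user-supplied quit reasons are not.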
_unactionre = re.compile(r'^\x01ACTION\s+(.*)\x01$')
def unAction(msg):
"""Returns the payload (i.e., non-ACTION text) of an ACTION msg."""
assert isAction(msg)
return _unactionre.match(msg.args[1]).group(1)
def _escape(s):
s = s.replace('&', '&')
s = s.replace('"', '"')
s = s.replace('<', '<')
s = s.replace('>', '>')
return s
def toXml(msg, pretty=True, includeTime=True):
assert msg.command == _escape(msg.command)
L = []
L.append('<msg command="%s" prefix="%s"'%(msg.command,_escape(msg.prefix)))
if includeTime:
L.append(' time="%s"' % time.time())
L.append('>')
if pretty:
L.append('\n')
for arg in msg.args:
if pretty:
L.append(' ')
L.append('<arg>%s</arg>' % _escape(arg))
if pretty:
L.append('\n')
L.append('</msg>\n')
return ''.join(L)
def prettyPrint(msg, addRecipients=False, timestampFormat=None, showNick=True):
"""Provides a client-friendly string form for messages.
IIRC, I copied BitchX's (or was it XChat's?) format for messages.
"""
def nickorprefix():
return msg.nick or msg.prefix
def nick():
if addRecipients:
return '%s/%s' % (msg.nick, msg.args[0])
else:
return msg.nick
if msg.command == 'PRIVMSG':
m = _unactionre.match(msg.args[1])
if m:
s = '* %s %s' % (nick(), m.group(1))
else:
if not showNick:
s = '%s' % msg.args[1]
else:
s = '<%s> %s' % (nick(), msg.args[1])
elif msg.command == 'NOTICE':
if not showNick:
s = '%s' % msg.args[1]
else:
s = '-%s- %s' % (nick(), msg.args[1])
elif msg.command == 'JOIN':
prefix = msg.prefix
if msg.nick:
prefix = '%s <%s>' % (msg.nick, prefix)
s = '*** %s has joined %s' % (prefix, msg.args[0])
elif msg.command == 'PART':
if len(msg.args) > 1:
partmsg = ' (%s)' % msg.args[1]
else:
partmsg = ''
s = '*** %s <%s> has parted %s%s' % (msg.nick, msg.prefix,
msg.args[0], partmsg)
    elif msg.command == 'KICK':
        if len(msg.args) > 2:
            kickmsg = ' (%s)' % msg.args[2]  # args[2] is the kick reason; args[1] is the kicked nick
        else:
            kickmsg = ''
        s = '*** %s was kicked by %s%s' % (msg.args[1], msg.nick, kickmsg)
elif msg.command == 'MODE':
s = '*** %s sets mode: %s' % (nickorprefix(), ' '.join(msg.args))
elif msg.command == 'QUIT':
if msg.args:
quitmsg = ' (%s)' % msg.args[0]
else:
quitmsg = ''
s = '*** %s <%s> has quit IRC%s' % (msg.nick, msg.prefix, quitmsg)
elif msg.command == 'TOPIC':
s = '*** %s changes topic to %s' % (nickorprefix(), msg.args[1])
elif msg.command == 'NICK':
s = '*** %s is now known as %s' % (msg.nick, msg.args[0])
else:
s = utils.str.format('--- Unknown command %q', ' '.join(msg.args))
at = getattr(msg, 'receivedAt', None)
if timestampFormat and at:
s = '%s %s' % (time.strftime(timestampFormat, time.localtime(at)), s)
return s
###
# Various IrcMsg functions
###
isNick = ircutils.isNick
isChannel = ircutils.isChannel
isUserHostmask = ircutils.isUserHostmask
def pong(payload, prefix='', msg=None):
"""Takes a payload and returns the proper PONG IrcMsg."""
if conf.supybot.protocols.irc.strictRfc():
assert payload, 'PONG requires a payload'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PONG', args=(payload,), msg=msg)
def ping(payload, prefix='', msg=None):
"""Takes a payload and returns the proper PING IrcMsg."""
if conf.supybot.protocols.irc.strictRfc():
assert payload, 'PING requires a payload'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PING', args=(payload,), msg=msg)
def op(channel, nick, prefix='', msg=None):
"""Returns a MODE to op nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+o', nick), msg=msg)
def ops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to op each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+' + ('o'*len(nicks))) + tuple(nicks),
msg=msg)
def deop(channel, nick, prefix='', msg=None):
"""Returns a MODE to deop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-o', nick), msg=msg)
def deops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to deop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('o'*len(nicks))) + tuple(nicks))
def halfop(channel, nick, prefix='', msg=None):
"""Returns a MODE to halfop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+h', nick), msg=msg)
def halfops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to halfop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '+' + ('h'*len(nicks))) + tuple(nicks))
def dehalfop(channel, nick, prefix='', msg=None):
"""Returns a MODE to dehalfop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-h', nick), msg=msg)
def dehalfops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to dehalfop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('h'*len(nicks))) + tuple(nicks))
def voice(channel, nick, prefix='', msg=None):
"""Returns a MODE to voice nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+v', nick), msg=msg)
def voices(channel, nicks, prefix='', msg=None):
"""Returns a MODE to voice each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '+' + ('v'*len(nicks))) + tuple(nicks))
def devoice(channel, nick, prefix='', msg=None):
"""Returns a MODE to devoice nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-v', nick), msg=msg)
def devoices(channel, nicks, prefix='', msg=None):
"""Returns a MODE to devoice each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('v'*len(nicks))) + tuple(nicks))
def ban(channel, hostmask, exception='', prefix='', msg=None):
"""Returns a MODE to ban nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isUserHostmask(hostmask), repr(hostmask)
modes = [('+b', hostmask)]
if exception:
modes.append(('+e', exception))
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def bans(channel, hostmasks, exceptions=(), prefix='', msg=None):
"""Returns a MODE to ban each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert all(isUserHostmask, hostmasks), hostmasks
modes = [('+b', s) for s in hostmasks] + [('+e', s) for s in exceptions]
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def unban(channel, hostmask, prefix='', msg=None):
"""Returns a MODE to unban nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isUserHostmask(hostmask), repr(hostmask)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-b', hostmask), msg=msg)
def unbans(channel, hostmasks, prefix='', msg=None):
"""Returns a MODE to unban each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert all(isUserHostmask, hostmasks), hostmasks
modes = [('-b', s) for s in hostmasks]
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def kick(channel, nick, s='', prefix='', msg=None):
"""Returns a KICK to kick nick from channel with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, nick, s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, nick), msg=msg)
def kicks(channel, nicks, s='', prefix='', msg=None):
"""Returns a KICK to kick each of nicks from channel with the message msg.
"""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, ','.join(nicks), s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, ','.join(nicks)), msg=msg)
def privmsg(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (isChannel(recipient) or isNick(recipient)), repr(recipient)
assert s, 's must not be empty.'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, s), msg=msg)
def dcc(recipient, kind, *args, **kwargs):
# Stupid Python won't allow (recipient, kind, *args, prefix=''), so we have
# to use the **kwargs form. Blech.
assert isNick(recipient), 'Can\'t DCC a channel.'
kind = kind.upper()
assert kind in ('SEND', 'CHAT', 'RESUME', 'ACCEPT'), 'Invalid DCC command.'
args = (kind,) + args
return IrcMsg(prefix=kwargs.get('prefix', ''), command='PRIVMSG',
args=(recipient, ' '.join(args)))
def action(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG ACTION to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (isChannel(recipient) or isNick(recipient)), repr(recipient)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, '\x01ACTION %s\x01' % s), msg=msg)
def notice(recipient, s, prefix='', msg=None):
"""Returns a NOTICE to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (isChannel(recipient) or isNick(recipient)), repr(recipient)
assert s, 'msg must not be empty.'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='NOTICE', args=(recipient, s), msg=msg)
def join(channel, key=None, prefix='', msg=None):
"""Returns a JOIN to a channel"""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if key is None:
return IrcMsg(prefix=prefix, command='JOIN', args=(channel,), msg=msg)
else:
if conf.supybot.protocols.irc.strictRfc():
assert key.translate(utils.str.chars,
utils.str.chars[128:]) == key and \
'\x00' not in key and \
'\r' not in key and \
'\n' not in key and \
'\f' not in key and \
'\t' not in key and \
'\v' not in key and \
' ' not in key
return IrcMsg(prefix=prefix, command='JOIN',
args=(channel, key), msg=msg)
def joins(channels, keys=None, prefix='', msg=None):
"""Returns a JOIN to each of channels."""
if conf.supybot.protocols.irc.strictRfc():
assert all(isChannel, channels), channels
if msg and not prefix:
prefix = msg.prefix
if keys is None:
keys = []
assert len(keys) <= len(channels), 'Got more keys than channels.'
if not keys:
return IrcMsg(prefix=prefix,
command='JOIN',
args=(','.join(channels),), msg=msg)
else:
for key in keys:
if conf.supybot.protocols.irc.strictRfc():
assert key.translate(utils.str.chars,
utils.str.chars[128:])==key and \
'\x00' not in key and \
'\r' not in key and \
'\n' not in key and \
'\f' not in key and \
'\t' not in key and \
'\v' not in key and \
' ' not in key
return IrcMsg(prefix=prefix,
command='JOIN',
args=(','.join(channels), ','.join(keys)), msg=msg)
def part(channel, s='', prefix='', msg=None):
"""Returns a PART from channel with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='PART',
args=(channel, s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='PART',
args=(channel,), msg=msg)
def parts(channels, s='', prefix='', msg=None):
"""Returns a PART from each of channels with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert all(isChannel, channels), channels
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='PART',
args=(','.join(channels), s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='PART',
args=(','.join(channels),), msg=msg)
def quit(s='', prefix='', msg=None):
"""Returns a QUIT with the message msg."""
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='QUIT', args=(s,), msg=msg)
else:
return IrcMsg(prefix=prefix, command='QUIT', msg=msg)
def topic(channel, topic=None, prefix='', msg=None):
"""Returns a TOPIC for channel with the topic topic."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if topic is None:
return IrcMsg(prefix=prefix, command='TOPIC',
args=(channel,), msg=msg)
else:
return IrcMsg(prefix=prefix, command='TOPIC',
args=(channel, topic), msg=msg)
def nick(nick, prefix='', msg=None):
"""Returns a NICK with nick nick."""
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='NICK', args=(nick,), msg=msg)
def user(ident, user, prefix='', msg=None):
"""Returns a USER with ident ident and user user."""
if conf.supybot.protocols.irc.strictRfc():
assert '\x00' not in ident and \
'\r' not in ident and \
'\n' not in ident and \
' ' not in ident and \
'@' not in ident
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='USER',
args=(ident, '0', '*', user), msg=msg)
def who(hostmaskOrChannel, prefix='', msg=None):
"""Returns a WHO for the hostmask or channel hostmaskOrChannel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(hostmaskOrChannel) or \
isUserHostmask(hostmaskOrChannel), repr(hostmaskOrChannel)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='WHO',
args=(hostmaskOrChannel,), msg=msg)
def whois(nick, mask='', prefix='', msg=None):
"""Returns a WHOIS for nick."""
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
args = (nick,)
if mask:
args = (nick, mask)
return IrcMsg(prefix=prefix, command='WHOIS', args=args, msg=msg)
def names(channel=None, prefix='', msg=None):
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel)
if msg and not prefix:
prefix = msg.prefix
if channel is not None:
return IrcMsg(prefix=prefix, command='NAMES', args=(channel,), msg=msg)
else:
return IrcMsg(prefix=prefix, command='NAMES', msg=msg)
def mode(channel, args=(), prefix='', msg=None):
if msg and not prefix:
prefix = msg.prefix
if isinstance(args, basestring):
args = (args,)
else:
args = tuple(map(str, args))
return IrcMsg(prefix=prefix, command='MODE', args=(channel,)+args, msg=msg)
def modes(channel, args=(), prefix='', msg=None):
"""Returns a MODE to quiet each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
modes = args
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def limit(channel, limit, prefix='', msg=None):
return mode(channel, ['+l', limit], prefix=prefix, msg=msg)
def unlimit(channel, limit, prefix='', msg=None):
return mode(channel, ['-l', limit], prefix=prefix, msg=msg)
def invite(nick, channel, prefix='', msg=None):
"""Returns an INVITE for nick."""
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='INVITE',
args=(nick, channel), msg=msg)
def password(password, prefix='', msg=None):
"""Returns a PASS command for accessing a server."""
if conf.supybot.protocols.irc.strictRfc():
assert password, 'password must not be empty.'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PASS', args=(password,), msg=msg)
def ison(nick, prefix='', msg=None):
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='ISON', args=(nick,), msg=msg)
def error(s, msg=None):
return IrcMsg(command='ERROR', args=(s,), msg=msg)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| avg_line_length: 39.106383 | max_line_length: 80 | alphanum_fraction: 0.587659 |
| hexsha: 84ef45d9593589dd69b869cebcf4ccca82eaf42c | size: 2,239 | ext: py | lang: Python | path: tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py | repo: leike666666/tensorflow | head_hexsha: a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | licenses: ["Apache-2.0"] | stars: 56 (2018-06-21T13:47:23.000Z .. 2020-05-13T09:31:47.000Z) | issues: 58 in sagol/tensorflow @ 04f2870814d2773e09dcfa00cbe76a66a2c4de88 (2021-11-22T05:41:28.000Z .. 2022-01-19T01:33:40.000Z) | forks: 15 in sagol/tensorflow @ 04f2870814d2773e09dcfa00cbe76a66a2c4de88 (2018-09-06T14:18:32.000Z .. 2020-05-14T06:35:30.000Z) |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `NoopElimination` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class NoopEliminationTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testNoopElimination(self):
a = constant_op.constant(1, dtype=dtypes.int64)
b = constant_op.constant(2, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.apply(
testing.assert_next(
["FiniteRepeat", "FiniteSkip", "Prefetch", "MemoryCacheImpl"]))
dataset = dataset.repeat(some_tensor).skip(5).take(-1).skip(0).repeat(
1).prefetch(0).prefetch(1).cache()
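    # Note (added annotation, not in the original test): take(-1), skip(0), repeat(1)
    # and prefetch(0) are the no-ops expected to be eliminated, which is why only
    # FiniteRepeat, FiniteSkip, Prefetch and MemoryCacheImpl are asserted above.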
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=range(5))
if __name__ == "__main__":
test.main()
| avg_line_length: 40.709091 | max_line_length: 80 | alphanum_fraction: 0.750335 |
| hexsha: 9455e0e44cb5fce8f17cde02b11a0b46b3bdfdc1 | size: 8,921 | ext: py | lang: Python | path: tensorflow_serving/apis/session_service_pb2.py | repo: alexeygrigorev/tensorflow-protobuf | head_hexsha: 9863a9281eb6caa9be73128c03906d990639208c | licenses: ["Apache-2.0"] | stars: 7 (2020-12-28T02:53:05.000Z .. 2022-03-23T05:45:03.000Z) | issues: 1 (2021-01-27T16:06:16.000Z .. 2021-01-27T19:43:38.000Z) | forks: 1 (2021-02-11T11:46:01.000Z .. 2021-02-11T11:46:01.000Z) |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/session_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2
from tensorflow.core.protobuf import config_pb2 as tensorflow_dot_core_dot_protobuf_dot_config__pb2
from tensorflow.core.protobuf import named_tensor_pb2 as tensorflow_dot_core_dot_protobuf_dot_named__tensor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/session_service.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_options=b'\370\001\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n-tensorflow_serving/apis/session_service.proto\x12\x12tensorflow.serving\x1a#tensorflow_serving/apis/model.proto\x1a%tensorflow/core/protobuf/config.proto\x1a+tensorflow/core/protobuf/named_tensor.proto\"\xba\x01\n\x11SessionRunRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12*\n\x04\x66\x65\x65\x64\x18\x02 \x03(\x0b\x32\x1c.tensorflow.NamedTensorProto\x12\r\n\x05\x66\x65tch\x18\x03 \x03(\t\x12\x0e\n\x06target\x18\x04 \x03(\t\x12\'\n\x07options\x18\x05 \x01(\x0b\x32\x16.tensorflow.RunOptions\"\xa0\x01\n\x12SessionRunResponse\x12\x31\n\nmodel_spec\x18\x03 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12,\n\x06tensor\x18\x01 \x03(\x0b\x32\x1c.tensorflow.NamedTensorProto\x12)\n\x08metadata\x18\x02 \x01(\x0b\x32\x17.tensorflow.RunMetadata2m\n\x0eSessionService\x12[\n\nSessionRun\x12%.tensorflow.serving.SessionRunRequest\x1a&.tensorflow.serving.SessionRunResponseB\x03\xf8\x01\x01\x62\x06proto3'
,
dependencies=[tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_config__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_named__tensor__pb2.DESCRIPTOR,])
_SESSIONRUNREQUEST = _descriptor.Descriptor(
name='SessionRunRequest',
full_name='tensorflow.serving.SessionRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_spec', full_name='tensorflow.serving.SessionRunRequest.model_spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='feed', full_name='tensorflow.serving.SessionRunRequest.feed', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fetch', full_name='tensorflow.serving.SessionRunRequest.fetch', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target', full_name='tensorflow.serving.SessionRunRequest.target', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='tensorflow.serving.SessionRunRequest.options', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=191,
serialized_end=377,
)
_SESSIONRUNRESPONSE = _descriptor.Descriptor(
name='SessionRunResponse',
full_name='tensorflow.serving.SessionRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_spec', full_name='tensorflow.serving.SessionRunResponse.model_spec', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorflow.serving.SessionRunResponse.tensor', index=1,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='tensorflow.serving.SessionRunResponse.metadata', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=380,
serialized_end=540,
)
_SESSIONRUNREQUEST.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_SESSIONRUNREQUEST.fields_by_name['feed'].message_type = tensorflow_dot_core_dot_protobuf_dot_named__tensor__pb2._NAMEDTENSORPROTO
_SESSIONRUNREQUEST.fields_by_name['options'].message_type = tensorflow_dot_core_dot_protobuf_dot_config__pb2._RUNOPTIONS
_SESSIONRUNRESPONSE.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_SESSIONRUNRESPONSE.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_protobuf_dot_named__tensor__pb2._NAMEDTENSORPROTO
_SESSIONRUNRESPONSE.fields_by_name['metadata'].message_type = tensorflow_dot_core_dot_protobuf_dot_config__pb2._RUNMETADATA
DESCRIPTOR.message_types_by_name['SessionRunRequest'] = _SESSIONRUNREQUEST
DESCRIPTOR.message_types_by_name['SessionRunResponse'] = _SESSIONRUNRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SessionRunRequest = _reflection.GeneratedProtocolMessageType('SessionRunRequest', (_message.Message,), {
'DESCRIPTOR' : _SESSIONRUNREQUEST,
'__module__' : 'tensorflow_serving.apis.session_service_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.SessionRunRequest)
})
_sym_db.RegisterMessage(SessionRunRequest)
SessionRunResponse = _reflection.GeneratedProtocolMessageType('SessionRunResponse', (_message.Message,), {
'DESCRIPTOR' : _SESSIONRUNRESPONSE,
'__module__' : 'tensorflow_serving.apis.session_service_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.SessionRunResponse)
})
_sym_db.RegisterMessage(SessionRunResponse)
DESCRIPTOR._options = None
_SESSIONSERVICE = _descriptor.ServiceDescriptor(
name='SessionService',
full_name='tensorflow.serving.SessionService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=542,
serialized_end=651,
methods=[
_descriptor.MethodDescriptor(
name='SessionRun',
full_name='tensorflow.serving.SessionService.SessionRun',
index=0,
containing_service=None,
input_type=_SESSIONRUNREQUEST,
output_type=_SESSIONRUNRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SESSIONSERVICE)
DESCRIPTOR.services_by_name['SessionService'] = _SESSIONSERVICE
# @@protoc_insertion_point(module_scope)
| avg_line_length: 46.952632 | max_line_length: 963 | alphanum_fraction: 0.794754 |
| hexsha: 936620f7a6e11dc66e8cf1ba017c7193aa9ebe15 | size: 916 | ext: py | lang: Python | path: pykgr/environment.py | repo: DylanEHolland/pykgr | head_hexsha: e66442790a29b0fa0d1e4586abf442cd927c8015 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: null |
import os
from pykgr import config
from pykgr.builder import Builder
class Environment(object):
builder = None
variables = None
def __init__(self):
self.variables = dict(os.environ)
self.builder = Builder(
directory=config.builder_directory
)
def build_builder(self):
pass
def build_package(self, package_name):
self.builder.build(package_name)
def __str__(self):
return "<Environment: %s>" % id(self)
def build_directories():
if not os.path.exists(config.root_directory):
os.mkdir(config.root_directory)
for d in [
config.main_directory,
config.source_directory,
config.source_tarballs_directory,
config.library_directory
]:
if not os.path.exists(d):
os.mkdir(d)
def initialize():
build_directories()
env = Environment()
return env
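# Typical usage (sketch; assumes pykgr.config has already been populated with the
# directory settings referenced above):
#
#   env = initialize()
#   env.build_package("some-package-name")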
| avg_line_length: 21.302326 | max_line_length: 49 | alphanum_fraction: 0.635371 |
| hexsha: fcc793fb7ae9e5682a1bdd01da453cea065eb2cd | size: 7,221 | ext: py | lang: Python | path: generate_vectors.py | repo: Huge/python-shamir-mnemonic | head_hexsha: f736c38078207b0729994730621bba95e8b79ebd | licenses: ["MIT"] | stars: 122 (2018-10-23T13:02:49.000Z .. 2022-03-24T08:39:06.000Z) | issues: 40 in precyx/python-shamir-mnemonic @ be319758cf6bdb60b2dc3a3470d041f1ada25750 (2019-03-07T03:18:07.000Z .. 2021-12-07T11:11:16.000Z) | forks: 34 in precyx/python-shamir-mnemonic @ be319758cf6bdb60b2dc3a3470d041f1ada25750 (2018-11-07T13:59:36.000Z .. 2022-03-19T18:32:19.000Z) |
#!/usr/bin/env python3
import json
import random
import attr
from shamir_mnemonic import constants, rs1024, shamir, wordlist
from shamir_mnemonic.share import Share
def random_bytes(n):
return bytes(random.randrange(256) for _ in range(n))
def output(description, mnemonics, secret):
output.i += 1
output.data.append((f"{output.i}. {description}", mnemonics, secret.hex()))
def encode_mnemonic(*args):
return Share(*args).mnemonic()
def decode_mnemonic(mnemonic):
return list(attr.astuple(Share.from_mnemonic(mnemonic)))
def generate_mnemonics_random(group_threshold, groups):
secret = random_bytes(16)
return shamir.generate_mnemonics(
group_threshold, groups, secret, iteration_exponent=0
)
output.i = 0
output.data = []
shamir.RANDOM_BYTES = random_bytes
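# Note (added annotation): the shamir module's randomness source is replaced with
# the helper above, so together with random.seed(1337) below the generated test
# vectors are deterministic across runs.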
if __name__ == "__main__":
random.seed(1337)
for n in [16, 32]:
description = "Valid mnemonic without sharing ({} bits)"
secret = random_bytes(n)
groups = shamir.generate_mnemonics(
1, [(1, 1)], secret, b"TREZOR", iteration_exponent=0
)
output(description.format(8 * n), groups[0], secret)
description = "Mnemonic with invalid checksum ({} bits)"
indices = wordlist.mnemonic_to_indices(groups[0][0])
indices[-1] ^= 1
mnemonic = wordlist.mnemonic_from_indices(indices)
output(description.format(8 * n), [mnemonic], b"")
description = "Mnemonic with invalid padding ({} bits)"
overflowing_bits = (8 * n) % constants.RADIX_BITS
if overflowing_bits:
indices = wordlist.mnemonic_to_indices(groups[0][0])
indices[4] += 1 << overflowing_bits
indices = indices[: -constants.CHECKSUM_LENGTH_WORDS]
mnemonic = wordlist.mnemonic_from_indices(
indices + rs1024.create_checksum(indices)
)
output(description.format(8 * n), [mnemonic], b"")
description = "Basic sharing 2-of-3 ({} bits)"
secret = random_bytes(n)
groups = shamir.generate_mnemonics(1, [(2, 3)], secret, b"TREZOR", 2)
output(description.format(8 * n), random.sample(groups[0], 2), secret)
output(description.format(8 * n), random.sample(groups[0], 1), b"")
description = "Mnemonics with different identifiers ({} bits)"
groups = generate_mnemonics_random(1, [(2, 2)])
data = decode_mnemonic(groups[0][0])
data[0] ^= 1 # modify the identifier
mnemonics = [encode_mnemonic(*data), groups[0][1]]
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics with different iteration exponents ({} bits)"
groups = generate_mnemonics_random(1, [(2, 2)])
data = decode_mnemonic(groups[0][0])
data[1] = 3 # change iteration exponent from 0 to 3
mnemonics = [encode_mnemonic(*data), groups[0][1]]
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics with mismatching group thresholds ({} bits)"
groups = generate_mnemonics_random(2, [(1, 1), (2, 2)])
data = decode_mnemonic(groups[0][0])
data[3] = 1 # change group threshold from 2 to 1
mnemonics = groups[1] + [encode_mnemonic(*data)]
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics with mismatching group counts ({} bits)"
groups = generate_mnemonics_random(1, [(2, 2)])
data = decode_mnemonic(groups[0][0])
data[4] = 3 # change group count from 1 to 3
mnemonics = [encode_mnemonic(*data), groups[0][1]]
output(description.format(8 * n), mnemonics, b"")
description = (
"Mnemonics with greater group threshold than group counts ({} bits)"
)
groups = generate_mnemonics_random(2, [(2, 2), (1, 1)])
mnemonics = []
for group in groups:
for mnemonic in group:
data = decode_mnemonic(mnemonic)
data[4] = 1 # change group count from 2 to 1
mnemonics.append(encode_mnemonic(*data))
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics with duplicate member indices ({} bits)"
groups = generate_mnemonics_random(1, [(2, 3)])
data = decode_mnemonic(groups[0][0])
data[5] = 2 # change member index from 0 to 2
mnemonics = [encode_mnemonic(*data), groups[0][2]]
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics with mismatching member thresholds ({} bits)"
groups = generate_mnemonics_random(1, [(2, 2)])
data = decode_mnemonic(groups[0][0])
data[6] = 1 # change member threshold from 2 to 1
mnemonics = [encode_mnemonic(*data), groups[0][1]]
output(description.format(8 * n), mnemonics, b"")
description = "Mnemonics giving an invalid digest ({} bits)"
groups = generate_mnemonics_random(1, [(2, 2)])
data = decode_mnemonic(groups[0][0])
data[7] = bytes((data[7][0] ^ 1,)) + data[7][1:] # modify the share value
mnemonics = [encode_mnemonic(*data), groups[0][1]]
output(description.format(8 * n), mnemonics, b"")
# Group sharing.
secret = random_bytes(n)
groups = shamir.generate_mnemonics(
2, [(1, 1), (1, 1), (3, 5), (2, 6)], secret, b"TREZOR", iteration_exponent=0
)
description = "Insufficient number of groups ({} bits, case {})"
output(description.format(8 * n, 1), [groups[1][0]], b"")
output(description.format(8 * n, 2), random.sample(groups[3], 2), b"")
description = "Threshold number of groups, but insufficient number of members in one group ({} bits)"
output(description.format(8 * n), [groups[3][2], groups[1][0]], b"")
description = (
"Threshold number of groups and members in each group ({} bits, case {})"
)
mnemonics = random.sample(groups[2], 3) + random.sample(groups[3], 2)
random.shuffle(mnemonics)
output(description.format(8 * n, 1), mnemonics, secret)
mnemonics = groups[1] + random.sample(groups[3], 2)
random.shuffle(mnemonics)
output(description.format(8 * n, 2), mnemonics, secret)
output(description.format(8 * n, 3), [groups[1][0], groups[0][0]], secret)
description = "Mnemonic with insufficient length"
secret = random_bytes((shamir.MIN_STRENGTH_BITS // 8) - 2)
identifier = random.randrange(1 << shamir.ID_LENGTH_BITS)
mnemonic = encode_mnemonic(identifier, 0, 0, 1, 1, 0, 1, secret)
output(description, [mnemonic], b"")
description = "Mnemonic with invalid master secret length"
secret = b"\xff" + random_bytes(shamir.MIN_STRENGTH_BITS // 8)
identifier = random.randrange(1 << shamir.ID_LENGTH_BITS)
mnemonic = encode_mnemonic(identifier, 0, 0, 1, 1, 0, 1, secret)
output(description, [mnemonic], b"")
with open("vectors.json", "w") as f:
json.dump(
output.data,
f,
sort_keys=True,
indent=2,
separators=(",", ": "),
ensure_ascii=False,
)
| avg_line_length: 39.675824 | max_line_length: 109 | alphanum_fraction: 0.61515 |
| hexsha: 541d80816b80f4bda9f934a2a4b19c2820d0e022 | size: 1,285 | ext: py | lang: Python | path: python/graphscope/framework/vineyard_object.py | repo: wenyuanyu/GraphScope | head_hexsha: a40ccaf70557e608d8b091eb25ab04477f99ce21 | licenses: ["Apache-2.0"] | stars: 2 (2020-12-15T08:42:10.000Z .. 2022-01-14T09:13:16.000Z) | issues: 1 (2020-12-22T13:15:40.000Z .. 2020-12-22T13:15:40.000Z) | forks: 1 (2021-11-23T03:40:43.000Z .. 2021-11-23T03:40:43.000Z) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class VineyardObject:
"""A vineyard object may hold a id, or a name.
Attributes:
object_id
object_name
"""
def __init__(self, object_id=None, object_name=None):
self._object_id = object_id
self._object_name = object_name
@property
def object_id(self):
return self._object_id
@object_id.setter
def object_id(self, object_id):
self._object_id = object_id
@property
def object_name(self):
return self._object_name
@object_name.setter
def object_name(self, object_name):
self._object_name = object_name
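# Example (illustrative, not in the original source): a VineyardObject can be
# addressed either by id or by name, e.g.
#   VineyardObject(object_id=12345)  or  VineyardObject(object_name="my_graph")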
| avg_line_length: 27.340426 | max_line_length: 74 | alphanum_fraction: 0.700389 |
| hexsha: 0c3f386612692b6ca0f74398b5d72bc4eb6481d8 | size: 1,237 | ext: tac | lang: Python | path: docs/mail/tutorial/smtpserver/smtpserver-5.tac | repo: hawkowl/twisted | head_hexsha: c413aac3888dea2202c0dc26f978d7f88b4b837a | licenses: ["Unlicense", "MIT"] | stars: 1 (2017-04-26T10:24:21.000Z .. 2017-04-26T10:24:21.000Z) | issues: 5 (2020-06-05T18:16:39.000Z .. 2022-01-13T00:45:49.000Z) | forks: 1 (2019-10-02T18:36:25.000Z .. 2019-10-02T18:36:25.000Z) |
import os
from zope.interface import implementer
from twisted.application import service
application = service.Application("SMTP Server Tutorial")
from twisted.application import internet
from twisted.internet import protocol, defer
smtpServerFactory = protocol.ServerFactory()
from twisted.mail import smtp
@implementer(smtp.IMessage)
class FileMessage(object):
def __init__(self, fileObj):
self.fileObj = fileObj
def lineReceived(self, line):
self.fileObj.write(line + '\n')
def eomReceived(self):
self.fileObj.close()
return defer.succeed(None)
def connectionLost(self):
self.fileObj.close()
os.remove(self.fileObj.name)
class TutorialESMTP(smtp.ESMTP):
counter = 0
def validateTo(self, user):
fileName = 'tutorial-smtp.' + str(self.counter)
self.counter += 1
return lambda: FileMessage(open(fileName, 'w'))
def validateFrom(self, helo, origin):
return origin
def receivedHeader(self, helo, origin, recipients):
return 'Received: Tutorially.'
smtpServerFactory.protocol = TutorialESMTP
smtpServerService = internet.TCPServer(2025, smtpServerFactory)
smtpServerService.setServiceParent(application)
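# Usage note (added annotation): run this tutorial application under twistd,
# e.g. `twistd -ny smtpserver-5.tac`. The server listens on TCP port 2025 and
# each accepted message is written to a file named tutorial-smtp.<n> in the
# working directory.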
| avg_line_length: 24.74 | max_line_length: 63 | alphanum_fraction: 0.717057 |
| hexsha: 34d11e8ac422ba06d98f7d0105bdbf384f80f996 | size: 2,209 | ext: py | lang: Python | path: tutorials/SAC/network/CriticNetwork.py | repo: namjiwon1023/Code_With_RL | head_hexsha: 37beec975b1685e9f6cf991abed491b854b78173 | licenses: ["MIT"] | stars: 3 (2021-08-12T15:11:28.000Z .. 2021-09-27T16:04:16.000Z) | issues: null | forks: 1 (2021-08-05T07:20:57.000Z .. 2021-08-05T07:20:57.000Z) |
# Copyright (c) 2021: Zhiyuan Nan (namjw@hanyang.ac.kr).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import torch as T
import torch.nn as nn
import torch.optim as optim
import os
import random
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
class CriticNetwork(nn.Module):
def __init__(self, n_states, n_actions, args):
super(CriticNetwork, self).__init__()
self.device = args.device
self.checkpoint = os.path.join(args.save_dir + '/' + args.env_name, 'SAC_critic.pth')
self.critic1 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, 1)
)
self.critic2 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, 1)
)
self.loss_func = nn.MSELoss()
self.reset_parameters(self.critic1)
self.reset_parameters(self.critic2)
self.optimizer = optim.Adam(self.parameters(), lr=args.critic_lr)
self.to(self.device)
def forward(self, state, action):
cat = T.cat((state, action), dim=1)
Q1 = self.critic1(cat)
Q2 = self.critic2(cat)
return Q1, Q2
def reset_parameters(self, Sequential, std=1.0, bias_const=1e-6):
for layer in Sequential:
if isinstance(layer, nn.Linear):
nn.init.orthogonal_(layer.weight, std)
nn.init.constant_(layer.bias, bias_const)
def save_model(self):
T.save(self.state_dict(), self.checkpoint)
def load_model(self):
self.load_state_dict(T.load(self.checkpoint))
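# Minimal shape-check sketch (not part of the original file); `args` here is a
# hypothetical namespace carrying the fields the constructor reads
# (device, save_dir, env_name, hidden_size, critic_lr):
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(device='cpu', save_dir='./model', env_name='Pendulum-v1',
#                          hidden_size=256, critic_lr=3e-4)
#   critic = CriticNetwork(n_states=3, n_actions=1, args=args)
#   q1, q2 = critic(T.randn(8, 3), T.randn(8, 1))  # both Q-value tensors have shape (8, 1)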
| avg_line_length: 35.063492 | max_line_length: 93 | alphanum_fraction: 0.56632 |
| hexsha: 3b6f90a42fa02099f7c2d769d97ec359bd82733d | size: 9,326 | ext: py | lang: Python | path: timm/models/ghostnet.py | repo: Robert-JunWang/pytorch-image-models | head_hexsha: 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | licenses: ["Apache-2.0"] | stars: 17,769 (2019-05-02T08:08:25.000Z .. 2022-03-31T22:14:44.000Z) | issues: 556 (2019-05-26T16:31:37.000Z .. 2022-03-30T04:21:07.000Z) | forks: 3,029 (2019-05-14T01:18:28.000Z .. 2022-03-31T20:09:50.000Z) |
"""
An implementation of GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907
The training script for this model is similar to that of MobileNetV3
Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch
"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .layers import SelectAdaptivePool2d, Linear, make_divisible
from .efficientnet_blocks import SqueezeExcite, ConvBnAct
from .helpers import build_model_with_cfg
from .registry import register_model
__all__ = ['GhostNet']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = {
'ghostnet_050': _cfg(url=''),
'ghostnet_100': _cfg(
url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'),
'ghostnet_130': _cfg(url=''),
}
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
class GhostModule(nn.Module):
def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
super(GhostModule, self).__init__()
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False),
nn.BatchNorm2d(init_channels),
nn.ReLU(inplace=True) if relu else nn.Sequential(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False),
nn.BatchNorm2d(new_channels),
nn.ReLU(inplace=True) if relu else nn.Sequential(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.oup, :, :]
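# Worked example (not part of the original file): for GhostModule(inp=16, oup=64) with the
# default ratio=2, init_channels = ceil(64 / 2) = 32 and new_channels = 32 * (2 - 1) = 32, so
# primary_conv yields 32 "intrinsic" maps, cheap_operation adds 32 depthwise "ghost" maps, and
# the concatenation is sliced back to the requested 64 output channels.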
class GhostBottleneck(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,
stride=1, act_layer=nn.ReLU, se_ratio=0.):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
shortcut = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(shortcut)
return x
class GhostNet(nn.Module):
def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, output_stride=32, global_pool='avg'):
super(GhostNet, self).__init__()
# setting of inverted residual blocks
assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
self.cfgs = cfgs
self.num_classes = num_classes
self.dropout = dropout
self.feature_info = []
# building first layer
stem_chs = make_divisible(16 * width, 4)
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module='conv_stem'))
self.bn1 = nn.BatchNorm2d(stem_chs)
self.act1 = nn.ReLU(inplace=True)
prev_chs = stem_chs
# building inverted residual blocks
stages = nn.ModuleList([])
block = GhostBottleneck
stage_idx = 0
net_stride = 2
for cfg in self.cfgs:
layers = []
s = 1
for k, exp_size, c, se_ratio, s in cfg:
out_chs = make_divisible(c * width, 4)
mid_chs = make_divisible(exp_size * width, 4)
layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio))
prev_chs = out_chs
if s > 1:
net_stride *= 2
self.feature_info.append(dict(
num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
stages.append(nn.Sequential(*layers))
stage_idx += 1
out_chs = make_divisible(exp_size * width, 4)
stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
self.pool_dim = prev_chs = out_chs
self.blocks = nn.Sequential(*stages)
# building last several layers
self.num_features = out_chs = 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.flatten(x)
if self.dropout > 0.:
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.classifier(x)
return x
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs):
"""
Constructs a GhostNet model
"""
cfgs = [
# k, t, c, SE, s
# stage1
[[3, 16, 16, 0, 1]],
# stage2
[[3, 48, 24, 0, 2]],
[[3, 72, 24, 0, 1]],
# stage3
[[5, 72, 40, 0.25, 2]],
[[5, 120, 40, 0.25, 1]],
# stage4
[[3, 240, 80, 0, 2]],
[[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1]
],
# stage5
[[5, 672, 160, 0.25, 2]],
[[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
]
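    # Illustrative reading of one row above (not part of the original file): with the column
    # order "k, t, c, SE, s", the entry [5, 72, 40, 0.25, 2] means a 5x5 depthwise kernel,
    # 72 expansion (mid) channels, 40 output channels, SE ratio 0.25 and stride 2, before the
    # make_divisible(... * width, 4) scaling applied in GhostNet.__init__ (exact at width=1.0).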
model_kwargs = dict(
cfgs=cfgs,
width=width,
**kwargs,
)
return build_model_with_cfg(
GhostNet, variant, pretrained,
default_cfg=default_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**model_kwargs)
@register_model
def ghostnet_050(pretrained=False, **kwargs):
""" GhostNet-0.5x """
model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_100(pretrained=False, **kwargs):
""" GhostNet-1.0x """
model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_130(pretrained=False, **kwargs):
""" GhostNet-1.3x """
model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
return model
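# Usage sketch (hedged; assumes timm's standard create_model() factory; not part of this file):
#
#     import torch, timm
#     model = timm.create_model('ghostnet_100', pretrained=False)
#     model.eval()
#     with torch.no_grad():
#         logits = model(torch.randn(1, 3, 224, 224))   # expected shape: (1, 1000)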
| 33.66787 | 120 | 0.599936 |
57cea09ee014705aaf1659c5cf32edc7339cbccd | 181,101 | py | Python
| python/ccxt/async_support/zb.py | DoctorSlimm/ccxt | 8f19512dfc5dac159eaeb465c98226c00252a9b6 | ["MIT"] | 1 | 2021-11-16T15:45:34.000Z | 2021-11-16T15:45:34.000Z
| python/ccxt/async_support/zb.py | DoctorSlimm/ccxt | 8f19512dfc5dac159eaeb465c98226c00252a9b6 | ["MIT"] | null | null | null
| python/ccxt/async_support/zb.py | DoctorSlimm/ccxt | 8f19512dfc5dac159eaeb465c98226c00252a9b6 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import NotSupported
from ccxt.base.errors import NetworkError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class zb(Exchange):
def describe(self):
return self.deep_extend(super(zb, self).describe(), {
'id': 'zb',
'name': 'ZB',
'countries': ['CN'],
# previously rateLimit = 100
# Trading and Margin 10 000 per minute(IP) => 10000 / 60 = 166.66666... per second => rateLimit = 1000/166.66666 = 6
# Trade and Margin 60 per second(apiKey) => weight = 166.666 / 60 = 2.778(2.7777777...)
# Kline 1 per second => weight = 166.667
# v2 Futures API 100 per 2 seconds => 50 per second => weight = 3.334(3.3333333...)
# for endpoints not mentioned in docs
# previous rateLimit was 100 translating to 10 requests per second => weight = 166.666 / 10 = 16.667(16.666666...)
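            # illustration: a weight of 16.667 combined with the 6 ms rateLimit set below
            # corresponds to one call per ~100 ms, i.e. about 10 requests per second for that endpoint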
'rateLimit': 6,
'version': 'v1',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': None,
'option': None,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': None,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchBalance': True,
'fetchBorrowRate': True,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': True,
'fetchCanceledOrders': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddresses': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarketLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchWithdrawals': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': False,
'setPositionMode': False,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'5d': '5d',
'1w': '1w',
},
'hostname': 'zb.com', # zb.cafe for users in China
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32859187-cd5214f0-ca5e-11e7-967d-96568e2e2bd1.jpg',
'api': {
'spot': {
'v1': {
'public': 'https://api.{hostname}/data',
'private': 'https://trade.{hostname}/api',
},
},
'contract': {
'v1': {
'public': 'https://fapi.{hostname}/api/public',
},
'v2': {
'public': 'https://fapi.{hostname}/Server/api',
'private': 'https://fapi.{hostname}/Server/api',
},
},
},
'www': 'https://www.zb.com',
'doc': 'https://www.zb.com/i/developer',
'fees': 'https://www.zb.com/i/rate',
'referral': {
'url': 'https://www.zbex.club/en/register?ref=4301lera',
'discount': 0.16,
},
},
'api': {
'spot': {
'v1': {
'public': {
'get': {
'markets': 16.667,
'ticker': 16.667,
'allTicker': 16.667,
'depth': 16.667,
'trades': 16.667,
'kline': 166.667, # Kline 1 per second
'getGroupMarkets': 16.667,
'getFeeInfo': 16.667,
},
},
'private': {
'get': {
# spot API
'order': 1, # Trade API
'orderMoreV2': 1, # Trade API
'cancelOrder': 1, # Trade API
'cancelAllOrdersAfter': 1, # Trade API TODO add cancelAllOrders
'getOrder': 1, # Trade API
'getOrders': 1, # Trade API
'getOrdersNew': 16.667,
'getOrdersIgnoreTradeType': 1, # Trade API
'getUnfinishedOrdersIgnoreTradeType': 1, # Trade API
'getFinishedAndPartialOrders': 1, # Trade API
'getAccountInfo': 16.667,
'getUserAddress': 16.667,
'getPayinAddress': 16.667,
'getWithdrawAddress': 16.667,
'getWithdrawRecord': 16.667,
'getChargeRecord': 16.667,
'getCnyWithdrawRecord': 16.667,
'getCnyChargeRecord': 16.667,
'withdraw': 16.667,
# sub accounts
'addSubUser': 16.667,
'getSubUserList': 16.667,
'doTransferFunds': 16.667,
'createSubUserKey': 16.667, # removed on 2021-03-16 according to the update log in the API doc
# leverage API
'getLeverAssetsInfo': 16.667,
'getLeverBills': 16.667,
'transferInLever': 16.667,
'transferOutLever': 16.667,
'loan': 16.667,
'cancelLoan': 16.667,
'getLoans': 16.667,
'getLoanRecords': 16.667,
'borrow': 16.667,
'autoBorrow': 16.667,
'repay': 16.667,
'doAllRepay': 16.667,
'getRepayments': 16.667,
'getFinanceRecords': 16.667,
'changeInvestMark': 16.667,
'changeLoop': 16.667,
# cross API
'getCrossAssets': 16.667,
'getCrossBills': 16.667,
'transferInCross': 16.667,
'transferOutCross': 16.667,
'doCrossLoan': 16.667,
'doCrossRepay': 16.667,
'getCrossRepayRecords': 16.667,
},
},
},
},
'contract': {
'v1': {
'public': {
'get': {
'depth': 16.667,
'fundingRate': 16.667,
'indexKline': 16.667,
'indexPrice': 16.667,
'kline': 16.667,
'markKline': 16.667,
'markPrice': 16.667,
'ticker': 16.667,
'trade': 16.667,
},
},
},
'v2': {
'public': {
'get': {
'allForceOrders': 3.334,
'config/marketList': 3.334,
'topLongShortAccountRatio': 3.334,
'topLongShortPositionRatio': 3.334,
'fundingRate': 3.334,
'premiumIndex': 3.334,
},
},
'private': {
'get': {
'Fund/balance': 3.334,
'Fund/getAccount': 3.334,
'Fund/getBill': 3.334,
'Fund/getBillTypeList': 3.334,
'Fund/marginHistory': 3.334,
'Positions/getPositions': 3.334,
'Positions/getNominalValue': 3.334,
'Positions/marginInfo': 3.334,
'setting/get': 3.334,
'trade/getAllOrders': 3.334,
'trade/getOrder': 3.334,
'trade/getOrderAlgos': 3.334,
'trade/getTradeList': 3.334,
'trade/getUndoneOrders': 3.334,
'trade/tradeHistory': 3.334,
},
'post': {
'activity/buyTicket': 3.334,
'Fund/transferFund': 3.334,
'Positions/setMarginCoins': 3.334,
'Positions/updateAppendUSDValue': 3.334,
'Positions/updateMargin': 3.334,
'setting/setLeverage': 3.334,
'trade/batchOrder': 3.334,
'trade/batchCancelOrder': 3.334,
'trade/cancelAlgos': 3.334,
'trade/cancelAllOrders': 3.334,
'trade/cancelOrder': 3.334,
'trade/order': 3.334,
'trade/orderAlgo': 3.334,
'trade/updateOrderAlgo': 3.334,
},
},
},
},
},
'fees': {
'funding': {
'withdraw': {},
},
'trading': {
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'commonCurrencies': {
'ANG': 'Anagram',
'ENT': 'ENTCash',
'BCHABC': 'BCHABC', # conflict with BCH / BCHA
'BCHSV': 'BCHSV', # conflict with BCH / BSV
},
'options': {
'timeframes': {
'spot': {
'1m': '1min',
'3m': '3min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': '1day',
'3d': '3day',
'1w': '1week',
},
'swap': {
'1m': '1M',
'5m': '5M',
'15m': '15M',
'30m': '30M',
'1h': '1H',
'6h': '6H',
'1d': '1D',
'5d': '5D',
},
},
},
'precisionMode': TICK_SIZE,
'exceptions': {
'ws': {
# '1000': ExchangeError, # The call is successful.
'1001': ExchangeError, # General error prompt
'1002': ExchangeError, # Internal Error
'1003': AuthenticationError, # Fail to verify
'1004': AuthenticationError, # The transaction password is locked
'1005': AuthenticationError, # Wrong transaction password, please check it and re-enter。
'1006': PermissionDenied, # Real-name authentication is pending approval or unapproved
'1007': ExchangeError, # Channel does not exist
'1009': OnMaintenance, # This interface is under maintenance
'1010': ExchangeNotAvailable, # Not available now
'1012': PermissionDenied, # Insufficient permissions
'1013': ExchangeError, # Cannot trade, please contact email: support@zb.cn for support.
'1014': ExchangeError, # Cannot sell during the pre-sale period
'2001': InsufficientFunds, # Insufficient CNY account balance
'2002': InsufficientFunds, # Insufficient BTC account balance
'2003': InsufficientFunds, # Insufficient LTC account balance
'2005': InsufficientFunds, # Insufficient ETH account balance
'2006': InsufficientFunds, # ETCInsufficient account balance
'2007': InsufficientFunds, # BTSInsufficient account balance
'2008': InsufficientFunds, # EOSInsufficient account balance
'2009': InsufficientFunds, # BCCInsufficient account balance
'3001': OrderNotFound, # Order not found or is completed
'3002': InvalidOrder, # Invalid amount
'3003': InvalidOrder, # Invalid quantity
'3004': AuthenticationError, # User does not exist
'3005': BadRequest, # Invalid parameter
'3006': PermissionDenied, # Invalid IP or not consistent with the bound IP
'3007': RequestTimeout, # The request time has expired
'3008': ExchangeError, # Transaction not found
'3009': InvalidOrder, # The price exceeds the limit
                    '3010': PermissionDenied,  # It fails to place an order because you have disabled trading for this market.
'3011': InvalidOrder, # The entrusted price is abnormal, please modify it and place order again
'3012': InvalidOrder, # Duplicate custom customerOrderId
'4001': AccountSuspended, # APIThe interface is locked for one hour
'4002': RateLimitExceeded, # Request too frequently
},
'exact': {
# '1000': 'Successful operation',
'10001': ExchangeError, # Operation failed
'10002': PermissionDenied, # Operation is forbidden
'10003': BadResponse, # Data existed
'10004': BadResponse, # Date not exist
'10005': PermissionDenied, # Forbidden to access the interface
'10006': BadRequest, # Currency invalid or expired
'10007': ExchangeError, # {0}
'10008': ExchangeError, # Operation failed: {0}
'10009': ExchangeError, # URL error
'1001': ExchangeError, # 'General error message',
'10010': AuthenticationError, # API KEY not exist
'10011': AuthenticationError, # API KEY CLOSED
'10012': AccountSuspended, # User API has been frozen, please contact customer service for processing
'10013': AuthenticationError, # API verification failed
'10014': AuthenticationError, # Invalid signature(1001)
'10015': AuthenticationError, # Invalid signature(1002)
'10016': AuthenticationError, # Invalid ip
'10017': PermissionDenied, # Permission denied
'10018': AccountSuspended, # User has been frozen, please contact customer service
'10019': RequestTimeout, # Request time has expired
'1002': ExchangeError, # 'Internal error',
'10020': BadRequest, # {0}Parameter cannot be empty
'10021': BadRequest, # {0}Invalid parameter
'10022': BadRequest, # Request method error
'10023': RateLimitExceeded, # Request frequency is too fast, exceeding the limit allowed by the interface
'10024': AuthenticationError, # Login failed
'10025': ExchangeError, # Non-personal operation
'10026': NetworkError, # Failed to request interface, please try again
'10027': RequestTimeout, # Timed out, please try again later
'10028': ExchangeNotAvailable, # System busy, please try again later
'10029': DDoSProtection, # Frequent operation, please try again later
'1003': AuthenticationError, # 'Verification does not pass',
'10030': BadRequest, # Currency already exist
'10031': BadRequest, # Currency does not exist
'10032': BadRequest, # Market existed
'10033': BadRequest, # Market not exist
'10034': BadRequest, # Currency error
'10035': BadRequest, # Market not open
'10036': BadRequest, # Ineffective market type
'10037': ArgumentsRequired, # User id cannot be empty
'10038': BadRequest, # Market id cannot be empty
'10039': BadResponse, # Failed to get mark price
'1004': AuthenticationError, # 'Funding security password lock',
'10040': BadResponse, # Failed to obtain the opening margin configuration
'10041': BadResponse, # Failed to obtain maintenance margin allocation
'10042': ExchangeError, # Avg. price error
'10043': ExchangeError, # Abnormal acquisition of liquidation price
'10044': ExchangeError, # Unrealized profit and loss acquisition exception
'10045': ExchangeError, # jdbcData source acquisition failed
'10046': ExchangeError, # Invalid position opening direction
'10047': ExchangeError, # The maximum position allowed by the current leverage multiple has been exceeded
'10048': ExchangeError, # The maximum allowable order quantity has been exceeded
'10049': NetworkError, # Failed to get the latest price
'1005': AuthenticationError, # 'Funds security password is incorrect, please confirm and re-enter.',
'1006': AuthenticationError, # 'Real-name certification pending approval or audit does not pass',
'1009': ExchangeNotAvailable, # 'This interface is under maintenance',
'1010': ExchangeNotAvailable, # Not available now
'10100': OnMaintenance, # Sorry! System maintenance, stop operation
'1012': PermissionDenied, # Insufficient permissions
'1013': ExchangeError, # Cannot trade, please contact email: support@zb.cn for support.
'1014': ExchangeError, # Cannot sell during the pre-sale period
'11000': ExchangeError, # Funding change failed
'11001': ExchangeError, # Position change failed
'110011': ExchangeError, # Exceeds the maximum leverage allowed by the position
'11002': ExchangeError, # Funding not exist
'11003': ExchangeError, # Freeze records not exist
'11004': InsufficientFunds, # Insufficient frozen funds
'11005': InvalidOrder, # Insufficient positions
'11006': InsufficientFunds, # Insufficient frozen positions
'11007': OrderNotFound, # Position not exist
'11008': ExchangeError, # The contract have positions, cannot be modified
'11009': ExchangeError, # Failed to query data
'110110': ExchangeError, # Exceed the market's maximum leverage
'11012': InsufficientFunds, # Insufficient margin
'11013': ExchangeError, # Exceeding accuracy limit
'11014': ExchangeError, # Invalid bill type
'11015': AuthenticationError, # Failed to add default account
'11016': AuthenticationError, # Account not exist
'11017': ExchangeError, # Funds are not frozen or unfrozen
'11018': InsufficientFunds, # Insufficient funds
'11019': ExchangeError, # Bill does not exist
'11021': InsufficientFunds, # Inconsistent currency for funds transfer
'11023': ExchangeError, # Same transaction currency
'11030': PermissionDenied, # Position is locked, the operation is prohibited
'11031': ExchangeError, # The number of bill changes is zero
'11032': ExchangeError, # The same request is being processed, please do not submit it repeatedly
'11033': ArgumentsRequired, # Position configuration data is empty
'11034': ExchangeError, # Funding fee is being settled, please do not operate
'12000': InvalidOrder, # Invalid order price
'12001': InvalidOrder, # Invalid order amount
'12002': InvalidOrder, # Invalid order type
'12003': InvalidOrder, # Invalid price accuracy
'12004': InvalidOrder, # Invalid quantity precision
'12005': InvalidOrder, # order value less than the minimum or greater than the maximum
'12006': InvalidOrder, # Customize's order number format is wrong
'12007': InvalidOrder, # Direction error
'12008': InvalidOrder, # Order type error
'12009': InvalidOrder, # Commission type error
                    '12010': InvalidOrder,  # Failed to place the order, the loss of an order placed at this price would exceed the margin
'12011': InvalidOrder, # it's not a buz order
'12012': OrderNotFound, # order not exist
'12013': InvalidOrder, # Order user does not match
'12014': InvalidOrder, # Order is still in transaction
'12015': InvalidOrder, # Order preprocessing failed
'12016': InvalidOrder, # Order cannot be canceled
'12017': InvalidOrder, # Transaction Record not exist
'12018': InvalidOrder, # Order failed
'12019': ArgumentsRequired, # self.extend parameter cannot be empty
'12020': ExchangeError, # self.extend Parameter error
'12021': InvalidOrder, # The order price is not within the price limit rules!
'12022': InvalidOrder, # Stop placing an order while the system is calculating the fund fee
'12023': OrderNotFound, # There are no positions to close
'12024': InvalidOrder, # Orders are prohibited, stay tuned!
'12025': InvalidOrder, # Order cancellation is prohibited, so stay tuned!
'12026': DuplicateOrderId, # Order failed, customize order number exists
'12027': ExchangeNotAvailable, # System busy, please try again later
'12028': InvalidOrder, # The market has banned trading
'12029': InvalidOrder, # Forbidden place order, stay tuned
'12201': InvalidOrder, # Delegation strategy does not exist or the status has changed
'12202': InvalidOrder, # Delegation strategy has been changed, cannot be canceled
'12203': InvalidOrder, # Wrong order type
'12204': InvalidOrder, # Invalid trigger price
'12205': InvalidOrder, # The trigger price must be greater than the market’s selling price or lower than the buying price.
'12206': InvalidOrder, # Direction and order type do not match
'12207': RateLimitExceeded, # Submission failed, exceeding the allowed limit
'13001': AuthenticationError, # User not exist
'13002': PermissionDenied, # User did not activate futures
# '13003': AuthenticationError, # User is locked
'13003': InvalidOrder, # Margin gear is not continuous
'13004': InvalidOrder, # The margin quick calculation amount is less than 0
'13005': RateLimitExceeded, # You have exceeded the number of exports that day
'13006': ExchangeError, # No markets are bookmarked
'13007': ExchangeError, # Market not favorited
'13008': ExchangeError, # Not in any market user whitelist
'13009': ExchangeError, # Not in the whitelist of users in self market
'14000': ExchangeError, # {0}not support
'14001': AuthenticationError, # Already logged in, no need to log in multiple times
'14002': AuthenticationError, # Not logged in yet, please log in before subscribing
'14003': ExchangeError, # This is a channel for one-time queries, no need to unsubscribe
'14100': ExchangeError, # Accuracy does not support
'14101': RateLimitExceeded, # Request exceeded frequency limit
'14200': ArgumentsRequired, # id empty
'14300': ExchangeError, # activity not exist
'14301': ExchangeError, # The event has been opened and cannot be admitted
'14302': ExchangeError, # The purchase time has passed and cannot be admitted
'14303': ExchangeError, # Not yet open for the purchase
'14305': ExchangeError, # Cannot enter, the maximum number of returns has been exceeded
'14306': ExchangeError, # Cannot repeat admission
'14307': InvalidOrder, # Unable to cancel, status has been changed
'14308': InvalidOrder, # Unable to cancel, the amount does not match
'14309': ExchangeError, # Activity has not started
'14310': NotSupported, # Activity is over
'14311': NotSupported, # The activity does not support orders placed in self market
'14312': ExchangeError, # You have not participated in self activity
'14313': PermissionDenied, # Sorry! The purchase failed, the maximum number of participants has been reached
'14314': ExchangeError, # Active period id error
'2001': InsufficientFunds, # 'Insufficient CNY Balance',
'2002': InsufficientFunds, # 'Insufficient BTC Balance',
'2003': InsufficientFunds, # 'Insufficient LTC Balance',
'2005': InsufficientFunds, # 'Insufficient ETH Balance',
'2006': InsufficientFunds, # 'Insufficient ETC Balance',
'2007': InsufficientFunds, # 'Insufficient BTS Balance',
'2008': InsufficientFunds, # EOSInsufficient account balance
'2009': InsufficientFunds, # 'Account balance is not enough',
'3001': OrderNotFound, # 'Pending orders not found',
'3002': InvalidOrder, # 'Invalid price',
'3003': InvalidOrder, # 'Invalid amount',
'3004': AuthenticationError, # 'User does not exist',
'3005': BadRequest, # 'Invalid parameter',
'3006': AuthenticationError, # 'Invalid IP or inconsistent with the bound IP',
'3007': AuthenticationError, # 'The request time has expired',
'3008': OrderNotFound, # 'Transaction records not found',
'3009': InvalidOrder, # 'The price exceeds the limit',
                    '3010': PermissionDenied,  # It fails to place an order because you have disabled trading for this market.
'3011': InvalidOrder, # 'The entrusted price is abnormal, please modify it and place order again',
'3012': InvalidOrder, # Duplicate custom customerOrderId
'4001': ExchangeNotAvailable, # 'API interface is locked or not enabled',
'4002': RateLimitExceeded, # 'Request too often',
'9999': ExchangeError, # Unknown error
},
'broad': {
'提币地址有误, 请先添加提币地址。': InvalidAddress, # {"code":1001,"message":"提币地址有误,请先添加提币地址。"}
'资金不足,无法划账': InsufficientFunds, # {"code":1001,"message":"资金不足,无法划账"}
'响应超时': RequestTimeout, # {"code":1001,"message":"响应超时"}
},
},
})
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for zb
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
markets = await self.spotV1PublicGetMarkets(params)
#
# {
# "zb_qc":{
# "amountScale":2,
# "minAmount":0.01,
# "minSize":5,
# "priceScale":4,
# },
# }
#
contracts = None
try:
# https://github.com/ZBFuture/docs_en/blob/main/API%20V2%20_en.md#7-public-markethttp
# https://fapi.zb.com/Server/api/v2/config/marketList 502 Bad Gateway
contracts = await self.contractV2PublicGetConfigMarketList(params)
except Exception as e:
contracts = {}
#
# {
# BTC_USDT: {
# symbol: 'BTC_USDT',
# buyerCurrencyId: '6',
# contractType: '1',
# defaultMarginMode: '1',
# marketType: '2',
# historyDBName: 'trade_history_readonly.dbc',
# defaultLeverage: '20',
# id: '100',
# canCancelOrder: True,
# area: '1',
# mixMarginCoinName: 'usdt',
# fundingRateRatio: '0.25',
# marginCurrencyName: 'usdt',
# minTradeMoney: '0.0001',
# enableTime: '1638954000000',
# maxTradeMoney: '10000000',
# canTrade: True,
# maxLeverage: '125',
# defaultPositionsMode: '2',
# onlyWhitelistVisible: False,
# riskWarnRatio: '0.8',
# marginDecimal: '8',
# spot: False,
# status: '1',
# amountDecimal: '3',
# leverage: False,
# minAmount: '0.001',
# canOrder: True,
# duration: '1',
# feeDecimal: '8',
# sellerCurrencyId: '1',
# maxAmount: '1000',
# canOpenPosition: True,
# isSupportMixMargin: False,
# markPriceLimitRate: '0.05',
# marginCurrencyId: '6',
# stopFundingFee: False,
# priceDecimal: '2',
# lightenUpFeeRate: '0',
# futures: True,
# sellerCurrencyName: 'btc',
# marketPriceLimitRate: '0.05',
# canRebate: True,
# marketName: 'BTC_USDT',
# depth: [0.01, 0.1, 1],
# createTime: '1607590430094',
# mixMarginCoinIds: [6],
# buyerCurrencyName: 'usdt',
# stopService: False
# },
# }
#
contractsData = self.safe_value(contracts, 'data', [])
contractsById = self.index_by(contractsData, 'marketName')
dataById = self.deep_extend(contractsById, markets)
keys = list(dataById.keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = dataById[id]
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settleId = self.safe_value(market, 'marginCurrencyName')
settle = self.safe_currency_code(settleId)
spot = settle is None
swap = self.safe_value(market, 'futures', False)
linear = True if swap else None
active = True
symbol = base + '/' + quote
amountPrecisionString = self.safe_string_2(market, 'amountScale', 'amountDecimal')
pricePrecisionString = self.safe_string_2(market, 'priceScale', 'priceDecimal')
if swap:
status = self.safe_string(market, 'status')
active = (status == '1')
symbol = base + '/' + quote + ':' + settle
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': 'swap' if swap else 'spot',
'spot': spot,
'margin': False,
'swap': swap,
'future': False,
'option': False,
'active': active,
'contract': swap,
'linear': linear,
'inverse': not linear if swap else None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(amountPrecisionString)),
'price': self.parse_number(self.parse_precision(pricePrecisionString)),
},
'limits': {
'leverage': {
'min': None,
'max': self.safe_number(market, 'maxLeverage'),
},
'amount': {
'min': self.safe_number(market, 'minAmount'),
'max': self.safe_number(market, 'maxAmount'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number_2(market, 'minSize', 'minTradeMoney'),
'max': self.safe_number(market, 'maxTradeMoney'),
},
},
'info': market,
})
return result
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.spotV1PublicGetGetFeeInfo(params)
#
# {
# "code":1000,
# "message":"success",
# "result":{
# "USDT":[
# {
# "chainName":"TRC20",
# "canWithdraw":true,
# "fee":1.0,
# "mainChainName":"TRX",
# "canDeposit":true
# },
# {
# "chainName":"OMNI",
# "canWithdraw":true,
# "fee":5.0,
# "mainChainName":"BTC",
# "canDeposit":true
# },
# {
# "chainName":"ERC20",
# "canWithdraw":true,
# "fee":15.0,
# "mainChainName":"ETH",
# "canDeposit":true
# }
# ],
# }
# }
#
currencies = self.safe_value(response, 'result', {})
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
code = self.safe_currency_code(id)
isWithdrawEnabled = True
isDepositEnabled = True
fees = {}
for j in range(0, len(currency)):
networkItem = currency[j]
network = self.safe_string(networkItem, 'chainName')
# name = self.safe_string(networkItem, 'name')
withdrawFee = self.safe_number(networkItem, 'fee')
depositEnable = self.safe_value(networkItem, 'canDeposit')
withdrawEnable = self.safe_value(networkItem, 'canWithdraw')
isDepositEnabled = isDepositEnabled or depositEnable
isWithdrawEnabled = isWithdrawEnabled or withdrawEnable
fees[network] = withdrawFee
active = (isWithdrawEnabled and isDepositEnabled)
result[code] = {
'id': id,
'name': None,
'code': code,
'precision': None,
'info': currency,
'active': active,
'deposit': isDepositEnabled,
'withdraw': isWithdrawEnabled,
'fee': None,
'fees': fees,
'limits': self.limits,
}
return result
def parse_balance(self, response):
balances = self.safe_value(response['result'], 'coins')
result = {
'info': response,
}
for i in range(0, len(balances)):
balance = balances[i]
# { enName: "BTC",
# freez: "0.00000000",
# unitDecimal: 8, # always 8
# cnName: "BTC",
# isCanRecharge: True, # TODO: should use self
# unitTag: "฿",
# isCanWithdraw: True, # TODO: should use self
# available: "0.00000000",
# key: "btc" }
account = self.account()
currencyId = self.safe_string(balance, 'key')
code = self.safe_currency_code(currencyId)
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'freez')
result[code] = account
return self.safe_balance(result)
def parse_swap_balance(self, response):
result = {
'info': response,
}
data = self.safe_value(response, 'data', {})
for i in range(0, len(data)):
balance = data[i]
#
# {
# "userId": "6896693805014120448",
# "currencyId": "6",
# "currencyName": "usdt",
# "amount": "30.56585118",
# "freezeAmount": "0",
# "contractType": 1,
# "id": "6899113714763638819",
# "createTime": "1644876888934",
# "modifyTime": "1645787446037",
# "accountBalance": "30.56585118",
# "allMargin": "0",
# "allowTransferOutAmount": "30.56585118"
# },
#
code = self.safe_currency_code(self.safe_string(balance, 'currencyName'))
account = self.account()
account['total'] = self.safe_string(balance, 'accountBalance')
account['free'] = self.safe_string(balance, 'allowTransferOutAmount')
account['used'] = self.safe_string(balance, 'freezeAmount')
result[code] = account
return self.safe_balance(result)
def parse_margin_balance(self, response, marginMode):
result = {
'info': response,
}
levers = None
if marginMode == 'isolated':
message = self.safe_value(response, 'message', {})
data = self.safe_value(message, 'datas', {})
levers = self.safe_value(data, 'levers', [])
else:
crossResponse = self.safe_value(response, 'result', {})
levers = self.safe_value(crossResponse, 'list', [])
for i in range(0, len(levers)):
balance = levers[i]
#
# Isolated Margin
#
# {
# "cNetUSD": "0.00",
# "repayLeverShow": "-",
# "cCanLoanIn": "0.002115400000000",
# "fNetCNY": "147.76081161",
# "fLoanIn": "0.00",
# "repayLevel": 0,
# "level": 1,
# "netConvertCNY": "147.760811613032",
# "cFreeze": "0.00",
# "cUnitTag": "BTC",
# "version": 1646783178609,
# "cAvailableUSD": "0.00",
# "cNetCNY": "0.00",
# "riskRate": "-",
# "fAvailableUSD": "20.49273433",
# "fNetUSD": "20.49273432",
# "cShowName": "BTC",
# "leverMultiple": "5.00",
# "couldTransferOutFiat": "20.49273433",
# "noticeLine": "1.13",
# "fFreeze": "0.00",
# "cUnitDecimal": 8,
# "fCanLoanIn": "81.970937320000000",
# "cAvailable": "0.00",
# "repayLock": False,
# "status": 1,
# "forbidType": 0,
# "totalConvertCNY": "147.760811613032",
# "cAvailableCNY": "0.00",
# "unwindPrice": "0.00",
# "fOverdraft": "0.00",
# "fShowName": "USDT",
# "statusShow": "%E6%AD%A3%E5%B8%B8",
# "cOverdraft": "0.00",
# "netConvertUSD": "20.49273433",
# "cNetBtc": "0.00",
# "loanInConvertCNY": "0.00",
# "fAvailableCNY": "147.760811613032",
# "key": "btcusdt",
# "fNetBtc": "0.0005291",
# "fUnitDecimal": 8,
# "loanInConvertUSD": "0.00",
# "showName": "BTC/USDT",
# "startLine": "1.25",
# "totalConvertUSD": "20.49273433",
# "couldTransferOutCoin": "0.00",
# "cEnName": "BTC",
# "leverMultipleInterest": "3.00",
# "fAvailable": "20.49273433",
# "fEnName": "USDT",
# "forceRepayLine": "1.08",
# "cLoanIn": "0.00"
# }
#
# Cross Margin
#
# [
# {
# "fundType": 2,
# "loanIn": 0,
# "amount": 0,
# "freeze": 0,
# "overdraft": 0,
# "key": "BTC",
# "canTransferOut": 0
# },
# ],
#
account = self.account()
if marginMode == 'isolated':
code = self.safe_currency_code(self.safe_string(balance, 'fShowName'))
account['total'] = self.safe_string(balance, 'fAvailableUSD') # total amount in USD
account['free'] = self.safe_string(balance, 'couldTransferOutFiat')
account['used'] = self.safe_string(balance, 'fFreeze')
result[code] = account
else:
code = self.safe_currency_code(self.safe_string(balance, 'key'))
account['total'] = self.safe_string(balance, 'amount')
account['free'] = self.safe_string(balance, 'canTransferOut')
account['used'] = self.safe_string(balance, 'freeze')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
marketType, query = self.handle_market_type_and_params('fetchBalance', None, params)
margin = (marketType == 'margin')
swap = (marketType == 'swap')
marginMethod = None
defaultMargin = 'isolated' if margin else 'cross'
marginMode = self.safe_string_2(self.options, 'defaultMarginMode', 'marginMode', defaultMargin)
if marginMode == 'isolated':
marginMethod = 'spotV1PrivateGetGetLeverAssetsInfo'
elif marginMode == 'cross':
marginMethod = 'spotV1PrivateGetGetCrossAssets'
method = self.get_supported_mapping(marketType, {
'spot': 'spotV1PrivateGetGetAccountInfo',
'swap': 'contractV2PrivateGetFundBalance',
'margin': marginMethod,
})
request = {
# 'futuresAccountType': 1, # SWAP
# 'currencyId': currency['id'], # SWAP
# 'currencyName': 'usdt', # SWAP
}
if swap:
request['futuresAccountType'] = 1
response = await getattr(self, method)(self.extend(request, query))
#
# Spot
#
# {
# "result": {
# "coins": [
# {
# "isCanWithdraw": "true",
# "canLoan": False,
# "fundstype": 51,
# "showName": "ZB",
# "isCanRecharge": "true",
# "cnName": "ZB",
# "enName": "ZB",
# "available": "0",
# "freez": "0",
# "unitTag": "ZB",
# "key": "zb",
# "unitDecimal": 8
# },
# ],
# "version": 1645856691340,
# "base": {
# "auth_google_enabled": True,
# "auth_mobile_enabled": False,
# "trade_password_enabled": True,
# "username": "blank@gmail.com"
# }
# },
# "leverPerm": True,
# "otcPerm": False,
# "assetPerm": True,
# "moneyPerm": True,
# "subUserPerm": True,
# "entrustPerm": True
# }
#
# Swap
#
# {
# "code": 10000,
# "data": [
# {
# "userId": "6896693805014120448",
# "currencyId": "6",
# "currencyName": "usdt",
# "amount": "30.56585118",
# "freezeAmount": "0",
# "contractType": 1,
# "id": "6899113714763638819",
# "createTime": "1644876888934",
# "modifyTime": "1645787446037",
# "accountBalance": "30.56585118",
# "allMargin": "0",
# "allowTransferOutAmount": "30.56585118"
# },
# ],
# "desc": "操作成功"
# }
#
# Isolated Margin
#
# {
# "code": 1000,
# "message": {
# "des": "success",
# "isSuc": True,
# "datas": {
# "leverPerm": True,
# "levers": [
# {
# "cNetUSD": "0.00",
# "repayLeverShow": "-",
# "cCanLoanIn": "0.002115400000000",
# "fNetCNY": "147.76081161",
# "fLoanIn": "0.00",
# "repayLevel": 0,
# "level": 1,
# "netConvertCNY": "147.760811613032",
# "cFreeze": "0.00",
# "cUnitTag": "BTC",
# "version": 1646783178609,
# "cAvailableUSD": "0.00",
# "cNetCNY": "0.00",
# "riskRate": "-",
# "fAvailableUSD": "20.49273433",
# "fNetUSD": "20.49273432",
# "cShowName": "BTC",
# "leverMultiple": "5.00",
# "couldTransferOutFiat": "20.49273433",
# "noticeLine": "1.13",
# "fFreeze": "0.00",
# "cUnitDecimal": 8,
# "fCanLoanIn": "81.970937320000000",
# "cAvailable": "0.00",
# "repayLock": False,
# "status": 1,
# "forbidType": 0,
# "totalConvertCNY": "147.760811613032",
# "cAvailableCNY": "0.00",
# "unwindPrice": "0.00",
# "fOverdraft": "0.00",
# "fShowName": "USDT",
# "statusShow": "%E6%AD%A3%E5%B8%B8",
# "cOverdraft": "0.00",
# "netConvertUSD": "20.49273433",
# "cNetBtc": "0.00",
# "loanInConvertCNY": "0.00",
# "fAvailableCNY": "147.760811613032",
# "key": "btcusdt",
# "fNetBtc": "0.0005291",
# "fUnitDecimal": 8,
# "loanInConvertUSD": "0.00",
# "showName": "BTC/USDT",
# "startLine": "1.25",
# "totalConvertUSD": "20.49273433",
# "couldTransferOutCoin": "0.00",
# "cEnName": "BTC",
# "leverMultipleInterest": "3.00",
# "fAvailable": "20.49273433",
# "fEnName": "USDT",
# "forceRepayLine": "1.08",
# "cLoanIn": "0.00"
# }
# ]
# }
# }
# }
#
# Cross Margin
#
# {
# "code": 1000,
# "message": "操作成功",
# "result": {
# "loanIn": 0,
# "total": 71.167,
# "riskRate": "-",
# "list" :[
# {
# "fundType": 2,
# "loanIn": 0,
# "amount": 0,
# "freeze": 0,
# "overdraft": 0,
# "key": "BTC",
# "canTransferOut": 0
# },
# ],
# "net": 71.167
# }
# }
#
# todo: use self somehow
# permissions = response['result']['base']
if swap:
return self.parse_swap_balance(response)
elif margin:
return self.parse_margin_balance(response, marginMode)
else:
return self.parse_balance(response)
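    # Usage sketch (hedged, not part of the original file):
    #     balance = await exchange.fetch_balance()                        # spot wallet by default
    #     swap_balance = await exchange.fetch_balance({'type': 'swap'})   # contract wallet
    #     # margin wallets are selected via {'type': 'margin'} together with
    #     # exchange.options['defaultMarginMode'] set to 'isolated' or 'cross'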
def parse_deposit_address(self, depositAddress, currency=None):
#
# fetchDepositAddress
#
# {
# "key": "0x0af7f36b8f09410f3df62c81e5846da673d4d9a9"
# }
#
# fetchDepositAddresses
#
# {
# "blockChain": "btc",
# "isUseMemo": False,
# "address": "1LL5ati6pXHZnTGzHSA3rWdqi4mGGXudwM",
# "canWithdraw": True,
# "canDeposit": True
# }
# {
# "blockChain": "bts",
# "isUseMemo": True,
# "account": "btstest",
# "memo": "123",
# "canWithdraw": True,
# "canDeposit": True
# }
#
address = self.safe_string_2(depositAddress, 'key', 'address')
tag = None
memo = self.safe_string(depositAddress, 'memo')
if memo is not None:
tag = memo
elif address.find('_') >= 0:
parts = address.split('_')
            address = parts[0]  # WARNING: MAY BE tag_address INSTEAD OF address_tag FOR SOME CURRENCIES!
tag = parts[1]
self.check_address(address)
currencyId = self.safe_string(depositAddress, 'blockChain')
code = self.safe_currency_code(currencyId, currency)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': depositAddress,
}
async def fetch_deposit_addresses(self, codes=None, params={}):
await self.load_markets()
response = await self.spotV1PrivateGetGetPayinAddress(params)
#
# {
# "code": 1000,
# "message": {
# "des": "success",
# "isSuc": True,
# "datas": [
# {
# "blockChain": "btc",
# "isUseMemo": False,
# "address": "1LL5ati6pXHZnTGzHSA3rWdqi4mGGXudwM",
# "canWithdraw": True,
# "canDeposit": True
# },
# {
# "blockChain": "bts",
# "isUseMemo": True,
# "account": "btstest",
# "memo": "123",
# "canWithdraw": True,
# "canDeposit": True
# },
# ]
# }
# }
#
message = self.safe_value(response, 'message', {})
datas = self.safe_value(message, 'datas', [])
return self.parse_deposit_addresses(datas, codes)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with self account
:param str code: unified currency code
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.spotV1PrivateGetGetUserAddress(self.extend(request, params))
#
# {
# "code": 1000,
# "message": {
# "des": "success",
# "isSuc": True,
# "datas": {
# "key": "0x0af7f36b8f09410f3df62c81e5846da673d4d9a9"
# }
# }
# }
#
message = self.safe_value(response, 'message', {})
datas = self.safe_value(message, 'datas', {})
return self.parse_deposit_address(datas, currency)
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the zb api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market
"""
await self.load_markets()
market = self.market(symbol)
request = {
# 'market': market['id'], # only applicable to SPOT
# 'symbol': market['id'], # only applicable to SWAP
# 'size': limit, # 1-50 applicable to SPOT and SWAP
# 'merge': 5.0, # float default depth only applicable to SPOT
# 'scale': 5, # int accuracy, only applicable to SWAP
}
marketIdField = 'symbol' if market['swap'] else 'market'
request[marketIdField] = market['id']
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PublicGetDepth',
'swap': 'contractV1PublicGetDepth',
})
if limit is not None:
request['size'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# Spot
#
# {
# "asks":[
# [35000.0,0.2741],
# [34949.0,0.0173],
# [34900.0,0.5004],
# ],
# "bids":[
# [34119.32,0.0030],
# [34107.83,0.1500],
# [34104.42,0.1500],
# ],
# "timestamp":1624536510
# }
#
# Swap
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": {
# "asks": [
# [43416.6,0.02],
# [43418.25,0.04],
# [43425.82,0.02]
# ],
# "bids": [
# [43414.61,0.1],
# [43414.18,0.04],
# [43413.03,0.05]
# ],
# "time": 1645087743071
# }
# }
#
result = None
timestamp = None
if market['type'] == 'swap':
result = self.safe_value(response, 'data')
timestamp = self.safe_integer(result, 'time')
else:
result = response
timestamp = self.safe_timestamp(response, 'timestamp')
return self.parse_order_book(result, symbol, timestamp)
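    # Usage sketch (hedged, not part of the original file):
    #     orderbook = await exchange.fetch_order_book('BTC/USDT', limit=10)        # spot depth
    #     swap_book = await exchange.fetch_order_book('BTC/USDT:USDT', limit=10)   # contract depth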
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, i.e. statistical summaries calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.spotV1PublicGetAllTicker(params)
result = {}
marketsByIdWithoutUnderscore = {}
marketIds = list(self.markets_by_id.keys())
for i in range(0, len(marketIds)):
tickerId = marketIds[i].replace('_', '')
marketsByIdWithoutUnderscore[tickerId] = self.markets_by_id[marketIds[i]]
ids = list(response.keys())
for i in range(0, len(ids)):
market = self.safe_value(marketsByIdWithoutUnderscore, ids[i])
if market is not None:
symbol = market['symbol']
ticker = self.safe_value(response, ids[i])
if ticker is not None:
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
"""
        fetches a price ticker, a statistical summary calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
# 'market': market['id'], # only applicable to SPOT
# 'symbol': market['id'], # only applicable to SWAP
}
marketIdField = 'symbol' if market['swap'] else 'market'
request[marketIdField] = market['id']
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PublicGetTicker',
'swap': 'contractV1PublicGetTicker',
})
response = await getattr(self, method)(self.extend(request, params))
#
# Spot
#
# {
# "date":"1624399623587",
# "ticker":{
# "high":"33298.38",
# "vol":"56152.9012",
# "last":"32578.55",
# "low":"28808.19",
# "buy":"32572.68",
# "sell":"32615.37",
# "turnover":"1764201303.6100",
# "open":"31664.85",
# "riseRate":"2.89"
# }
# }
#
# Swap
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": {
# "BTC_USDT": [44053.47,44357.77,42911.54,43297.79,53471.264,-1.72,1645093002,302201.255084]
# }
# }
#
ticker = None
if market['type'] == 'swap':
ticker = {}
data = self.safe_value(response, 'data')
values = self.safe_value(data, market['id'], [])
for i in range(0, len(values)):
ticker['open'] = self.safe_value(values, 0)
ticker['high'] = self.safe_value(values, 1)
ticker['low'] = self.safe_value(values, 2)
ticker['last'] = self.safe_value(values, 3)
ticker['vol'] = self.safe_value(values, 4)
ticker['riseRate'] = self.safe_value(values, 5)
else:
ticker = self.safe_value(response, 'ticker', {})
ticker['date'] = self.safe_value(response, 'date')
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# Spot
#
# {
# "date":"1624399623587", # injected from outside
# "high":"33298.38",
# "vol":"56152.9012",
# "last":"32578.55",
# "low":"28808.19",
# "buy":"32572.68",
# "sell":"32615.37",
# "turnover":"1764201303.6100",
# "open":"31664.85",
# "riseRate":"2.89"
# }
#
# Swap
#
# {
# open: 44083.82,
# high: 44357.77,
# low: 42911.54,
# last: 43097.87,
# vol: 53451.641,
# riseRate: -2.24
# }
#
timestamp = self.safe_integer(ticker, 'date', self.milliseconds())
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': self.safe_symbol(None, market),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': self.safe_string(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}, market)
def parse_ohlcv(self, ohlcv, market=None):
if market['swap']:
ohlcvLength = len(ohlcv)
if ohlcvLength > 5:
return [
self.safe_timestamp(ohlcv, 5),
self.safe_number(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
]
else:
return [
self.safe_timestamp(ohlcv, 4),
self.safe_number(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
None,
]
else:
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the zb api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
swap = market['swap']
spot = market['spot']
options = self.safe_value(self.options, 'timeframes', {})
timeframes = self.safe_value(options, market['type'], {})
timeframeValue = self.safe_string(timeframes, timeframe)
if timeframeValue is None:
raise NotSupported(self.id + ' fetchOHLCV() does not support ' + timeframe + ' timeframe for ' + market['type'] + ' markets')
if limit is None:
limit = 1000
request = {
# 'market': market['id'], # spot only
# 'symbol': market['id'], # swap only
# 'type': timeframeValue, # spot only
# 'period': timeframeValue, # swap only
# 'since': since, # spot only
# 'size': limit, # spot and swap
}
marketIdField = 'symbol' if swap else 'market'
request[marketIdField] = market['id']
periodField = 'period' if swap else 'type'
request[periodField] = timeframeValue
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PublicGetKline',
'swap': 'contractV1PublicGetKline',
})
if swap:
if price == 'mark':
method = 'contractV1PublicGetMarkKline'
elif price == 'index':
method = 'contractV1PublicGetIndexKline'
elif spot:
if since is not None:
request['since'] = since
if limit is not None:
request['size'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# Spot
#
# {
# "symbol": "BTC",
# "data": [
# [1645091400000,43183.24,43187.49,43145.92,43182.28,0.9110],
# [1645091460000,43182.18,43183.15,43182.06,43183.15,1.4393],
# [1645091520000,43182.11,43240.1,43182.11,43240.1,0.3802]
# ],
# "moneyType": "USDT"
# }
#
# Swap
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": [
# [41433.44,41433.44,41405.88,41408.75,21.368,1646366460],
# [41409.25,41423.74,41408.8,41423.42,9.828,1646366520],
# [41423.96,41429.39,41369.98,41370.31,123.104,1646366580]
# ]
# }
#
# Mark
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": [
# [41603.39,41603.39,41591.59,41600.81,1646381760],
# [41600.36,41605.75,41587.69,41601.97,1646381820],
# [41601.97,41601.97,41562.62,41593.96,1646381880]
# ]
# }
#
# Index
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": [
# [41697.53,41722.29,41689.16,41689.16,1646381640],
# [41690.1,41691.73,41611.61,41611.61,1646381700],
# [41611.61,41619.49,41594.87,41594.87,1646381760]
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
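    # Usage sketch (hedged, not part of the original file):
    #     candles = await exchange.fetch_ohlcv('BTC/USDT', '1h', limit=100)
    #     mark_candles = await exchange.fetch_ohlcv('BTC/USDT:USDT', '1h', params={'price': 'mark'})
    #     # each entry is ordered as [timestamp, open, high, low, close, volume]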
def parse_trade(self, trade, market=None):
#
# Spot
#
# {
# "date":1624537391,
# "amount":"0.0142",
# "price":"33936.42",
# "trade_type":"ask",
# "type":"sell",
# "tid":1718869018
# }
#
# Swap
#
# {
# "amount": "0.002",
# "createTime": "1645787446034",
# "feeAmount": "-0.05762699",
# "feeCurrency": "USDT",
# "id": "6902932868050395136",
# "maker": False,
# "orderId": "6902932868042006528",
# "price": "38417.99",
# "relizedPnl": "0.30402",
# "side": 4,
# "userId": "6896693805014120448"
# },
#
sideField = 'side' if market['swap'] else 'trade_type'
side = self.safe_string(trade, sideField)
takerOrMaker = None
maker = self.safe_value(trade, 'maker')
if maker is not None:
takerOrMaker = 'maker' if maker else 'taker'
if market['spot']:
side = 'buy' if (side == 'bid') else 'sell'
else:
if side == '3':
side = 'sell' # close long
elif side == '4':
side = 'buy' # close short
elif side == '1':
side = 'buy' # open long
elif side == '2':
side = 'sell' # open short
timestamp = None
if market['swap']:
timestamp = self.safe_integer(trade, 'createTime')
else:
timestamp = self.safe_timestamp(trade, 'date')
price = self.safe_string(trade, 'price')
amount = self.safe_string(trade, 'amount')
fee = None
feeCostString = self.safe_string(trade, 'feeAmount')
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
fee = {
'cost': feeCostString,
'currency': self.safe_currency_code(feeCurrencyId),
}
market = self.safe_market(None, market)
return self.safe_trade({
'info': trade,
'id': self.safe_string(trade, 'tid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': side,
'order': self.safe_string(trade, 'orderId'),
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': None,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
swap = market['swap']
request = {
# 'market': market['id'], # SPOT
# 'symbol': market['id'], # SWAP
# 'side': 1, # SWAP
# 'dateRange': 0, # SWAP
# 'startTime': since, # SWAP
# 'endtime': self.milliseconds(), # SWAP
# 'pageNum': 1, # SWAP
# 'pageSize': limit, # SWAP default is 10
}
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startTime'] = since
marketIdField = 'symbol' if swap else 'market'
request[marketIdField] = market['id']
        if swap and self.safe_value(params, 'pageNum') is None:
request['pageNum'] = 1
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PublicGetTrades',
'swap': 'contractV2PrivateGetTradeTradeHistory',
})
response = await getattr(self, method)(self.extend(request, params))
#
# Spot
#
# [
# {"date":1624537391,"amount":"0.0142","price":"33936.42","trade_type":"ask","type":"sell","tid":1718869018},
# {"date":1624537391,"amount":"0.0010","price":"33936.42","trade_type":"ask","type":"sell","tid":1718869020},
# {"date":1624537391,"amount":"0.0133","price":"33936.42","trade_type":"ask","type":"sell","tid":1718869021},
# ]
#
# Swap
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "amount": "0.002",
# "createTime": "1645787446034",
# "feeAmount": "-0.05762699",
# "feeCurrency": "USDT",
# "id": "6902932868050395136",
# "maker": False,
# "orderId": "6902932868042006528",
# "price": "38417.99",
# "relizedPnl": "0.30402",
# "side": 4,
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if swap:
data = self.safe_value(response, 'data')
response = self.safe_value(data, 'list')
return self.parse_trades(response, market, since, limit)
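    # Usage sketch (illustrative only, not part of the exchange class; the symbols below assume
    # ccxt's unified naming, e.g. 'BTC/USDT' for spot and 'BTC/USDT:USDT' for the linear swap):
    #
    #     import ccxt.async_support as ccxt
    #     exchange = ccxt.zb({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #     spot_trades = await exchange.fetch_trades('BTC/USDT', limit=50)       # public endpoint
    #     swap_trades = await exchange.fetch_trades('BTC/USDT:USDT', limit=10)  # private swap trade history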
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
swap = market['swap']
spot = market['spot']
timeInForce = self.safe_string(params, 'timeInForce')
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
stopPrice = self.safe_number_2(params, 'triggerPrice', 'stopPrice')
if type == 'market':
raise InvalidOrder(self.id + ' createOrder() on ' + market['type'] + ' markets does not allow market orders')
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PrivateGetOrder',
'swap': 'contractV2PrivatePostTradeOrder',
})
request = {
'amount': self.amount_to_precision(symbol, amount),
# 'symbol': market['id'],
# 'acctType': 0, # Spot, Margin 0/1/2 [Spot/Isolated/Cross] Optional, Default to: 0 Spot
# 'customerOrderId': '1f2g', # Spot, Margin
# 'orderType': 1, # Spot, Margin order type 1/2 [PostOnly/IOC] Optional
# 'triggerPrice': 30000.0, # Stop trigger price
# 'algoPrice': 29000.0, # Stop order price
# 'priceType': 1, # Stop Loss Take Profit, 1: Mark price, 2: Last price
# 'bizType': 1, # Stop Loss Take Profit, 1: TP, 2: SL
}
if stop or stopPrice:
method = 'contractV2PrivatePostTradeOrderAlgo'
orderType = self.safe_integer(params, 'orderType')
priceType = self.safe_integer(params, 'priceType')
bizType = self.safe_integer(params, 'bizType')
algoPrice = self.safe_number(params, 'algoPrice')
request['symbol'] = market['id']
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if type == 'trigger' or orderType == 1:
request['orderType'] = 1
elif type == 'stop loss' or type == 'take profit' or orderType == 2 or priceType or bizType:
request['orderType'] = 2
request['priceType'] = priceType
request['bizType'] = bizType
request['triggerPrice'] = self.price_to_precision(symbol, stopPrice)
request['algoPrice'] = self.price_to_precision(symbol, algoPrice)
else:
if price:
request['price'] = self.price_to_precision(symbol, price)
if spot:
request['tradeType'] = '1' if (side == 'buy') else '0'
request['currency'] = market['id']
if timeInForce is not None:
if timeInForce == 'PO':
request['orderType'] = 1
elif timeInForce == 'IOC':
request['orderType'] = 2
else:
raise InvalidOrder(self.id + ' createOrder() on ' + market['type'] + ' markets does not allow ' + timeInForce + ' orders')
elif swap:
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
if type == 'limit':
request['action'] = 1
elif timeInForce == 'IOC':
request['action'] = 3
elif timeInForce == 'PO':
request['action'] = 4
elif timeInForce == 'FOK':
request['action'] = 5
else:
request['action'] = type
request['symbol'] = market['id']
clientOrderId = self.safe_string(params, 'clientOrderId') # OPTIONAL '^[a-zA-Z0-9-_]{1,36}$', # The user-defined order number
if clientOrderId is not None:
request['clientOrderId'] = clientOrderId
        # the API param is named 'extend', which clashes with self.extend in python, so it is read explicitly and re-attached below
extendOrderAlgos = self.safe_value(params, 'extend', None) # OPTIONAL {"orderAlgos":[{"bizType":1,"priceType":1,"triggerPrice":"70000"},{"bizType":2,"priceType":1,"triggerPrice":"40000"}]}
if extendOrderAlgos is not None:
request['extend'] = extendOrderAlgos
query = self.omit(params, ['reduceOnly', 'stop', 'stopPrice', 'orderType', 'triggerPrice', 'algoPrice', 'priceType', 'bizType', 'clientOrderId', 'extend'])
response = await getattr(self, method)(self.extend(request, query))
#
# Spot
#
# {
# "code": 1000,
# "message": "操作成功",
# "id": "202202224851151555"
# }
#
# Swap
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": {
# "orderId": "6901786759944937472",
# "orderCode": null
# }
# }
#
# Algo order
#
# {
# "code": 10000,
# "data": "6919884551305242624",
# "desc": "操作成功"
# }
#
if (swap) and (not stop) and (stopPrice is None):
response = self.safe_value(response, 'data')
response['timeInForce'] = timeInForce
        if spot:
            # the raw spot response does not echo the side, add it back so parse_order can read it as 'type'
            response['type'] = '1' if (side == 'buy') else '0'
response['total_amount'] = amount
response['price'] = price
return self.parse_order(response, market)
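    # Usage sketch (illustrative; prices, amounts and symbols are placeholders, assuming an
    # authenticated async ccxt zb instance named `exchange`):
    #
    #     # plain limit order on spot (market orders are rejected above)
    #     order = await exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 30000)
    #     # swap plan (trigger) order: triggerPrice arms it, algoPrice is the price of the resulting order
    #     algo = await exchange.create_order('BTC/USDT:USDT', 'trigger', 'buy', 0.002, None,
    #                                        {'triggerPrice': 29000, 'algoPrice': 28500})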
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
swap = market['swap']
request = {
# 'currency': self.market_id(symbol), # only applicable to SPOT
# 'id': str(id), # only applicable to SPOT
# 'symbol': self.market_id(symbol), # only applicable to SWAP
# 'orderId': str(id), # only applicable to SWAP
# 'clientOrderId': params['clientOrderId'], # only applicable to SWAP
}
marketIdField = 'symbol' if swap else 'currency'
request[marketIdField] = self.market_id(symbol)
orderIdField = 'orderId' if swap else 'id'
request[orderIdField] = str(id)
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PrivateGetCancelOrder',
'swap': 'contractV2PrivatePostTradeCancelOrder',
})
response = await getattr(self, method)(self.extend(request, params))
#
# Spot
#
# {
# "code": 1000,
# "message": "Success。"
# }
#
# Swap
#
# {
# "code": 10007,
# "desc": "orderId与clientOrderId选填1个"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders in a market
:param str symbol: unified market symbol of the market to cancel orders in
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
stop = self.safe_value(params, 'stop')
if market['spot']:
raise NotSupported(self.id + ' cancelAllOrders() is not supported on ' + market['type'] + ' markets')
request = {
'symbol': market['id'],
# 'ids': [6904603200733782016, 6819506476072247297], # STOP
# 'side': params['side'], # STOP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
}
method = 'contractV2PrivatePostTradeCancelAllOrders'
if stop:
method = 'contractV2PrivatePostTradeCancelAlgos'
query = self.omit(params, 'stop')
return await getattr(self, method)(self.extend(request, query))
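    # Usage sketch (illustrative; swap markets only, as enforced above; the id in the stop example
    # is a placeholder copied from the request comment):
    #
    #     await exchange.cancel_all_orders('BTC/USDT:USDT')
    #     await exchange.cancel_all_orders('BTC/USDT:USDT', {'stop': True, 'ids': [6904603200733782016]})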
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: the order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
swap = market['swap']
request = {
# 'currency': self.market_id(symbol), # only applicable to SPOT
# 'id': str(id), # only applicable to SPOT
# 'orderId': str(id), # only applicable to SWAP
# 'clientOrderId': params['clientOrderId'], # only applicable to SWAP
# 'symbol': market['id'], # STOP and SWAP
# 'side': params['side'], # STOP and SWAP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
# 'orderType': 1, # STOP, 1: Plan order, 2: SP/SL
# 'bizType': 1, # Plan order, 1: TP, 2: SL
# 'status': 1, # STOP, 1: untriggered, 2: cancelled, 3:triggered, 4:failed, 5:completed
# 'startTime': since, # STOP and SWAP
# 'endTime': params['endTime'], # STOP and SWAP
# 'pageNum': 1, # STOP and SWAP, default 1
# 'pageSize': limit, # STOP, default 10
}
marketIdField = 'symbol' if swap else 'currency'
request[marketIdField] = self.market_id(symbol)
orderIdField = 'orderId' if swap else 'id'
request[orderIdField] = str(id)
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PrivateGetGetOrder',
'swap': 'contractV2PrivateGetTradeGetOrder',
})
if stop:
method = 'contractV2PrivateGetTradeGetOrderAlgos'
orderType = self.safe_integer(params, 'orderType')
if orderType is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires an orderType parameter for stop orders')
side = self.safe_integer(params, 'side')
bizType = self.safe_integer(params, 'bizType')
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if orderType == 1:
request['orderType'] = 1
elif orderType == 2 or bizType:
request['orderType'] = 2
request['bizType'] = bizType
query = self.omit(params, ['reduceOnly', 'stop', 'side', 'orderType', 'bizType'])
response = await getattr(self, method)(self.extend(request, query))
#
# Spot
#
# {
# 'total_amount': 0.01,
# 'id': '20180910244276459',
# 'price': 180.0,
# 'trade_date': 1536576744960,
# 'status': 2,
# 'trade_money': '1.96742',
# 'trade_amount': 0.01,
# 'type': 0,
# 'currency': 'eth_usdt'
# }
#
# Swap
#
# {
# "code": 10000,
# "data": {
# "action": 1,
# "amount": "0.002",
# "availableAmount": "0.002",
# "availableValue": "60",
# "avgPrice": "0",
# "canCancel": True,
# "cancelStatus": 20,
# "createTime": "1646185684379",
# "entrustType": 1,
# "id": "6904603200733782016",
# "leverage": 2,
# "margin": "30",
# "marketId": "100",
# "modifyTime": "1646185684416",
# "price": "30000",
# "priority": 0,
# "showStatus": 1,
# "side": 1,
# "sourceType": 4,
# "status": 12,
# "tradeAmount": "0",
# "tradeValue": "0",
# "type": 1,
# "userId": "6896693805014120448",
# "value": "60"
# },
# "desc":"操作成功"
# }
#
# Algo order
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 1,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if stop:
data = self.safe_value(response, 'data', {})
response = self.safe_value(data, 'list', [])
result = []
for i in range(0, len(response)):
entry = response[i]
algoId = self.safe_string(entry, 'id')
if id == algoId:
result.append(entry)
response = result[0]
if swap and not stop:
response = self.safe_value(response, 'data', {})
return self.parse_order(response, market)
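    # Usage sketch (illustrative ids; algo/stop orders additionally require the orderType param,
    # as enforced above):
    #
    #     order = await exchange.fetch_order('6902932868042006528', 'BTC/USDT:USDT')
    #     algo = await exchange.fetch_order('6920240642849449984', 'BTC/USDT:USDT', {'stop': True, 'orderType': 1})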
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
swap = market['swap']
request = {
'pageSize': limit, # default pageSize is 50 for spot, 30 for swap
# 'currency': market['id'], # only applicable to SPOT
# 'pageIndex': 1, # only applicable to SPOT
# 'type': params['type'], # only applicable to SWAP
# 'dateRange': params['dateRange'], # only applicable to SWAP
# 'action': params['action'], # only applicable to SWAP
# 'symbol': market['id'], # STOP and SWAP
# 'side': params['side'], # STOP and SWAP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
# 'orderType': 1, # STOP, 1: Plan order, 2: SP/SL
# 'bizType': 1, # Plan order, 1: TP, 2: SL
# 'status': 1, # STOP, 1: untriggered, 2: cancelled, 3:triggered, 4:failed, 5:completed
# 'startTime': since, # STOP and SWAP
# 'endTime': params['endTime'], # STOP and SWAP
# 'pageNum': 1, # STOP and SWAP, default 1
# 'pageSize': limit, # STOP, default 10
}
marketIdField = 'symbol' if market['swap'] else 'currency'
request[marketIdField] = market['id']
pageNumField = 'pageNum' if market['swap'] else 'pageIndex'
request[pageNumField] = 1
if swap:
request['startTime'] = since
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PrivateGetGetOrdersIgnoreTradeType',
'swap': 'contractV2PrivateGetTradeGetAllOrders',
})
        # tradeType (trade type) 1/0 [buy/sell]
if 'tradeType' in params:
method = 'spotV1PrivateGetGetOrdersNew'
if stop:
method = 'contractV2PrivateGetTradeGetOrderAlgos'
orderType = self.safe_integer(params, 'orderType')
if orderType is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires an orderType parameter for stop orders')
side = self.safe_integer(params, 'side')
bizType = self.safe_integer(params, 'bizType')
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if orderType == 1:
request['orderType'] = 1
elif orderType == 2 or bizType:
request['orderType'] = 2
request['bizType'] = bizType
query = self.omit(params, ['reduceOnly', 'stop', 'side', 'orderType', 'bizType'])
response = None
try:
response = await getattr(self, method)(self.extend(request, query))
except Exception as e:
if isinstance(e, OrderNotFound):
return []
raise e
# Spot
#
# [
# {
# "acctType": 0,
# "currency": "btc_usdt",
# "fees": 0,
# "id": "202202234857482656",
# "price": 30000.0,
# "status": 3,
# "total_amount": 0.0006,
# "trade_amount": 0.0000,
# "trade_date": 1645610254524,
# "trade_money": 0.000000,
# "type": 1,
# "useZbFee": False,
# "webId": 0
# }
# ]
#
# Swap
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "amount": "0.004",
# "availableAmount": "0.004",
# "availableValue": "120",
# "avgPrice": "0",
# "canCancel": True,
# "cancelStatus": 20,
# "createTime": "1645609643885",
# "entrustType": 1,
# "id": "6902187111785635850",
# "leverage": 5,
# "margin": "24",
# "marketId": "100",
# "marketName": "BTC_USDT",
# "modifyTime": "1645609643889",
# "price": "30000",
# "showStatus": 1,
# "side": 1,
# "sourceType": 1,
# "status": 12,
# "tradeAmount": "0",
# "tradeValue": "0",
# "type": 1,
# "userId": "6896693805014120448",
# "value": "120"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
# Algo order
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 1,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if swap:
data = self.safe_value(response, 'data', {})
response = self.safe_value(data, 'list', [])
return self.parse_orders(response, market, since, limit)
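    # Usage sketch (illustrative; a symbol is mandatory, timestamps are in milliseconds, and stop
    # orders need the orderType param):
    #
    #     orders = await exchange.fetch_orders('BTC/USDT:USDT', since=1645609643885, limit=30)
    #     algos = await exchange.fetch_orders('BTC/USDT:USDT', since=1645609643885, params={'stop': True, 'orderType': 1})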
async def fetch_canceled_orders(self, symbol=None, since=None, limit=10, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchCanceledOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
request = {
'pageSize': limit, # SPOT and STOP, default pageSize is 10, doesn't work with other values now
# 'currency': market['id'], # SPOT
# 'pageIndex': 1, # SPOT, default pageIndex is 1
# 'symbol': market['id'], # STOP
# 'side': params['side'], # STOP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
# 'orderType': 1, # STOP, 1: Plan order, 2: SP/SL
# 'bizType': 1, # Plan order, 1: TP, 2: SL
# 'status': 1, # STOP, 1: untriggered, 2: cancelled, 3:triggered, 4:failed, 5:completed
# 'startTime': since, # STOP
# 'endTime': params['endTime'], # STOP
# 'pageNum': 1, # STOP, default 1
}
marketIdField = 'currency' if market['spot'] else 'symbol'
request[marketIdField] = market['id']
pageNumField = 'pageIndex' if market['spot'] else 'pageNum'
request[pageNumField] = 1
method = 'spotV1PrivateGetGetOrdersIgnoreTradeType'
if stop:
method = 'contractV2PrivateGetTradeGetOrderAlgos'
orderType = self.safe_integer(params, 'orderType')
if orderType is None:
raise ArgumentsRequired(self.id + ' fetchCanceledOrders() requires an orderType parameter for stop orders')
side = self.safe_integer(params, 'side')
bizType = self.safe_integer(params, 'bizType')
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if orderType == 1:
request['orderType'] = 1
elif orderType == 2 or bizType:
request['orderType'] = 2
request['bizType'] = bizType
request['status'] = 2
        # tradeType (trade type) 1/0 [buy/sell]
if 'tradeType' in params:
method = 'spotV1PrivateGetGetOrdersNew'
        query = self.omit(params, ['reduceOnly', 'stop', 'side', 'orderType', 'bizType'])
        response = None
        try:
            response = await getattr(self, method)(self.extend(request, query))
        except Exception as e:
            if isinstance(e, OrderNotFound):
                return []
            raise e
#
# Spot
#
# [
# {
# "acctType": 0,
# "currency": "btc_usdt",
# "fees": 0,
# "id": "202202234857482656",
# "price": 30000.0,
# "status": 1,
# "total_amount": 0.0006,
# "trade_amount": 0.0000,
# "trade_date": 1645610254524,
# "trade_money": 0.000000,
# "type": 1,
# "useZbFee": False,
# "webId": 0
# }
# ]
#
# Algo order
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 2,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if stop:
data = self.safe_value(response, 'data', {})
response = self.safe_value(data, 'list', [])
result = []
if market['type'] == 'spot':
for i in range(0, len(response)):
entry = response[i]
status = self.safe_string(entry, 'status')
if status == '1':
result.append(entry)
response = result
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=10, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
request = {
'pageSize': limit, # SPOT and STOP, default pageSize is 10, doesn't work with other values now
# 'currency': market['id'], # SPOT
# 'pageIndex': 1, # SPOT, default pageIndex is 1
# 'symbol': market['id'], # STOP
# 'side': params['side'], # STOP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
# 'orderType': 1, # STOP, 1: Plan order, 2: SP/SL
# 'bizType': 1, # Plan order, 1: TP, 2: SL
# 'status': 1, # STOP, 1: untriggered, 2: cancelled, 3:triggered, 4:failed, 5:completed
# 'startTime': since, # STOP
# 'endTime': params['endTime'], # STOP
# 'pageNum': 1, # STOP, default 1
}
marketIdField = 'currency' if market['spot'] else 'symbol'
request[marketIdField] = market['id']
pageNumField = 'pageIndex' if market['spot'] else 'pageNum'
request[pageNumField] = 1
method = 'spotV1PrivateGetGetFinishedAndPartialOrders'
if stop:
method = 'contractV2PrivateGetTradeGetOrderAlgos'
orderType = self.safe_integer(params, 'orderType')
if orderType is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires an orderType parameter for stop orders')
side = self.safe_integer(params, 'side')
bizType = self.safe_integer(params, 'bizType')
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if orderType == 1:
request['orderType'] = 1
elif orderType == 2 or bizType:
request['orderType'] = 2
request['bizType'] = bizType
request['status'] = 5
query = self.omit(params, ['reduceOnly', 'stop', 'side', 'orderType', 'bizType'])
response = await getattr(self, method)(self.extend(request, query))
#
# Spot
#
# [
# {
# "acctType": 0,
# "currency": "btc_usdt",
# "fees": 0.00823354,
# "id": "202204145086706337",
# "price": 41167.7,
# "status": 2,
# "total_amount": 0.0001,
# "trade_amount": 0.0001,
# "trade_date": 1649917867370,
# "trade_money": 4.116770,
# "type": 0,
# "useZbFee": False,
# "webId": 0
# },
# ]
#
# Algo order
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 1,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if stop:
data = self.safe_value(response, 'data', {})
response = self.safe_value(data, 'list', [])
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
reduceOnly = self.safe_value(params, 'reduceOnly')
stop = self.safe_value(params, 'stop')
swap = market['swap']
request = {
# 'pageSize': limit, # default pageSize is 10 for spot, 30 for swap
# 'currency': market['id'], # SPOT
# 'pageIndex': 1, # SPOT
# 'symbol': market['id'], # SWAP and STOP
# 'pageNum': 1, # SWAP and STOP, default 1
# 'type': params['type'], # swap only
# 'side': params['side'], # SWAP and STOP, for stop orders: 1 Open long(buy), 2 Open short(sell), 3 Close long(sell), 4 Close Short(Buy). One-Way Positions: 5 Buy, 6 Sell, 0 Close Only
# 'action': params['action'], # SWAP
# 'orderType': 1, # STOP, 1: Plan order, 2: SP/SL
# 'bizType': 1, # Plan order, 1: TP, 2: SL
# 'status': 1, # STOP, 1: untriggered, 2: cancelled, 3:triggered, 4:failed, 5:completed
# 'startTime': since, # SWAP and STOP
# 'endTime': params['endTime'], # STOP
}
if limit is not None:
request['pageSize'] = limit # default pageSize is 10 for spot, 30 for swap
marketIdField = 'symbol' if market['swap'] else 'currency'
request[marketIdField] = market['id']
pageNumField = 'pageNum' if market['swap'] else 'pageIndex'
request[pageNumField] = 1
if swap and (since is not None):
request['startTime'] = since
method = self.get_supported_mapping(market['type'], {
'spot': 'spotV1PrivateGetGetUnfinishedOrdersIgnoreTradeType',
'swap': 'contractV2PrivateGetTradeGetUndoneOrders',
})
if stop:
method = 'contractV2PrivateGetTradeGetOrderAlgos'
orderType = self.safe_integer(params, 'orderType')
if orderType is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires an orderType parameter for stop orders')
side = self.safe_integer(params, 'side')
bizType = self.safe_integer(params, 'bizType')
if side == 'sell' and reduceOnly:
request['side'] = 3 # close long
elif side == 'buy' and reduceOnly:
request['side'] = 4 # close short
elif side == 'buy':
request['side'] = 1 # open long
elif side == 'sell':
request['side'] = 2 # open short
elif side == 5:
request['side'] = 5 # one way position buy
elif side == 6:
request['side'] = 6 # one way position sell
elif side == 0:
request['side'] = 0 # one way position close only
if orderType == 1:
request['orderType'] = 1
elif orderType == 2 or bizType:
request['orderType'] = 2
request['bizType'] = bizType
request['status'] = 1
query = self.omit(params, ['reduceOnly', 'stop', 'side', 'orderType', 'bizType'])
        # tradeType (trade type) 1/0 [buy/sell]
if 'tradeType' in params:
method = 'spotV1PrivateGetGetOrdersNew'
response = None
try:
response = await getattr(self, method)(self.extend(request, query))
except Exception as e:
if isinstance(e, OrderNotFound):
return []
raise e
#
# Spot
#
# [
# {
# "currency": "btc_usdt",
# "id": "20150928158614292",
# "price": 1560,
# "status": 3,
# "total_amount": 0.1,
# "trade_amount": 0,
# "trade_date": 1443410396717,
# "trade_money": 0,
# "type": 0,
# "fees": "0.03",
# "useZbFee": True
# },
# ]
#
# Swap
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "amount": "0.003",
# "availableAmount": "0.003",
# "availableValue": "90",
# "avgPrice": "0",
# "canCancel": True,
# "cancelStatus": 20,
# "createTime": "1645694610880",
# "entrustType": 1,
# "id": "6902543489192632320",
# "leverage": 5,
# "margin": "18",
# "marketId": "100",
# "modifyTime": "1645694610883",
# "price": "30000",
# "priority": 0,
# "showStatus": 1,
# "side": 1,
# "sourceType": 1,
# "status": 12,
# "tradeAmount": "0",
# "tradeValue": "0",
# "type": 1,
# "userId": "6896693805014120448",
# "value": "90"
# }
# ],
# "pageNum": 1,
# "pageSize": 30
# },
# "desc": "操作成功"
# }
#
# Algo order
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 1,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
if swap:
data = self.safe_value(response, 'data', {})
response = self.safe_value(data, 'list', [])
return self.parse_orders(response, market, since, limit)
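    # Usage sketch (illustrative; same stop-order convention as fetch_orders above):
    #
    #     open_orders = await exchange.fetch_open_orders('BTC/USDT', limit=10)
    #     open_algos = await exchange.fetch_open_orders('BTC/USDT:USDT', params={'stop': True, 'orderType': 1})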
def parse_order(self, order, market=None):
#
# Spot fetchOrder, fetchClosedOrders
#
# {
# acctType: 0,
# currency: 'btc_usdt',
# fees: 3.6e-7,
# id: '202102282829772463',
# price: 45177.5,
# status: 2,
# total_amount: 0.0002,
# trade_amount: 0.0002,
# trade_date: 1614515104998,
# trade_money: 8.983712,
# type: 1,
# useZbFee: False
# },
#
# Swap fetchOrder
#
# {
# "action": 1,
# "amount": "0.002",
# "availableAmount": "0.002",
# "availableValue": "60",
# "avgPrice": "0",
# "canCancel": True,
# "cancelStatus": 20,
# "createTime": "1646185684379",
# "entrustType": 1,
# "id": "6904603200733782016",
# "leverage": 2,
# "margin": "30",
# "marketId": "100",
# "modifyTime": "1646185684416",
# "price": "30000",
# "priority": 0,
# "showStatus": 1,
# "side": 1,
# "sourceType": 4,
# "status": 12,
# "tradeAmount": "0",
# "tradeValue": "0",
# "type": 1,
# "userId": "6896693805014120448",
# "value": "60"
# },
#
# Algo fetchOrder, fetchOrders, fetchOpenOrders, fetchClosedOrders
#
# {
# "action": 1,
# "algoPrice": "30000",
# "amount": "0.003",
# "bizType": 0,
# "canCancel": True,
# "createTime": "1649913941109",
# "errorCode": 0,
# "id": "6920240642849449984",
# "isLong": False,
# "leverage": 10,
# "marketId": "100",
# "modifyTime": "1649913941109",
# "orderType": 1,
# "priceType": 2,
# "side": 5,
# "sourceType": 4,
# "status": 1,
# "submitPrice": "41270.53",
# "symbol": "BTC_USDT",
# "tradedAmount": "0",
# "triggerCondition": "<=",
# "triggerPrice": "31000",
# "triggerTime": "0",
# "userId": "6896693805014120448"
# },
#
# Spot createOrder
#
# {
# code: '1000',
# message: '操作成功',
# id: '202202224851151555',
# type: '1',
# total_amount: 0.0002,
# price: 30000
# }
#
# Swap createOrder
#
# {
# orderId: '6901786759944937472',
# orderCode: null,
# timeInForce: 'IOC',
# total_amount: 0.0002,
# price: 30000
# }
#
# Algo createOrder
#
# {
# "code": 10000,
# "data": "6919884551305242624",
# "desc": "操作成功"
# }
#
orderId = self.safe_value(order, 'orderId') if market['swap'] else self.safe_value(order, 'id')
if orderId is None:
orderId = self.safe_value(order, 'id')
        side = self.safe_integer_2(order, 'type', 'side')
        if side is not None:
            if market['type'] == 'spot':
                side = 'buy' if (side == 1) else 'sell'
timestamp = self.safe_integer(order, 'trade_date')
if timestamp is None:
timestamp = self.safe_integer(order, 'createTime')
marketId = self.safe_string(order, 'currency')
market = self.safe_market(marketId, market, '_')
price = self.safe_string_2(order, 'price', 'algoPrice')
filled = self.safe_string(order, 'tradeAmount') if market['swap'] else self.safe_string(order, 'trade_amount')
amount = self.safe_string(order, 'total_amount')
if amount is None:
amount = self.safe_string(order, 'amount')
cost = self.safe_string(order, 'trade_money')
status = self.parse_order_status(self.safe_string(order, 'status'), market)
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = (timeInForce == 'PO')
feeCost = self.safe_number(order, 'fees')
fee = None
if feeCost is not None:
feeCurrency = None
zbFees = self.safe_value(order, 'useZbFee')
if zbFees is True:
feeCurrency = 'ZB'
else:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return self.safe_order({
'info': order,
'id': orderId,
'clientOrderId': self.safe_string(order, 'userId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': 'limit', # market order is not available on ZB
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': self.safe_string(order, 'triggerPrice'),
'average': self.safe_string(order, 'avgPrice'),
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def parse_order_status(self, status, market=None):
statuses = {}
if market['type'] == 'spot':
statuses = {
'0': 'open',
'1': 'canceled',
'2': 'closed',
'3': 'open', # partial
}
else:
statuses = {
'1': 'open',
'2': 'canceled',
'3': 'open', # stop order triggered
'4': 'failed',
'5': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_transaction_status(self, status):
statuses = {
'0': 'pending', # submitted, pending confirmation
'1': 'failed',
'2': 'ok',
'3': 'canceled',
'5': 'ok', # confirmed
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "code": 1000,
# "message": "success",
# "id": "withdrawalId"
# }
#
# fetchWithdrawals
#
# {
# "amount": 0.01,
# "fees": 0.001,
# "id": 2016042556231,
# "manageTime": 1461579340000,
# "status": 3,
# "submitTime": 1461579288000,
# "toAddress": "14fxEPirL9fyfw1i9EF439Pq6gQ5xijUmp",
# }
#
# fetchDeposits
#
# {
# "address": "1FKN1DZqCm8HaTujDioRL2Aezdh7Qj7xxx",
# "amount": "1.00000000",
# "confirmTimes": 1,
# "currency": "BTC",
# "description": "Successfully Confirm",
# "hash": "7ce842de187c379abafadd64a5fe66c5c61c8a21fb04edff9532234a1dae6xxx",
# "id": 558,
# "itransfer": 1,
# "status": 2,
# "submit_time": "2016-12-07 18:51:57",
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'hash')
amount = self.safe_number(transaction, 'amount')
timestamp = self.parse8601(self.safe_string(transaction, 'submit_time'))
timestamp = self.safe_integer(transaction, 'submitTime', timestamp)
address = self.safe_string_2(transaction, 'toAddress', 'address')
tag = None
if address is not None:
parts = address.split('_')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
confirmTimes = self.safe_integer(transaction, 'confirmTimes')
updated = self.safe_integer(transaction, 'manageTime')
type = None
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
if address is not None:
type = 'withdrawal' if (confirmTimes is None) else 'deposit'
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
fee = None
feeCost = self.safe_number(transaction, 'fees')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def set_leverage(self, leverage, symbol=None, params={}):
"""
set the level of leverage for a market
:param float leverage: the rate of leverage
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: response from the exchange
"""
await self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
if (leverage < 1) or (leverage > 125):
raise BadRequest(self.id + ' setLeverage() leverage should be between 1 and 125')
market = self.market(symbol)
accountType = None
if not market['swap']:
raise BadSymbol(self.id + ' setLeverage() supports swap contracts only')
else:
accountType = 1
request = {
'symbol': market['id'],
'leverage': leverage,
'futuresAccountType': accountType, # 1: USDT perpetual swaps
}
return await self.contractV2PrivatePostSettingSetLeverage(self.extend(request, params))
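    # Usage sketch (illustrative; USDT-margined swaps only, leverage must be between 1 and 125):
    #
    #     await exchange.set_leverage(5, 'BTC/USDT:USDT')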
async def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
"""
fetches historical funding rate prices
:param str|None symbol: unified symbol of the market to fetch the funding rate history for
:param int|None since: timestamp in ms of the earliest funding rate to fetch
:param int|None limit: the maximum amount of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>` to fetch
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>`
"""
await self.load_markets()
request = {
# 'symbol': market['id'],
# 'startTime': since,
# 'endTime': endTime, # current time by default
# 'limit': limit, # default 100, max 1000
}
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
request['symbol'] = market['id']
if since is not None:
request['startTime'] = since
till = self.safe_integer(params, 'till')
endTime = self.safe_string(params, 'endTime')
params = self.omit(params, ['endTime', 'till'])
if till is not None:
request['endTime'] = till
elif endTime is not None:
request['endTime'] = endTime
if limit is not None:
request['limit'] = limit
response = await self.contractV2PublicGetFundingRate(self.extend(request, params))
#
# {
# "code": 10000,
# "data": [
# {
# "symbol": "BTC_USDT",
# "fundingRate": "0.0001",
# "fundingTime": "1645171200000"
# },
# ],
# "desc": "操作成功"
# }
#
data = self.safe_value(response, 'data', [])
rates = []
for i in range(0, len(data)):
entry = data[i]
marketId = self.safe_string(entry, 'symbol')
symbol = self.safe_symbol(marketId)
            timestamp = self.safe_integer(entry, 'fundingTime')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'fundingRate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
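    # Usage sketch (illustrative millisecond timestamps; 'till' is accepted as an alias for endTime):
    #
    #     history = await exchange.fetch_funding_rate_history('BTC/USDT:USDT', limit=100)
    #     window = await exchange.fetch_funding_rate_history('BTC/USDT:USDT', since=1645171200000,
    #                                                        params={'till': 1646121600000})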
async def fetch_funding_rate(self, symbol, params={}):
"""
fetch the current funding rate
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `funding rate structure <https://docs.ccxt.com/en/latest/manual.html#funding-rate-structure>`
"""
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRate() does not supports contracts only')
request = {
'symbol': market['id'],
}
response = await self.contractV1PublicGetFundingRate(self.extend(request, params))
#
# {
# "code": 10000,
# "desc": "操作成功",
# "data": {
# "fundingRate": "0.0001",
# "nextCalculateTime": "2022-02-19 00:00:00"
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_funding_rate(data, market)
def parse_funding_rate(self, contract, market=None):
#
# fetchFundingRate
#
# {
# "fundingRate": "0.0001",
# "nextCalculateTime": "2022-02-19 00:00:00"
# }
#
# fetchFundingRates
#
# {
# "symbol": "BTC_USDT",
# "markPrice": "43254.42",
# "indexPrice": "43278.61",
# "lastFundingRate": "0.0001",
# "nextFundingTime": "1646121600000"
# }
#
marketId = self.safe_string(contract, 'symbol')
symbol = self.safe_symbol(marketId, market)
fundingRate = self.safe_number(contract, 'fundingRate')
nextFundingDatetime = self.safe_string(contract, 'nextCalculateTime')
return {
'info': contract,
'symbol': symbol,
'markPrice': self.safe_string(contract, 'markPrice'),
'indexPrice': self.safe_string(contract, 'indexPrice'),
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': fundingRate,
'fundingTimestamp': None,
'fundingDatetime': None,
'nextFundingRate': None,
'nextFundingTimestamp': self.parse8601(nextFundingDatetime),
'nextFundingDatetime': nextFundingDatetime,
'previousFundingRate': self.safe_string(contract, 'lastFundingRate'),
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
async def fetch_funding_rates(self, symbols=None, params={}):
"""
fetch the funding rate for multiple markets
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the zb api endpoint
        :returns dict: a dictionary of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html#funding-rates-structure>`, indexed by market symbols
"""
await self.load_markets()
response = await self.contractV2PublicGetPremiumIndex(params)
#
# {
# "code": 10000,
# "data": [
# {
# "symbol": "BTC_USDT",
# "markPrice": "43254.42",
# "indexPrice": "43278.61",
# "lastFundingRate": "0.0001",
# "nextFundingTime": "1646121600000"
# },
# ],
# "desc":"操作成功"
# }
#
data = self.safe_value(response, 'data', [])
result = self.parse_funding_rates(data)
return self.filter_by_array(result, 'symbol', symbols)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional address tag/memo, appended to the address when required
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
password = self.safe_string(params, 'safePwd', self.password)
if password is None:
raise ArgumentsRequired(self.id + ' withdraw() requires exchange.password or a safePwd parameter')
fees = self.safe_number(params, 'fees')
if fees is None:
raise ArgumentsRequired(self.id + ' withdraw() requires a fees parameter')
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag is not None:
address += '_' + tag
request = {
'amount': self.currency_to_precision(code, amount),
'currency': currency['id'],
'fees': self.currency_to_precision(code, fees),
# 'itransfer': 0, # agree for an internal transfer, 0 disagree, 1 agree, the default is to disagree
'method': 'withdraw',
'receiveAddr': address,
'safePwd': password,
}
response = await self.spotV1PrivateGetWithdraw(self.extend(request, params))
#
# {
# "code": 1000,
# "message": "success",
# "id": "withdrawalId"
# }
#
transaction = self.parse_transaction(response, currency)
return self.extend(transaction, {
'type': 'withdrawal',
'address': address,
'addressTo': address,
'amount': amount,
})
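    # Usage sketch (illustrative; the address is a placeholder, and both the withdrawal fee and the
    # fund password are mandatory, as enforced above):
    #
    #     tx = await exchange.withdraw('USDT', 10, 'RECEIVE_ADDRESS_PLACEHOLDER', None,
    #                                  {'fees': 1, 'safePwd': 'YOUR_FUND_PASSWORD'})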
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'pageIndex': 1,
# 'pageSize': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
response = await self.spotV1PrivateGetGetWithdrawRecord(self.extend(request, params))
#
# {
# "code": 1000,
# "message": {
# "des": "success",
# "isSuc": True,
# "datas": {
# "list": [
# {
# "amount": 0.01,
# "fees": 0.001,
# "id": 2016042556231,
# "manageTime": 1461579340000,
# "status": 3,
# "submitTime": 1461579288000,
# "toAddress": "14fxEPirL9fyfw1i9EF439Pq6gQ5xijUmp",
# },
# ],
# "pageIndex": 1,
# "pageSize": 10,
# "totalCount": 4,
# "totalPage": 1
# }
# }
# }
#
message = self.safe_value(response, 'message', {})
datas = self.safe_value(message, 'datas', {})
withdrawals = self.safe_value(datas, 'list', [])
return self.parse_transactions(withdrawals, currency, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'pageIndex': 1,
# 'pageSize': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
response = await self.spotV1PrivateGetGetChargeRecord(self.extend(request, params))
#
# {
# "code": 1000,
# "message": {
# "des": "success",
# "isSuc": True,
# "datas": {
# "list": [
# {
# "address": "1FKN1DZqCm8HaTujDioRL2Aezdh7Qj7xxx",
# "amount": "1.00000000",
# "confirmTimes": 1,
# "currency": "BTC",
# "description": "Successfully Confirm",
# "hash": "7ce842de187c379abafadd64a5fe66c5c61c8a21fb04edff9532234a1dae6xxx",
# "id": 558,
# "itransfer": 1,
# "status": 2,
# "submit_time": "2016-12-07 18:51:57",
# },
# ],
# "pageIndex": 1,
# "pageSize": 10,
# "total": 8
# }
# }
# }
#
message = self.safe_value(response, 'message', {})
datas = self.safe_value(message, 'datas', {})
deposits = self.safe_value(datas, 'list', [])
return self.parse_transactions(deposits, currency, since, limit)
async def fetch_position(self, symbol, params={}):
"""
fetch data on a single open contract trade position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'futuresAccountType': 1, # 1: USDT-M Perpetual Futures
# 'symbol': market['id'],
# 'marketId': market['id'],
# 'side': params['side'],
}
response = await self.contractV2PrivateGetPositionsGetPositions(self.extend(request, params))
#
# {
# "code": 10000,
# "data": [
# {
# "amount": "0.002",
# "appendAmount": "0",
# "autoLightenRatio": "0",
# "avgPrice": "38570",
# "bankruptcyPrice": "46288.41",
# "contractType": 1,
# "createTime": "1645784751867",
# "freezeAmount": "0",
# "freezeList": [
# {
# "amount": "15.436832",
# "currencyId": "6",
# "currencyName": "usdt",
# "modifyTime": "1645784751867"
# }
# ],
# "id": "6902921567894972486",
# "lastAppendAmount": "0",
# "leverage": 5,
# "liquidateLevel": 1,
# "liquidatePrice": "46104",
# "maintainMargin": "0.30912384",
# "margin": "15.436832",
# "marginAppendCount": 0,
# "marginBalance": "15.295872",
# "marginMode": 1,
# "marginRate": "0.020209",
# "marketId": "100",
# "marketName": "BTC_USDT",
# "modifyTime": "1645784751867",
# "nominalValue": "77.14736",
# "originAppendAmount": "0",
# "originId": "6902921567894972591",
# "refreshType": "Timer",
# "returnRate": "-0.0091",
# "side": 0,
# "status": 1,
# "unrealizedPnl": "-0.14096",
# "userId": "6896693805014120448"
# }
# ],
# "desc": "操作成功"
# }
#
data = self.safe_value(response, 'data', [])
firstPosition = self.safe_value(data, 0)
return self.parse_position(firstPosition, market)
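    # Usage sketch (illustrative; the USDT-M futures account is queried and the first position is
    # returned, see also fetch_positions below):
    #
    #     position = await exchange.fetch_position('BTC/USDT:USDT')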
async def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the zb api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
request = {
'futuresAccountType': 1, # 1: USDT-M Perpetual Futures
# 'symbol': market['id'],
# 'marketId': market['id'],
# 'side': params['side'],
}
response = await self.contractV2PrivateGetPositionsGetPositions(self.extend(request, params))
#
# {
# "code": 10000,
# "data": [
# {
# "amount": "0.002",
# "appendAmount": "0",
# "autoLightenRatio": "0",
# "avgPrice": "38570",
# "bankruptcyPrice": "46288.41",
# "contractType": 1,
# "createTime": "1645784751867",
# "freezeAmount": "0",
# "freezeList": [
# {
# "amount": "15.436832",
# "currencyId": "6",
# "currencyName": "usdt",
# "modifyTime": "1645784751867"
# }
# ],
# "id": "6902921567894972486",
# "lastAppendAmount": "0",
# "leverage": 5,
# "liquidateLevel": 1,
# "liquidatePrice": "46104",
# "maintainMargin": "0.30912384",
# "margin": "15.436832",
# "marginAppendCount": 0,
# "marginBalance": "15.295872",
# "marginMode": 1,
# "marginRate": "0.020209",
# "marketId": "100",
# "marketName": "BTC_USDT",
# "modifyTime": "1645784751867",
# "nominalValue": "77.14736",
# "originAppendAmount": "0",
# "originId": "6902921567894972591",
# "refreshType": "Timer",
# "returnRate": "-0.0091",
# "side": 0,
# "status": 1,
# "unrealizedPnl": "-0.14096",
# "userId": "6896693805014120448"
# },
# ],
# "desc": "操作成功"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_positions(data, symbols)
def parse_position(self, position, market=None):
#
# {
# "amount": "0.002",
# "appendAmount": "0",
# "autoLightenRatio": "0",
# "avgPrice": "38570",
# "bankruptcyPrice": "46288.41",
# "contractType": 1,
# "createTime": "1645784751867",
# "freezeAmount": "0",
# "freezeList": [
# {
# "amount": "15.436832",
# "currencyId": "6",
# "currencyName": "usdt",
# "modifyTime": "1645784751867"
# }
# ],
# "id": "6902921567894972486",
# "lastAppendAmount": "0",
# "leverage": 5,
# "liquidateLevel": 1,
# "liquidatePrice": "46104",
# "maintainMargin": "0.30912384",
# "margin": "15.436832",
# "marginAppendCount": 0,
# "marginBalance": "15.295872",
# "marginMode": 1,
# "marginRate": "0.020209",
# "marketId": "100",
# "marketName": "BTC_USDT",
# "modifyTime": "1645784751867",
# "nominalValue": "77.14736",
# "originAppendAmount": "0",
# "originId": "6902921567894972591",
# "refreshType": "Timer",
# "returnRate": "-0.0091",
# "side": 0,
# "status": 1,
# "unrealizedPnl": "-0.14096",
# "userId": "6896693805014120448"
# }
#
marketId = self.safe_string(position, 'marketName')
market = self.safe_market(marketId, market)
symbol = market['symbol']
contracts = self.safe_string(position, 'amount')
entryPrice = self.safe_number(position, 'avgPrice')
initialMargin = self.safe_string(position, 'margin')
rawSide = self.safe_string(position, 'side')
side = 'long' if (rawSide == '1') else 'short'
openType = self.safe_string(position, 'marginMode')
marginMode = 'isolated' if (openType == '1') else 'cross'
leverage = self.safe_string(position, 'leverage')
liquidationPrice = self.safe_number(position, 'liquidatePrice')
unrealizedProfit = self.safe_number(position, 'unrealizedPnl')
maintenanceMargin = self.safe_number(position, 'maintainMargin')
marginRatio = self.safe_number(position, 'marginRate')
notional = self.safe_number(position, 'nominalValue')
percentage = Precise.string_mul(self.safe_string(position, 'returnRate'), '100')
        timestamp = self.safe_integer(position, 'createTime')
return {
'info': position,
'symbol': symbol,
'contracts': self.parse_number(contracts),
'contractSize': None,
'entryPrice': entryPrice,
'collateral': None,
'side': side,
'unrealizedProfit': unrealizedProfit,
'leverage': self.parse_number(leverage),
'percentage': percentage,
'marginMode': marginMode,
'notional': notional,
'markPrice': None,
'liquidationPrice': liquidationPrice,
'initialMargin': self.parse_number(initialMargin),
'initialMarginPercentage': None,
'maintenanceMargin': maintenanceMargin,
'maintenanceMarginPercentage': None,
'marginRatio': marginRatio,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
def parse_ledger_entry_type(self, type):
types = {
'1': 'realized pnl',
'2': 'commission',
'3': 'funding fee subtract',
'4': 'funding fee addition',
'5': 'insurance clear',
'6': 'transfer in',
'7': 'transfer out',
'8': 'margin addition',
'9': 'margin subtraction',
'10': 'commission addition',
'11': 'bill type freeze',
'12': 'bill type unfreeze',
'13': 'system take over margin',
'14': 'transfer',
'15': 'realized pnl collection',
'16': 'funding fee collection',
'17': 'recommender return commission',
'18': 'by level subtract positions',
'19': 'system add',
'20': 'system subtract',
'23': 'trading competition take over fund',
'24': 'trading contest tickets',
'25': 'return of trading contest tickets',
'26': 'experience expired recall',
'50': 'test register gift',
'51': 'register gift',
'52': 'deposit gift',
'53': 'trading volume gift',
'54': 'awards gift',
'55': 'trading volume gift',
'56': 'awards gift expire',
'201': 'open positions',
'202': 'close positions',
'203': 'take over positions',
'204': 'trading competition take over positions',
'205': 'one way open long',
'206': 'one way open short',
'207': 'one way close long',
'208': 'one way close short',
'301': 'coupon deduction service charge',
'302': 'experience deduction',
'303': 'experience expired',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# [
# {
# "type": 3,
# "changeAmount": "0.00434664",
# "isIn": 0,
# "beforeAmount": "30.53353135",
# "beforeFreezeAmount": "21.547",
# "createTime": "1646121604997",
# "available": "30.52918471",
# "unit": "usdt",
# "symbol": "BTC_USDT"
# },
# ],
#
        timestamp = self.safe_integer(item, 'createTime')
direction = None
changeDirection = self.safe_number(item, 'isIn')
if changeDirection == 1:
direction = 'increase'
else:
direction = 'reduce'
fee = None
feeCost = self.safe_number(item, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': self.safe_currency_code(self.safe_string(item, 'unit')),
}
return {
'id': self.safe_string(item, 'id'),
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': self.safe_string(item, 'userId'),
'referenceId': None,
'referenceAccount': None,
'type': self.parse_ledger_entry_type(self.safe_integer(item, 'type')),
'currency': self.safe_currency_code(self.safe_string(item, 'unit')),
'amount': self.safe_number(item, 'changeAmount'),
'before': self.safe_number(item, 'beforeAmount'),
'after': self.safe_number(item, 'available'),
'status': None,
'fee': fee,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchLedger() requires a code argument')
await self.load_markets()
currency = self.currency(code)
request = {
'futuresAccountType': 1,
# 'currencyId': '11',
# 'type': 1,
# 'endTime': self.milliseconds(),
# 'pageNum': 1,
}
if code is not None:
request['currencyName'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['pageSize'] = limit
response = await self.contractV2PrivateGetFundGetBill(self.extend(request, params))
#
# {
# "code": 10000,
# "data": {
# "list": [
# {
# "type": 3,
# "changeAmount": "0.00434664",
# "isIn": 0,
# "beforeAmount": "30.53353135",
# "beforeFreezeAmount": "21.547",
# "createTime": "1646121604997",
# "available": "30.52918471",
# "unit": "usdt",
# "symbol": "BTC_USDT"
# },
# ],
# "pageNum": 1,
# "pageSize": 10
# },
# "desc": "操作成功"
# }
#
data = self.safe_value(response, 'data', {})
list = self.safe_value(data, 'list', [])
return self.parse_ledger(list, currency, since, limit)
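    # Usage sketch (illustrative; a currency code is mandatory and only the USDT-M futures ledger
    # is queried here):
    #
    #     ledger = await exchange.fetch_ledger('USDT', since=1646121600000, limit=10)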
async def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
await self.load_markets()
marketType, query = self.handle_market_type_and_params('transfer', None, params)
currency = self.currency(code)
margin = (marketType == 'margin')
swap = (marketType == 'swap')
side = None
marginMethod = None
amountToPrecision = self.currency_to_precision(code, amount)
request = {
'amount': amountToPrecision, # Swap, Cross Margin, Isolated Margin
# 'coin': currency['id'], # Margin
# 'currencyName': currency['id'], # Swap
# 'clientId': self.safe_string(params, 'clientId'), # Swap "2sdfsdfsdf232342"
# 'side': side, # Swap, 1:Deposit(zb account -> futures account),0:Withdrawal(futures account -> zb account)
# 'marketName': self.safe_string(params, 'marketName'), # Isolated Margin
}
if swap:
if fromAccount == 'spot' or toAccount == 'future':
side = 1
else:
side = 0
request['currencyName'] = currency['id']
request['clientId'] = self.safe_string(params, 'clientId')
request['side'] = side
else:
defaultMargin = 'isolated' if margin else 'cross'
marginMode = self.safe_string_2(self.options, 'defaultMarginMode', 'marginMode', defaultMargin)
if marginMode == 'isolated':
if fromAccount == 'spot' or toAccount == 'isolated':
marginMethod = 'spotV1PrivateGetTransferInLever'
else:
marginMethod = 'spotV1PrivateGetTransferOutLever'
request['marketName'] = self.safe_string(params, 'marketName')
elif marginMode == 'cross':
if fromAccount == 'spot' or toAccount == 'cross':
marginMethod = 'spotV1PrivateGetTransferInCross'
else:
marginMethod = 'spotV1PrivateGetTransferOutCross'
request['coin'] = currency['id']
method = self.get_supported_mapping(marketType, {
'swap': 'contractV2PrivatePostFundTransferFund',
'margin': marginMethod,
})
response = await getattr(self, method)(self.extend(request, query))
#
# Swap
#
# {
# "code": 10000,
# "data": "2sdfsdfsdf232342",
# "desc": "Success"
# }
#
# Margin
#
# {
# "code": 1000,
# "message": "Success"
# }
#
return self.extend(self.parse_transfer(response, currency), {
'amount': self.parse_number(amountToPrecision),
'fromAccount': fromAccount,
'toAccount': toAccount,
})
def parse_transfer(self, transfer, currency=None):
# response samples in 'transfer'
timestamp = self.milliseconds()
return {
'id': self.safe_string(transfer, 'data'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
            'currency': self.safe_currency_code(None, currency),
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': None,
}
async def modify_margin_helper(self, symbol, amount, type, params={}):
        if self.safe_string(params, 'positionsId') is None:
            raise ArgumentsRequired(self.id + ' modifyMarginHelper() requires a positionsId argument in the params')
await self.load_markets()
market = self.market(symbol)
amount = self.amount_to_precision(symbol, amount)
position = self.safe_string(params, 'positionsId')
request = {
'positionsId': position,
'amount': amount,
'type': type, # 1 increase, 0 reduce
'futuresAccountType': 1, # 1: USDT Perpetual Futures
}
response = await self.contractV2PrivatePostPositionsUpdateMargin(self.extend(request, params))
#
# {
# "code": 10000,
# "data": {
# "amount": "0.002",
# "appendAmount": "0",
# "avgPrice": "43927.23",
# "bankruptcyPrice": "41730.86",
# "createTime": "1646208695609",
# "freezeAmount": "0",
# "id": "6900781818669377576",
# "keyMark": "6896693805014120448-100-1-",
# "lastAppendAmount": "0",
# "lastTime": "1646209235505",
# "leverage": 20,
# "liquidateLevel": 1,
# "liquidatePrice": "41898.46",
# "maintainMargin": "0",
# "margin": "4.392723",
# "marginAppendCount": 0,
# "marginBalance": "0",
# "marginMode": 1,
# "marginRate": "0",
# "marketId": "100",
# "marketName": "BTC_USDT",
# "modifyTime": "1646209235505",
# "nominalValue": "87.88828",
# "originAppendAmount": "0",
# "originId": "6904699716827818029",
# "positionsMode": 2,
# "sellerCurrencyId": "1",
# "side": 1,
# "status": 1,
# "unrealizedPnl": "0.03382",
# "usable": True,
# "userId": "6896693805014120448"
# },
# "desc":"操作成功"
# }
#
return self.extend(self.parse_margin_modification(response, market), {
'amount': self.parse_number(amount),
})
def parse_margin_modification(self, data, market=None):
innerData = self.safe_value(data, 'data', {})
sideRaw = self.safe_integer(innerData, 'side')
side = 'add' if (sideRaw == 1) else 'reduce'
statusCode = self.safe_integer(innerData, 'status')
status = 'ok' if (statusCode == 1) else 'failed'
return {
'info': data,
'type': side,
'amount': None,
'code': market['quote'],
'symbol': market['symbol'],
'status': status,
}
async def add_margin(self, symbol, amount, params={}):
"""
add margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#add-margin-structure>`
"""
        if self.safe_string(params, 'positionsId') is None:
            raise ArgumentsRequired(self.id + ' addMargin() requires a positionsId argument in the params')
return await self.modify_margin_helper(symbol, amount, 1, params)
async def reduce_margin(self, symbol, amount, params={}):
"""
remove margin from a position
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#reduce-margin-structure>`
"""
        if self.safe_string(params, 'positionsId') is None:
            raise ArgumentsRequired(self.id + ' reduceMargin() requires a positionsId argument in the params')
return await self.modify_margin_helper(symbol, amount, 0, params)
async def fetch_borrow_rate(self, code, params={}):
"""
fetch the rate of interest to borrow a currency for margin trading
:param str code: unified currency code
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a `borrow rate structure <https://docs.ccxt.com/en/latest/manual.html#borrow-rate-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
response = await self.spotV1PrivateGetGetLoans(self.extend(request, params))
#
# {
# code: '1000',
# message: '操作成功',
# result: [
# {
# interestRateOfDay: '0.0005',
# repaymentDay: '30',
# amount: '148804.4841',
# balance: '148804.4841',
# rateOfDayShow: '0.05 %',
# coinName: 'USDT',
# lowestAmount: '0.01'
# },
# ]
# }
#
timestamp = self.milliseconds()
data = self.safe_value(response, 'result', [])
rate = self.safe_value(data, 0, {})
return {
'currency': self.safe_currency_code(self.safe_string(rate, 'coinName')),
'rate': self.safe_number(rate, 'interestRateOfDay'),
'period': self.safe_number(rate, 'repaymentDay'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': rate,
}
async def fetch_borrow_rates(self, params={}):
"""
fetch the borrow interest rates of all currencies
:param dict params: extra parameters specific to the zb api endpoint
:returns dict: a list of `borrow rate structures <https://docs.ccxt.com/en/latest/manual.html#borrow-rate-structure>`
"""
        if self.safe_string(params, 'coin') is None:
            raise ArgumentsRequired(self.id + ' fetchBorrowRates() requires a coin argument in the params')
await self.load_markets()
currency = self.currency(self.safe_string(params, 'coin'))
request = {
'coin': currency['id'],
}
response = await self.spotV1PrivateGetGetLoans(self.extend(request, params))
#
# {
# code: '1000',
# message: '操作成功',
# result: [
# {
# interestRateOfDay: '0.0005',
# repaymentDay: '30',
# amount: '148804.4841',
# balance: '148804.4841',
# rateOfDayShow: '0.05 %',
# coinName: 'USDT',
# lowestAmount: '0.01'
# },
# ]
# }
#
timestamp = self.milliseconds()
data = self.safe_value(response, 'result', [])
rates = []
for i in range(0, len(data)):
entry = data[i]
rates.append({
'currency': self.safe_currency_code(self.safe_string(entry, 'coinName')),
'rate': self.safe_number(entry, 'interestRateOfDay'),
'period': self.safe_number(entry, 'repaymentDay'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': entry,
})
return rates
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
section, version, access = api
url = self.implode_hostname(self.urls['api'][section][version][access])
if access == 'public':
if path == 'getFeeInfo':
url = self.implode_hostname(self.urls['api'][section][version]['private']) + '/' + path
else:
url += '/' + version + '/' + path
if params:
url += '?' + self.urlencode(params)
elif section == 'contract':
timestamp = self.milliseconds()
iso8601 = self.iso8601(timestamp)
signedString = iso8601 + method + '/Server/api/' + version + '/' + path
params = self.keysort(params)
headers = {
'ZB-APIKEY': self.apiKey,
'ZB-TIMESTAMP': iso8601,
# 'ZB-LAN': 'cn', # cn, en, kr
}
url += '/' + version + '/' + path
if method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(params)
signedString += self.urlencode(params)
else:
if params:
query = self.urlencode(params)
url += '?' + query
signedString += query
secret = self.hash(self.encode(self.secret), 'sha1')
signature = self.hmac(self.encode(signedString), self.encode(secret), hashlib.sha256, 'base64')
headers['ZB-SIGN'] = signature
else:
query = self.keysort(self.extend({
'method': path,
'accesskey': self.apiKey,
}, params))
nonce = self.nonce()
query = self.keysort(query)
auth = self.rawencode(query)
secret = self.hash(self.encode(self.secret), 'sha1')
signature = self.hmac(self.encode(auth), self.encode(secret), hashlib.md5)
suffix = 'sign=' + signature + '&reqTime=' + str(nonce)
url += '/' + path + '?' + auth + '&' + suffix
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if body[0] == '{':
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
if 'code' in response:
code = self.safe_string(response, 'code')
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
if (code != '1000') and (code != '10000'):
raise ExchangeError(feedback)
# special case for {"result":false,"message":"服务端忙碌"}(a "Busy Server" reply)
result = self.safe_value(response, 'result')
if result is not None:
if not result:
message = self.safe_string(response, 'message')
if message == u'服务端忙碌':
raise ExchangeNotAvailable(feedback)
else:
raise ExchangeError(feedback)
| 44.376623
| 205
| 0.456546
|
065d5e8e6ae7539700198f02bf477b5a375ded17
| 976
|
py
|
Python
|
timi_robot/logger.py
|
lxl0928/wechat_work_logger_robot
|
a8e3e968e31c94a5ae8dee573a1bc4f7dccd7d95
|
[
"Apache-2.0"
] | 1
|
2019-12-16T18:20:42.000Z
|
2019-12-16T18:20:42.000Z
|
timi_robot/logger.py
|
lxl0928/wechat_work_logger_robot
|
a8e3e968e31c94a5ae8dee573a1bc4f7dccd7d95
|
[
"Apache-2.0"
] | null | null | null |
timi_robot/logger.py
|
lxl0928/wechat_work_logger_robot
|
a8e3e968e31c94a5ae8dee573a1bc4f7dccd7d95
|
[
"Apache-2.0"
] | 1
|
2021-04-11T04:36:50.000Z
|
2021-04-11T04:36:50.000Z
|
# coding: utf-8
import traceback
from timi_robot.hub import init_sdk, capture_message, async_capture_message
class SensoroLogger(object):
def __init__(self, url, phones):
self.url = url
self.phones = phones
def log(self, err=None, common_msg=None):
msg = f"{traceback.format_exc()}" if not err else f"traceback: {traceback.format_exc()}\n\n error: {err}"
msg = msg if not common_msg else common_msg
if self.url and self.phones:
init_sdk(robot_url=self.url, mentioned_mobiles=self.phones)
capture_message(text=msg)
async def async_log(self, err=None, common_msg=None):
msg = f"{traceback.format_exc()}" if not err else f"traceback: {traceback.format_exc()}\n\n error: {err}"
msg = msg if not common_msg else common_msg
if self.url and self.phones:
init_sdk(robot_url=self.url, mentioned_mobiles=self.phones)
await async_capture_message(text=msg)
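# Usage sketch (illustrative only; the webhook URL and phone number below are
# placeholders, not real values):
#
#     logger = SensoroLogger(url="https://example.invalid/robot/webhook", phones=["13800000000"])
#     try:
#         1 / 0
#     except ZeroDivisionError as exc:
#         logger.log(err=exc)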
| 36.148148
| 113
| 0.67418
|
3656a80e3bba340e525f387e325a9d0d9dfb1121
| 2,949
|
py
|
Python
|
shenjieting/Study/Study_collections.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 2
|
2018-03-29T08:26:17.000Z
|
2019-06-17T10:56:19.000Z
|
shenjieting/Study/Study_collections.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2022-03-22T20:26:08.000Z
|
2022-03-22T20:26:08.000Z
|
shenjieting/Study/Study_collections.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2019-02-18T10:44:20.000Z
|
2019-02-18T10:44:20.000Z
|
# collections is a built-in Python module that provides many useful container classes.
from collections import namedtuple
from collections import deque
from collections import defaultdict
from collections import OrderedDict
from collections import Counter
#⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐
'''
namedtuple
is a function that creates a custom tuple subclass with a fixed number of elements,
whose elements can be referenced by attribute name instead of by index.
It is a convenient way to define a data type that keeps the immutability of a tuple
while still allowing attribute access.
'''
# The 2D coordinates of a point can be represented as:
Point = namedtuple('Point',['x','y'])
p = Point(1,2)
print("namedtuple-----点的坐标:",p.x,p.y)
#验证Point 对象是tuple的一种子类
print("namedtuple-----p是否和Point类型一致",isinstance(p,Point))
print("namedtuple-----p是否和tuple类型一致",isinstance(p,tuple))
#如果要用坐标和半径表示一个圆,也可以用namedtuple定义:
Circle = namedtuple('Circle',['x','y','r'])
c = Circle(10,10,20)
print("namedtuple-----圆的坐标X:{};Y:{};半径:{}".format(c.x,c.y,c.r))
#⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐
'''
deque
When data is stored in a list, access by index is fast, but inserting and deleting
elements is slow: a list is stored linearly, so with large amounts of data those
operations become inefficient.
deque is a double-ended list designed for efficient insertion and deletion, and is
well suited for queues and stacks:
'''
q = deque(['a','b','c'])
q.append("x")
q.appendleft("y")
print("deque-----",q)
q.pop()
print("deque-----",q)
q.popleft()
print("deque-----",q)
# Note: in addition to list's append() and pop(), deque also supports appendleft() and popleft(), so elements can be added to or removed from the head very efficiently.
#⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐
'''
defaultdict
With a plain dict, referencing a key that does not exist raises KeyError. If you want
a default value to be returned for missing keys instead, use defaultdict:
'''
dd = defaultdict(lambda:'error')
dd['key1'] = 'abc'
print("defaultdict-----",dd['key1'])
print("defaultdict-----",dd['key2'])
# Note: the default value is returned by a factory function that is passed in when the defaultdict is created. Apart from returning a default for missing keys, defaultdict behaves exactly like dict.
#⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐⭐
'''
OrderedDict
With a plain dict the keys are unordered, so when iterating over a dict we cannot rely
on the order of the keys. To preserve the insertion order of keys, use OrderedDict:
'''
oo = dict([('a',1),('b',2),('c',3)])
print("OrderedDict-----",oo)
# the keys of an OrderedDict are ordered
od = OrderedDict([('a',1),('b',2),('c',3)])
print("OrderedDict-----",od)
odd = OrderedDict()
odd['z'] = 1
odd['y'] = 2
odd['x'] = 3
odd["z"] = 4
print(odd.keys())  # returned in the order the keys were inserted
# OrderedDict can implement a FIFO (first-in, first-out) dict: once the capacity limit is exceeded, the earliest added key is removed first:
class LastUpdataOrdereDict(OrderedDict):
    def __init__(self, capacity):  # on top of the parent OrderedDict __init__, add a _capacity attribute to the instance
super(LastUpdataOrdereDict,self).__init__()
self._capacity = capacity
    def __setitem__(self, key, value):  # when a key is added, first check whether the capacity is exceeded; the three branches below handle the cases:
if key in self :
del self[key]
print("OrderedDict-----set:", (key, value)) #如果已存在key,则取代
else:
if len(self) == self._capacity:
                last = self.popitem(last=False)  # if the capacity is exceeded, pop the earliest added key
print('OrderedDict-----remove:', last)
else:
print("OrderedDict-----add:", (key, value)) #如果不存在key,则添加
OrderedDict.__setitem__(self,key,value)
print(odd)
las = LastUpdataOrdereDict(odd)
print(odd)
'''
Counter
Counter is a simple counter, used for example to count how many times each character occurs.
Counter is actually a subclass of dict; the result below shows that the characters
'g', 'm' and 'r' each appear twice, while the other characters appear once.
'''
c = Counter()
for ch in 'programming':
c[ch] = c[ch] + 1
print("Counter-----",c)
| 24.991525
| 110
| 0.675822
|
eac71c73e577c7d3ef78ff01c8d3ab07a351cebd
| 31,244
|
py
|
Python
|
alex/applications/Switchboard/sw_hub.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | 1
|
2015-10-19T17:36:27.000Z
|
2015-10-19T17:36:27.000Z
|
alex/applications/Switchboard/sw_hub.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | null | null | null |
alex/applications/Switchboard/sw_hub.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import multiprocessing
import time
import cPickle as pickle
import argparse
import re
from alex.components.hub.vio import VoipIO
from alex.components.hub.vad import VAD
from alex.components.hub.tts import TTS
from alex.components.hub.messages import Command, Frame
from alex.utils.config import Config
def load_database(file_name):
db = dict()
try:
f = open(file_name, 'r')
db = pickle.load(f)
f.close()
except IOError:
pass
if 'calls_from_start_end_length' not in db:
db['calls_from_start_end_length'] = dict()
return db
def save_database(file_name, db):
f = open(file_name, 'w+')
pickle.dump(db, f)
f.close()
def get_stats(db, remote_uri):
num_all_calls = 0
total_time = 0
last24_num_calls = 0
last24_total_time = 0
try:
for s, e, l in db['calls_from_start_end_length'][remote_uri]:
if l > 0:
num_all_calls += 1
total_time += l
# do counts for last 24 hours
if s > time.time() - 24 * 60 * 60:
last24_num_calls += 1
last24_total_time += l
except:
pass
return num_all_calls, total_time, last24_num_calls, last24_total_time
def play_intro(cfg, tts_commands, intro_id, last_intro_id):
cfg['Logging']['session_logger'].turn("system")
for i in range(len(cfg['Switchboard']['introduction'])):
last_intro_id = str(intro_id)
intro_id += 1
tts_commands.send(Command('synthesize(user_id="%s",text="%s",log="true")' % (last_intro_id, cfg['Switchboard']['introduction'][i]), 'HUB', 'TTS'))
return intro_id, last_intro_id
def run(cfg1, cfg2):
try:
cfg1['Logging']['system_logger'].info("Switchboard system\n" + "=" * 120)
vio1_commands, vio1_child_commands = multiprocessing.Pipe() # used to send commands to VoipIO
vio1_record, vio1_child_record = multiprocessing.Pipe() # I read from this connection recorded audio
vio1_play, vio1_child_play = multiprocessing.Pipe() # I write in audio to be played
vad1_commands, vad1_child_commands = multiprocessing.Pipe() # used to send commands to VAD
vad1_audio_in, vad1_child_audio_in = multiprocessing.Pipe() # used to read output audio from VAD
vad1_audio_out, vad1_child_audio_out = multiprocessing.Pipe() # used to read output audio from VAD
tts1_commands, tts1_child_commands = multiprocessing.Pipe() # used to send commands to TTS
tts1_text_in, tts1_child_text_in = multiprocessing.Pipe() # used to send TTS text
vio2_commands, vio2_child_commands = multiprocessing.Pipe() # used to send commands to VoipIO
vio2_record, vio2_child_record = multiprocessing.Pipe() # I read from this connection recorded audio
vio2_play, vio2_child_play = multiprocessing.Pipe() # I write in audio to be played
vad2_commands, vad2_child_commands = multiprocessing.Pipe() # used to send commands to VAD
vad2_audio_in, vad2_child_audio_in = multiprocessing.Pipe() # used to read output audio from VAD
vad2_audio_out, vad2_child_audio_out = multiprocessing.Pipe() # used to read output audio from VAD
tts2_commands, tts2_child_commands = multiprocessing.Pipe() # used to send commands to TTS
tts2_text_in, tts2_child_text_in = multiprocessing.Pipe() # used to send TTS text
command_connections = [vio1_commands, vad1_commands, tts1_commands, vio2_commands, vad2_commands, tts2_commands]
non_command_connections = [vio1_record, vio1_child_record,
vio1_play, vio1_child_play,
vad1_audio_in, vad1_child_audio_in,
vad1_audio_out, vad1_child_audio_out,
tts1_text_in, tts1_child_text_in,
vio2_record, vio2_child_record,
vio2_play, vio2_child_play,
vad2_audio_in, vad2_child_audio_in,
vad2_audio_out, vad2_child_audio_out,
tts2_text_in, tts2_child_text_in]
close_event = multiprocessing.Event()
vio1 = VoipIO(cfg1, vio1_child_commands, vio1_child_record, vio1_child_play, close_event)
vad1 = VAD(cfg1, vad1_child_commands, vad1_child_audio_in, vad1_child_audio_out, close_event)
tts1 = TTS(cfg1, tts1_child_commands, tts1_child_text_in, vio1_play, close_event)
vio2 = VoipIO(cfg2, vio2_child_commands, vio2_child_record, vio2_child_play, close_event)
vad2 = VAD(cfg2, vad2_child_commands, vad2_child_audio_in, vad2_child_audio_out, close_event)
tts2 = TTS(cfg2, tts2_child_commands, tts2_child_text_in, vio2_play, close_event)
vio1.start()
vad1.start()
tts1.start()
vio2.start()
vad2.start()
tts2.start()
cfg1['Logging']['session_logger'].set_close_event(close_event)
cfg1['Logging']['session_logger'].set_cfg(cfg1)
cfg1['Logging']['session_logger'].start()
cfg1['Logging']['session_logger'].cancel_join_thread()
cfg2['Logging']['session_logger'].set_close_event(close_event)
cfg2['Logging']['session_logger'].set_cfg(cfg2)
cfg2['Logging']['session_logger'].start()
cfg2['Logging']['session_logger'].cancel_join_thread()
# init the system
call_start1 = 0
count_intro1 = 0
intro_played1 = False
reject_played1 = False
intro_id1 = 0
last_intro_id1 = -1
end_played1 = False
s_voice_activity1 = False
s_last_voice_activity_time1 = 0
u_voice_activity1 = False
u_last_voice_activity_time1 = 0
vio_connect1 = False
hangup1 = False
u_start1 = False
call_start2 = 0
count_intro2 = 0
intro_played2 = False
reject_played2 = False
intro_id2 = 0
last_intro_id2 = -1
end_played2 = False
s_voice_activity2 = False
s_last_voice_activity_time2 = 0
u_voice_activity2 = False
u_last_voice_activity_time2 = 0
vio_connect2 = False
hangup2 = False
u_start2 = False
callee_entered = False
callee_uri = ''
db = load_database(cfg1['Switchboard']['call_db'])
for remote_uri in db['calls_from_start_end_length']:
num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(db, remote_uri)
m = []
m.append('')
m.append('=' * 120)
m.append('Remote SIP URI: %s' % remote_uri)
m.append('-' * 120)
m.append('Total calls: %d' % num_all_calls)
m.append('Total time (s): %f' % total_time)
m.append('Last 24h total calls: %d' % last24_num_calls)
m.append('Last 24h total time (s): %f' % last24_total_time)
m.append('-' * 120)
current_time = time.time()
if last24_num_calls > cfg1['Switchboard']['last24_max_num_calls'] or \
last24_total_time > cfg1['Switchboard']['last24_max_total_time']:
# add the remote uri to the black list
vio1_commands.send(Command('black_list(remote_uri="%s",expire="%d")' % (remote_uri,
current_time + cfg1['Switchboard']['blacklist_for']), 'HUB', 'VoipIO'))
m.append('BLACKLISTED')
else:
m.append('OK')
m.append('-' * 120)
m.append('')
cfg1['Logging']['system_logger'].info('\n'.join(m))
call_back_time = -1
call_back_uri = None
while 1:
# Check the close event.
if close_event.is_set():
print 'Received close event in: %s' % multiprocessing.current_process().name
return
time.sleep(cfg1['Hub']['main_loop_sleep_time'])
if intro_played1 and intro_played2 and not u_start1:
vio1_commands.send(Command('flush_out()', 'HUB', 'VoipIO1'))
time.sleep(cfg1['Hub']['main_loop_sleep_time'])
cfg1['Logging']['session_logger'].turn("system")
vio1_play.send(Command('utterance_start(user_id="%s",text="%s",fname="%s",log="%s")' %
('system', '', 'vpl-1.wav', 'true'), 'HUB', 'VoipIO1'))
u_start1 = True
if intro_played1 and intro_played2 and not u_start2:
vio2_commands.send(Command('flush_out()', 'HUB', 'VoipIO2'))
time.sleep(cfg1['Hub']['main_loop_sleep_time'])
cfg2['Logging']['session_logger'].turn("system")
vio2_play.send(Command('utterance_start(user_id="%s",text="%s",fname="%s",log="%s")' %
('system', '', 'vpl-2.wav', 'true'), 'HUB', 'VoipIO2'))
u_start2 = True
while vio1_record.poll() or vio2_record.poll():
if vio1_record.poll():
data = vio1_record.recv()
vad1_audio_in.send(data)
if u_start2:
vio2_play.send(data)
if vio2_record.poll():
data = vio2_record.recv()
vad2_audio_in.send(data)
if u_start1:
vio1_play.send(data)
# empty vad pipes
while vad1_audio_out.poll():
data = vad1_audio_out.recv()
while vad2_audio_out.poll():
data = vad2_audio_out.recv()
if call_back_time != -1 and call_back_time < time.time():
vio1_commands.send(Command('make_call(destination="%s")' % call_back_uri, 'HUB', 'VoipIO1'))
call_back_time = -1
call_back_uri = None
if callee_entered and callee_uri:
s_voice_activity1 = True
m = cfg1['Switchboard']['calling'] + ' '.join(callee_uri)
cfg1['Logging']['session_logger'].turn("system")
tts1_commands.send(Command('synthesize(text="%s",log="true")' % m, 'HUB', 'TTS1'))
vio2_commands.send(Command('make_call(destination="%s")' % callee_uri, 'HUB', 'VoipIO2'))
callee_uri = ''
# read all messages
if vio1_commands.poll():
command = vio1_commands.recv()
if isinstance(command, Command):
if command.parsed['__name__'] == "incoming_call" or command.parsed['__name__'] == "make_call":
cfg1['Logging']['system_logger'].session_start(command.parsed['remote_uri'])
cfg1['Logging']['session_logger'].session_start(cfg1['Logging']['system_logger'].get_session_dir_name())
cfg1['Logging']['system_logger'].session_system_log('config = ' + unicode(cfg1))
cfg1['Logging']['system_logger'].info(command)
cfg1['Logging']['session_logger'].config('config = ' + unicode(cfg1))
cfg1['Logging']['session_logger'].header(cfg1['Logging']["system_name"], cfg1['Logging']["version"])
cfg1['Logging']['session_logger'].input_source("voip")
if command.parsed['__name__'] == "rejected_call":
cfg1['Logging']['system_logger'].info(command)
call_back_time = time.time() + cfg1['Switchboard']['wait_time_before_calling_back']
# call back a default uri, if not defined call back the caller
if ('call_back_uri_subs' in cfg1['Switchboard']) and cfg1['Switchboard']['call_back_uri_subs']:
ru = command.parsed['remote_uri']
for pat, repl in cfg1['Switchboard']['call_back_uri_subs']:
ru = re.sub(pat, repl, ru)
call_back_uri = ru
elif ('call_back_uri' in cfg1['Switchboard']) and cfg1['Switchboard']['call_back_uri']:
call_back_uri = cfg1['Switchboard']['call_back_uri']
else:
call_back_uri = command.parsed['remote_uri']
if command.parsed['__name__'] == "rejected_call_from_blacklisted_uri":
cfg1['Logging']['system_logger'].info(command)
remote_uri = command.parsed['remote_uri']
num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(db, remote_uri)
m = []
m.append('')
m.append('=' * 120)
m.append('Rejected incoming call from blacklisted URI: %s' % remote_uri)
m.append('-' * 120)
m.append('Total calls: %d' % num_all_calls)
m.append('Total time (s): %f' % total_time)
m.append('Last 24h total calls: %d' % last24_num_calls)
m.append('Last 24h total time (s): %f' % last24_total_time)
m.append('=' * 120)
m.append('')
cfg1['Logging']['system_logger'].info('\n'.join(m))
if command.parsed['__name__'] == "call_connecting":
cfg1['Logging']['system_logger'].info(command)
if command.parsed['__name__'] == "call_confirmed":
cfg1['Logging']['system_logger'].info(command)
remote_uri = command.parsed['remote_uri']
num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(db, remote_uri)
m = []
m.append('')
m.append('=' * 120)
m.append('Incoming call from : %s' % remote_uri)
m.append('-' * 120)
m.append('Total calls: %d' % num_all_calls)
m.append('Total time (s): %f' % total_time)
m.append('Last 24h total calls: %d' % last24_num_calls)
m.append('Last 24h total time (s): %f' % last24_total_time)
m.append('-' * 120)
if last24_num_calls > cfg1['Switchboard']['last24_max_num_calls'] or \
last24_total_time > cfg1['Switchboard']['last24_max_total_time']:
cfg1['Logging']['session_logger'].turn("system")
tts1_commands.send(Command('synthesize(text="%s",log="true")' % cfg1['Switchboard']['rejected'], 'HUB', 'TTS1'))
reject_played1 = True
s_voice_activity1 = True
vio1_commands.send(Command('black_list(remote_uri="%s",expire="%d")' % (remote_uri, time.time() + cfg1['Switchboard']['blacklist_for']), 'HUB', 'VoipIO1'))
m.append('CALL REJECTED')
else:
# init the system
call_start1 = time.time()
count_intro1 = 0
intro_played1 = False
reject_played1 = False
end_played1 = False
s_voice_activity1 = False
s_last_voice_activity_time1 = 0
u_voice_activity1 = False
u_last_voice_activity_time1 = 0
vio_connect1 = False
hangup1 = False
u_start1 = False
callee_entered = False
callee_uri = ''
intro_id1, last_intro_id1 = play_intro(cfg1, tts1_commands, intro_id1, last_intro_id1)
m.append('CALL ACCEPTED')
m.append('=' * 120)
m.append('')
cfg1['Logging']['system_logger'].info('\n'.join(m))
try:
db['calls_from_start_end_length'][remote_uri].append([time.time(), 0, 0])
except:
db['calls_from_start_end_length'][remote_uri] = [[time.time(), 0, 0], ]
save_database(cfg1['Switchboard']['call_db'], db)
if command.parsed['__name__'] == "call_disconnected":
cfg1['Logging']['system_logger'].info(command)
remote_uri = command.parsed['remote_uri']
vio1_commands.send(Command('flush()', 'HUB', 'VoipIO1'))
vad1_commands.send(Command('flush()', 'HUB', 'VAD1'))
tts1_commands.send(Command('flush()', 'HUB', 'TTS1'))
cfg1['Logging']['system_logger'].session_end()
cfg1['Logging']['session_logger'].session_end()
try:
s, e, l = db['calls_from_start_end_length'][remote_uri][-1]
if e == 0 and l == 0:
# there is a record about last confirmed but not disconnected call
db['calls_from_start_end_length'][remote_uri][-1] = [s, time.time(), time.time() - s]
save_database('call_db.pckl', db)
except KeyError:
# disconnecting call which was not confirmed for URI calling for the first time
pass
intro_played1 = False
u_start1 = False
callee_entered = False
callee_uri = ''
vio2_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
if command.parsed['__name__'] == "play_utterance_start":
cfg1['Logging']['system_logger'].info(command)
s_voice_activity1 = True
if command.parsed['__name__'] == "play_utterance_end":
cfg1['Logging']['system_logger'].info(command)
s_voice_activity1 = False
s_last_voice_activity_time1 = time.time()
if command.parsed['user_id'] == last_intro_id1:
intro_played1 = True
if command.parsed['__name__'] == "dtmf_digit":
cfg1['Logging']['system_logger'].info(command)
digit = command.parsed['digit']
if digit in ['*', '#']:
callee_entered = True
if not callee_entered and digit in '0123456789':
callee_uri += digit
if vio2_commands.poll():
command = vio2_commands.recv()
if isinstance(command, Command):
if command.parsed['__name__'] == "make_call":
cfg2['Logging']['system_logger'].session_start(command.parsed['remote_uri'])
cfg2['Logging']['session_logger'].session_start(cfg2['Logging']['system_logger'].get_session_dir_name())
cfg2['Logging']['system_logger'].session_system_log('config = ' + unicode(cfg2))
cfg2['Logging']['system_logger'].info(command)
cfg2['Logging']['session_logger'].config('config = ' + unicode(cfg2))
cfg2['Logging']['session_logger'].header(cfg2['Logging']["system_name"], cfg2['Logging']["version"])
cfg2['Logging']['session_logger'].input_source("voip")
if command.parsed['__name__'] == "call_connecting":
cfg2['Logging']['system_logger'].info(command)
if command.parsed['__name__'] == "call_confirmed":
cfg2['Logging']['system_logger'].info(command)
remote_uri = command.parsed['remote_uri']
num_all_calls, total_time, last24_num_calls, last24_total_time = get_stats(db, remote_uri)
m = []
m.append('')
m.append('=' * 120)
m.append('Incoming call from : %s' % remote_uri)
m.append('-' * 120)
m.append('Total calls: %d' % num_all_calls)
m.append('Total time (s): %f' % total_time)
m.append('Last 24h total calls: %d' % last24_num_calls)
m.append('Last 24h total time (s): %f' % last24_total_time)
m.append('-' * 120)
m.append('CALL ACCEPTED')
m.append('=' * 120)
m.append('')
cfg2['Logging']['system_logger'].info('\n'.join(m))
# init the system
call_start2 = time.time()
count_intro2 = 0
intro_played2 = False
reject_played2 = False
end_played2 = False
s_voice_activity2 = False
s_last_voice_activity_time2 = 0
u_voice_activity2 = False
u_last_voice_activity_time2 = 0
vio_connect2 = False
hangup2 = False
u_start2 = False
intro_id2, last_intro_id2 = play_intro(cfg2, tts2_commands, intro_id2, last_intro_id2)
if command.parsed['__name__'] == "call_disconnected":
cfg2['Logging']['system_logger'].info(command)
remote_uri = command.parsed['remote_uri']
code = command.parsed['code']
vio2_commands.send(Command('flush()', 'HUB', 'VoipIO2'))
vad2_commands.send(Command('flush()', 'HUB', 'VAD2'))
tts2_commands.send(Command('flush()', 'HUB', 'TTS2'))
cfg2['Logging']['system_logger'].session_end()
cfg2['Logging']['session_logger'].session_end()
if intro_played2 and code in set(['603',]):
vio1_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
elif code in set(['486', '600', '603', '604', '606',]):
s_voice_activity1 = True
m = cfg1['Switchboard']['noanswer']
cfg1['Logging']['session_logger'].turn("system")
tts1_commands.send(Command('synthesize(text="%s",log="true")' % m, 'HUB', 'TTS1'))
elif code in set(['480',]):
s_voice_activity1 = True
m = cfg1['Switchboard']['wrongnumber']
cfg1['Logging']['session_logger'].turn("system")
tts1_commands.send(Command('synthesize(text="%s",log="true")' % m, 'HUB', 'TTS1'))
else:
vio1_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
intro_played2 = False
u_start2 = False
hangup1 = True
if command.parsed['__name__'] == "play_utterance_start":
cfg2['Logging']['system_logger'].info(command)
s_voice_activity2 = True
if command.parsed['__name__'] == "play_utterance_end":
cfg2['Logging']['system_logger'].info(command)
s_voice_activity2 = False
s_last_voice_activity_time2 = time.time()
if command.parsed['user_id'] == last_intro_id2:
intro_played2 = True
if vad1_commands.poll():
command = vad1_commands.recv()
cfg1['Logging']['system_logger'].info(command)
if isinstance(command, Command):
if command.parsed['__name__'] == "speech_start":
u_voice_activity = True
if command.parsed['__name__'] == "speech_end":
u_voice_activity = False
u_last_voice_activity_time = time.time()
if vad2_commands.poll():
command = vad2_commands.recv()
cfg2['Logging']['system_logger'].info(command)
if isinstance(command, Command):
if command.parsed['__name__'] == "speech_start":
u_voice_activity = True
if command.parsed['__name__'] == "speech_end":
u_voice_activity = False
u_last_voice_activity_time = time.time()
if tts1_commands.poll():
command = tts1_commands.recv()
cfg1['Logging']['system_logger'].info(command)
if tts2_commands.poll():
command = tts2_commands.recv()
cfg1['Logging']['system_logger'].info(command)
current_time = time.time()
# print
# print intro_played, end_played
# print s_voice_activity, u_voice_activity,
# print call_start, current_time, u_last_voice_activity_time, s_last_voice_activity_time
# print current_time - s_last_voice_activity_time > 5, u_last_voice_activity_time - s_last_voice_activity_time > 0
# print hangup1, s_voice_activity1, s_last_voice_activity_time1, current_time
if hangup1 and s_voice_activity1 == False and s_last_voice_activity_time1 + 2.0 < current_time:
# we are ready to hangup only when all voice activity is finished
hangup1 = False
vio1_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
if hangup2 and s_voice_activity2 == False and s_last_voice_activity_time2 + 2.0 < current_time:
# we are ready to hangup only when all voice activity is finished
hangup2 = False
vio2_commands.send(Command('hangup()', 'HUB', 'VoipIO2'))
if reject_played1 == True and s_voice_activity1 == False:
# be careful it does not hangup immediately
reject_played1 = False
vio1_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
vio1_commands.send(Command('flush()', 'HUB', 'VoipIO1'))
vad1_commands.send(Command('flush()', 'HUB', 'VAD1'))
tts1_commands.send(Command('flush()', 'HUB', 'TTS1'))
if intro_played1 and current_time - call_start1 > cfg1['Switchboard']['max_call_length']:
intro_played1 = False
# be careful it does not hangup immediately
vio1_commands.send(Command('hangup()', 'HUB', 'VoipIO1'))
vio1_commands.send(Command('flush()', 'HUB', 'VoipIO1'))
vad1_commands.send(Command('flush()', 'HUB', 'VAD1'))
tts1_commands.send(Command('flush()', 'HUB', 'TTS1'))
if intro_played2 and current_time - call_start1 > cfg1['Switchboard']['max_call_length']:
intro_played2 = False
# be careful it does not hangup immediately
vio2_commands.send(Command('hangup()', 'HUB', 'VoipIO2'))
vio2_commands.send(Command('flush()', 'HUB', 'VoipIO2'))
vad2_commands.send(Command('flush()', 'HUB', 'VAD2'))
tts2_commands.send(Command('flush()', 'HUB', 'TTS2'))
# stop processes
vio1_commands.send(Command('stop()', 'HUB', 'VoipIO1'))
vad1_commands.send(Command('stop()', 'HUB', 'VAD1'))
tts1_commands.send(Command('stop()', 'HUB', 'TTS1'))
vio2_commands.send(Command('stop()', 'HUB', 'VoipIO2'))
vad2_commands.send(Command('stop()', 'HUB', 'VAD2'))
tts2_commands.send(Command('stop()', 'HUB', 'TTS2'))
# clean connections
for c in command_connections:
while c.poll():
c.recv()
for c in non_command_connections:
while c.poll():
c.recv()
# wait for processes to stop
# do not join, because in case of exception the join will not be successful
# vio1.join()
# vad1.join()
# tts1.join()
# vio2.join()
# vad2.join()
# tts2.join()
except KeyboardInterrupt:
print 'KeyboardInterrupt exception in: %s' % multiprocessing.current_process().name
close_event.set()
return
except:
cfg1['Logging']['system_logger'].exception('Uncaught exception in SW_HUB process.')
close_event.set()
raise
print 'Exiting: %s. Setting close event' % multiprocessing.current_process().name
close_event.set()
#########################################################################
#########################################################################
if __name__ == '__main__':
import autopath
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
        The Switchboard system records a conversation between two users.
        When the first user calls the system, the system rejects the call. Then, after a few seconds,
        it calls the first user back and explains how to use the system and that the call data is recorded.
        It then asks the first user to enter the phone number of a second user. If the number is entered successfully,
        it calls the second user.
        The system calls back to the user to prevent any call charges on the users' side.
        The program reads the default config in the resources directory ('../resources/default.cfg').
        In addition, it reads all config files passed via the '-o' (caller) and '-d' (callee) arguments.
        The additional config files overwrite any default or previous values.
""")
    parser.add_argument('-o', action="store", dest="caller", nargs='+', help='additional config file(s) for the caller side')
    parser.add_argument('-d', action="store", dest="callee", nargs='+', help='additional config file(s) for the callee side')
args = parser.parse_args()
cfg1 = Config.load_configs(args.caller)
cfg2 = Config.load_configs(args.callee)
run(cfg1, cfg2)
| 46.150665
| 183
| 0.530982
|
de14ce710cb616961d4631d8dea7e6152d2ef73d
| 7,431
|
py
|
Python
|
libqtile/widget/systray.py
|
himaaaatti/qtile
|
9a8326d751042c0b59cbc0aee4a5e20e8ff03a4d
|
[
"MIT"
] | null | null | null |
libqtile/widget/systray.py
|
himaaaatti/qtile
|
9a8326d751042c0b59cbc0aee4a5e20e8ff03a4d
|
[
"MIT"
] | null | null | null |
libqtile/widget/systray.py
|
himaaaatti/qtile
|
9a8326d751042c0b59cbc0aee4a5e20e8ff03a4d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2010-2011 dequis
# Copyright (c) 2010, 2012 roger
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011-2012, 2014 Tycho Andersen
# Copyright (c) 2012 dmpayton
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2013 hbc
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from .. import bar, xcbq, window
from . import base
import xcffib
from xcffib.xproto import (ClientMessageEvent, ClientMessageData, EventMask,
SetMode)
XEMBED_PROTOCOL_VERSION = 0
class Icon(window._Window):
_windowMask = EventMask.StructureNotify | \
EventMask.PropertyChange | \
EventMask.Exposure
def __init__(self, win, qtile, systray):
window._Window.__init__(self, win, qtile)
self.systray = systray
self.update_size()
def update_size(self):
icon_size = self.systray.icon_size
self.updateHints()
try:
width = self.hints["min_width"]
height = self.hints["min_height"]
except KeyError:
width = icon_size
height = icon_size
if height > icon_size:
width = width * icon_size // height
height = icon_size
if height <= 0:
width = icon_size
height = icon_size
self.width = width
self.height = height
return False
def handle_PropertyNotify(self, e):
name = self.qtile.conn.atoms.get_name(e.atom)
if name == "_XEMBED_INFO":
info = self.window.get_property('_XEMBED_INFO', unpack=int)
if info and info[1]:
self.systray.bar.draw()
return False
def handle_DestroyNotify(self, event):
wid = event.window
del(self.qtile.windowMap[wid])
del(self.systray.icons[wid])
self.systray.bar.draw()
return False
handle_UnmapNotify = handle_DestroyNotify
class Systray(window._Window, base._Widget):
"""
    A widget that manages the system tray.
"""
_windowMask = EventMask.StructureNotify | \
EventMask.Exposure
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('icon_size', 20, 'Icon width'),
('padding', 5, 'Padding between icons'),
]
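    # Configuration sketch (illustrative; how the widget would typically appear in a
    # user's qtile config, not part of this module):
    #
    #     widget.Systray(icon_size=20, padding=5)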
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.add_defaults(Systray.defaults)
self.icons = {}
self.screen = 0
def calculate_length(self):
width = sum([i.width for i in self.icons.values()])
width += self.padding * len(self.icons)
return width
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
win = qtile.conn.create_window(-1, -1, 1, 1)
window._Window.__init__(self, xcbq.Window(qtile.conn, win.wid), qtile)
qtile.windowMap[win.wid] = self
# Even when we have multiple "Screen"s, we are setting up as the system
# tray on a particular X display, that is the screen we need to
# reference in the atom
if qtile.currentScreen:
self.screen = qtile.currentScreen.index
self.bar = bar
atoms = qtile.conn.atoms
qtile.conn.conn.core.SetSelectionOwner(
win.wid,
atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
xcffib.CurrentTime
)
data = [
xcffib.CurrentTime,
atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
win.wid, 0, 0
]
union = ClientMessageData.synthetic(data, "I" * 5)
event = ClientMessageEvent.synthetic(
format=32,
window=qtile.root.wid,
type=atoms['MANAGER'],
data=union
)
qtile.root.send_event(event, mask=EventMask.StructureNotify)
def handle_ClientMessage(self, event):
atoms = self.qtile.conn.atoms
opcode = event.type
data = event.data.data32
message = data[1]
wid = data[2]
conn = self.qtile.conn.conn
parent = self.bar.window.window
if opcode == atoms['_NET_SYSTEM_TRAY_OPCODE'] and message == 0:
w = xcbq.Window(self.qtile.conn, wid)
icon = Icon(w, self.qtile, self)
self.icons[wid] = icon
self.qtile.windowMap[wid] = icon
conn.core.ChangeSaveSet(SetMode.Insert, wid)
conn.core.ReparentWindow(wid, parent.wid, 0, 0)
conn.flush()
info = icon.window.get_property('_XEMBED_INFO', unpack=int)
if not info:
self.bar.draw()
return False
if info[1]:
self.bar.draw()
return False
def draw(self):
xoffset = self.padding
self.drawer.clear(self.background or self.bar.background)
self.drawer.draw(offsetx=self.offset, width=self.length)
for pos, icon in enumerate(self.icons.values()):
icon.window.set_attribute(backpixmap=self.drawer.pixmap)
icon.place(
self.offset + xoffset,
self.bar.height // 2 - self.icon_size // 2,
icon.width, self.icon_size,
0,
None
)
if icon.hidden:
icon.unhide()
data = [
self.qtile.conn.atoms["_XEMBED_EMBEDDED_NOTIFY"],
xcffib.xproto.Time.CurrentTime,
0,
self.bar.window.window.wid,
XEMBED_PROTOCOL_VERSION
]
u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
event = xcffib.xproto.ClientMessageEvent.synthetic(
format=32,
window=icon.window.wid,
type=self.qtile.conn.atoms["_XEMBED"],
data=u
)
self.window.send_event(event)
xoffset += icon.width + self.padding
def finalize(self):
base._Widget.finalize(self)
atoms = self.qtile.conn.atoms
self.qtile.conn.conn.core.SetSelectionOwner(
0,
atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
xcffib.CurrentTime,
)
self.hide()
| 32.880531
| 79
| 0.60113
|
f50776277715fec1de62a22333aa3ad5bbc11e08
| 3,581
|
py
|
Python
|
egs/aspire/s5/local/multi_condition/create_uniform_segments.py
|
oplatek/idlak
|
02b24dc6f79b84779e423bfbb17bdf8e70c95aec
|
[
"Apache-2.0"
] | null | null | null |
egs/aspire/s5/local/multi_condition/create_uniform_segments.py
|
oplatek/idlak
|
02b24dc6f79b84779e423bfbb17bdf8e70c95aec
|
[
"Apache-2.0"
] | null | null | null |
egs/aspire/s5/local/multi_condition/create_uniform_segments.py
|
oplatek/idlak
|
02b24dc6f79b84779e423bfbb17bdf8e70c95aec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Johns Hopkins University (Authors: Daniel Povey, Vijayaditya Peddinti). Apache 2.0.
# creates a segments file in the provided data directory, splitting each recording
# into uniform segments with the specified window length and overlap
import imp, sys, argparse, os, math, subprocess
corruptor = imp.load_source('', 'local/multi_condition/corrupt.py')
min_segment_length = 10 # in seconds
def segment(total_length, window_length, overlap = 0):
increment = window_length - overlap
num_windows = int(math.ceil(float(total_length)/increment))
segments = map(lambda x: (x * increment, min( total_length, (x * increment) + window_length)), range(0, num_windows))
if segments[-1][1] - segments[-1][0] < min_segment_length:
segments[-2] = (segments[-2][0], segments[-1][1])
segments.pop()
return segments
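# Worked example (illustrative): segment(65, 30, 5) uses an increment of 30 - 5 = 25 s
# and yields [(0, 30), (25, 55), (50, 65)]; the trailing 15 s piece is kept because it
# is longer than min_segment_length. A trailing piece shorter than 10 s would instead
# be merged into the previous window.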
def get_wave_segments(wav_command, window_length, overlap):
raw_output = subprocess.check_output(wav_command+" sox -t wav - -n stat 2>&1 | grep Length ", shell = True)
parts = raw_output.split(":")
if parts[0].strip() != "Length (seconds)":
raise Exception("Failed while processing file ", wav_command)
total_length = float(parts[1])
segments = segment(total_length, window_length, overlap)
return segments
def prepare_segments_file(kaldi_data_dir, window_length, overlap):
if not os.path.exists(kaldi_data_dir+'/wav.scp'):
raise Exception("Not a proper kaldi data directory")
ids = []
files = []
for line in open(kaldi_data_dir+'/wav.scp').readlines():
parts = line.split()
ids.append(parts[0])
files.append(" ".join(parts[1:]))
segments_total = []
segments_per_recording = []
for i in range(0, len(ids)):
segments = get_wave_segments(files[i], window_length, overlap)
segments_current_recording = []
for segment in segments:
segment_string = "{0}-{1:06}-{2:06} {0} {3} {4}".format(ids[i], int(segment[0] * 1000), int(segment[1]* 1000), segment[0], segment[1])
segments_total.append(segment_string)
segments_current_recording.append(segment_string.split()[0])
segments_per_recording.append([ids[i], segments_current_recording])
return segments_total, segments_per_recording
if __name__ == "__main__":
usage = """ Python script to create segments file with uniform segment
given the kaldi data directory."""
sys.stderr.write(str(" ".join(sys.argv)))
main_parser = argparse.ArgumentParser(usage)
parser = argparse.ArgumentParser()
parser.add_argument('--window-length', type = float, default = 30.0, help = 'length of the window used to cut the segment')
parser.add_argument('--overlap', type = float, default = 5.0, help = 'overlap of neighboring windows')
parser.add_argument('data_dir', type=str, help='directory such as data/train')
params = parser.parse_args()
# write the segments file
segments_file = open(params.data_dir+"/segments", "w")
segments, segments_per_recording = prepare_segments_file(params.data_dir, params.window_length, params.overlap)
segments_file.write("\n".join(segments))
segments_file.close()
utt2spk_file = open(params.data_dir + "/utt2spk", "w")
spk2utt_file = open(params.data_dir + "/spk2utt", "w")
# write the utt2spk file
  # assumes the recording id is the speaker id
for i in range(len(segments_per_recording)):
segments = segments_per_recording[i][1]
recording = segments_per_recording[i][0]
spk2utt_file.write("{0} {1}\n".format(recording, " ".join(segments)))
for segment in segments:
utt2spk_file.write("{0} {1}\n".format(segment, recording))
spk2utt_file.close()
utt2spk_file.close()
| 44.7625
| 140
| 0.717118
|
a22b35cfbb6504b84c6d622913070b37994b51b7
| 236
|
py
|
Python
|
dogProject/dogs/admin.py
|
cs-fullstack-2019-spring/django-models-cw-bettyjware11
|
1cdd033b598a640a932db5aece91e3f8d2770d85
|
[
"Apache-2.0"
] | null | null | null |
dogProject/dogs/admin.py
|
cs-fullstack-2019-spring/django-models-cw-bettyjware11
|
1cdd033b598a640a932db5aece91e3f8d2770d85
|
[
"Apache-2.0"
] | null | null | null |
dogProject/dogs/admin.py
|
cs-fullstack-2019-spring/django-models-cw-bettyjware11
|
1cdd033b598a640a932db5aece91e3f8d2770d85
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
# setting up admin so that I can add information and make changes
from.models import Dog
admin.site.register(Dog)
from.models import Account
admin.site.register(Account)
| 19.666667
| 65
| 0.79661
|
085a21bf18d650b5a4415c27e1f5d6bdc3daea62
| 199
|
py
|
Python
|
kirigami/__init__.py
|
marc-harary/kirigami
|
059256f7ebb083e6b21d633d8928f4144c2f02fb
|
[
"MIT"
] | null | null | null |
kirigami/__init__.py
|
marc-harary/kirigami
|
059256f7ebb083e6b21d633d8928f4144c2f02fb
|
[
"MIT"
] | 2
|
2021-01-18T03:53:35.000Z
|
2021-04-01T02:35:02.000Z
|
kirigami/__init__.py
|
marc-harary/kirigami
|
059256f7ebb083e6b21d633d8928f4144c2f02fb
|
[
"MIT"
] | null | null | null |
import warnings
warnings.filterwarnings("ignore")
from kirigami import nn
from kirigami import utils
from kirigami._cpp_utils import *
from kirigami.distance import *
from kirigami.contact import *
| 22.111111
| 33
| 0.824121
|
f4f646fe5613b7ca826cceef106e05a862face1e
| 11,886
|
py
|
Python
|
test/integration/ggrc_workflows/converters/test_export_cycle_tasks.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc_workflows/converters/test_export_cycle_tasks.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc_workflows/converters/test_export_cycle_tasks.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-02-13T12:32:45.000Z
|
2020-02-13T12:32:45.000Z
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for task group task specific export."""
from collections import defaultdict
import ddt
from ggrc.models import all_models
from integration import ggrc
from integration.ggrc.models import factories as ggrc_factories
from integration.ggrc_workflows.models import factories
@ddt.ddt
class TestExportTasks(ggrc.TestCase):
"""Test imports for basic workflow objects."""
CYCLES_TASKS_COUNT = (
# (Cycle count, tasks in cycle)
(0, 0),
(1, 2),
(2, 1),
)
def setUp(self):
super(TestExportTasks, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
@staticmethod
def generate_tasks_for_cycle(cycle_count, task_count):
"""generate seceted number of cycles and tasks"""
role_names = ("Task Assignees", "Task Secondary Assignees")
statuses = ["Assigned", "In Progress", "Finished", "Verified"]
results = {}
with ggrc_factories.single_commit():
for _ in range(cycle_count):
workflow = factories.WorkflowFactory()
cycle = factories.CycleFactory(
workflow=workflow,
)
cycle.contact = ggrc_factories.PersonFactory(
name="user for cycle {}".format(cycle.id)
)
person = ggrc_factories.PersonFactory(
name="user for cycle tasks {}".format(cycle.id)
)
task_group = factories.TaskGroupFactory(workflow=workflow)
for _ in range(task_count):
task_group_task = factories.TaskGroupTaskFactory(
task_group=task_group)
for r_name in role_names:
ggrc_factories.AccessControlPersonFactory(
person=person,
ac_list=task_group_task.acr_name_acl_map[r_name],
)
cycle_task_group = factories.CycleTaskGroupFactory(
cycle=cycle, contact=person)
task = factories.CycleTaskGroupObjectTaskFactory(
cycle=cycle,
cycle_task_group=cycle_task_group,
task_group_task=task_group_task,
status=statuses.pop()
)
for r_name in role_names:
ggrc_factories.AccessControlPersonFactory(
person=person,
ac_list=task.acr_name_acl_map[r_name],
)
results[task.id] = cycle.slug
return results
# pylint: disable=invalid-name
def assertCycles(self, field, value, cycle_slugs):
"""assertion for search cycles for selected fields and values"""
search_request = [{
"object_name": "Cycle",
"filters": {
"expression": {
"left": field,
"op": {"name": "="},
"right": value,
},
},
"fields": ["slug"],
}]
parsed_data = self.export_parsed_csv(search_request)["Cycle"]
self.assertEqual(sorted(cycle_slugs),
sorted([i["Code*"] for i in parsed_data]))
self.assertEqual(len(cycle_slugs), len(parsed_data))
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_title(self, cycle_count, task_count):
"""Test filter cycles by task slug and title"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
self.assertCycles("task title", task.title, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_status(self, cycle_count, task_count):
"""Test filter cycles by task status"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
self.assertCycles("task state", task.status, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_group_title(self, cycle_count, task_count):
"""Test filter cycles by group slug and title"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
self.assertCycles("group title", task.cycle_task_group.title, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_due_date(self, cycle_count, task_count):
"""Test filter cycles by task due date"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
due_date_dict = defaultdict(set)
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
due_date_dict[str(task.end_date)].add(slug)
for due_date, cycle_slugs in due_date_dict.iteritems():
self.assertCycles("task due date", due_date, list(cycle_slugs))
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_group_due_date(self, cycle_count, task_count):
"""Test filter cycles by group due date"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
due_date_dict = defaultdict(set)
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
due_date_dict[str(task.cycle_task_group.next_due_date)].add(slug)
for due_date, cycle_slugs in due_date_dict.iteritems():
self.assertCycles("group due date", due_date, list(cycle_slugs))
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_group_assignee(self, cycle_count, task_count):
"""Test filter cycles by group assignee name or email"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
self.assertCycles(
"group assignee", task.cycle_task_group.contact.email, [slug])
self.assertCycles(
"group assignee", task.cycle_task_group.contact.name, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_assignee(self, cycle_count, task_count):
"""Test filter cycles by task assignee name or email"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
assignees = list(self.get_persons_for_role_name(
task, "Task Assignees"))
self.assertEqual(1, len(assignees))
self.assertCycles("task assignees", assignees[0].email, [slug])
self.assertCycles("task assignees", assignees[0].name, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_secondary_assignee(self, cycle_count, task_count):
"""Test filter cycles by task secondary assignee name or email"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
s_assignees = list(self.get_persons_for_role_name(
task, "Task Secondary Assignees"))
self.assertEqual(1, len(s_assignees))
self.assertCycles("task secondary assignees",
s_assignees[0].email, [slug])
self.assertCycles("task secondary assignees",
s_assignees[0].name, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_due_date_year(self, cycle_count, task_count):
"""Test filter cycles by task due date year"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
due_date_dict = defaultdict(set)
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
key = (task.end_date.year, task.end_date.month, task.end_date.day)
due_date_dict[key].add(slug)
for due_date, cycle_slugs in due_date_dict.iteritems():
self.assertCycles("task due date",
"{}-{}-{}".format(*due_date),
list(cycle_slugs))
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_due_date_year_month(self, cycle_count, task_count):
"""Test filter cycles by task due date year month"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
due_date_dict = defaultdict(set)
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
due_date_dict[(task.end_date.year, task.end_date.month)].add(slug)
for due_date, cycle_slugs in due_date_dict.iteritems():
self.assertCycles("task due date",
"{}-{}".format(*due_date),
list(cycle_slugs))
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_cycle_assignee(self, cycle_count, task_count):
"""Test filter cycles by cycle assignee name and email"""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
self.assertEqual(bool(cycle_count), bool(task_cycle_filter))
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
self.assertCycles("cycle assignee", task.cycle.contact.email, [slug])
self.assertCycles("cycle assignee", task.cycle.contact.name, [slug])
@ddt.data(*CYCLES_TASKS_COUNT)
@ddt.unpack
def test_filter_by_task_comment(self, cycle_count, task_count):
"""Test filter cycles by task comments."""
task_cycle_filter = self.generate_tasks_for_cycle(cycle_count, task_count)
filter_params = {}
for task_id, slug in task_cycle_filter.iteritems():
task = all_models.CycleTaskGroupObjectTask.query.filter(
all_models.CycleTaskGroupObjectTask.id == task_id
).one()
comment_text = "comment for task # {}".format(task_id)
comment = ggrc_factories.CommentFactory(description=comment_text)
ggrc_factories.RelationshipFactory(source=task, destination=comment)
filter_params[comment_text] = slug
for comment_text, slug in filter_params.iteritems():
self.assertCycles("task comment", comment_text, [slug])
| 41.852113 | 78 | 0.695777 |
a936df93abe12becab9ed304a472c6008539bfe4 | 1,057 | py | Python |
h/services/groupfinder.py | rickyhan/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | ["MIT"] | 2 | 2021-11-07T23:14:54.000Z | 2021-11-17T10:11:55.000Z |
h/services/groupfinder.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | ["MIT"] | null | null | null |
h/services/groupfinder.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | ["MIT"] | 1 | 2017-03-12T00:18:33.000Z | 2017-03-12T00:18:33.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from zope.interface import implementer
from h import models
from h.interfaces import IGroupService
from h.util.db import lru_cache_in_transaction
from h.groups.util import WorldGroup
# Ideally this would be called the GroupService to match nomenclature in memex.
# FIXME: rename / split existing GroupService and rename this.
@implementer(IGroupService)
class GroupfinderService(object):
def __init__(self, session, authority):
self.session = session
self.authority = authority
self._cached_find = lru_cache_in_transaction(self.session)(self._find)
def find(self, id_):
return self._cached_find(id_)
def _find(self, id_):
if id_ == '__world__':
return WorldGroup(self.authority)
return (self.session.query(models.Group)
.filter_by(pubid=id_)
.one_or_none())
def groupfinder_service_factory(context, request):
return GroupfinderService(request.db, request.authority)
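# Editor's usage note (not part of the original module): find("__world__") short-
# circuits to a WorldGroup built from the configured authority, while any other
# pubid falls through to the Group query above, whose result is memoized for the
# lifetime of the current transaction by lru_cache_in_transaction. The factory is
# what wires the service to a Pyramid request's `db` session and `authority`.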
| 28.567568 | 79 | 0.711447 |
160e669e725837bf4951e54f2bed350f66a11d68 | 30,316 | py | Python |
tests/tracking/test_tracking.py | tsterbak/mlflow | 77bfd3eb6e130d6583ee6e91b3d88298bb6951c7 | ["Apache-2.0"] | null | null | null |
tests/tracking/test_tracking.py | tsterbak/mlflow | 77bfd3eb6e130d6583ee6e91b3d88298bb6951c7 | ["Apache-2.0"] | null | null | null |
tests/tracking/test_tracking.py | tsterbak/mlflow | 77bfd3eb6e130d6583ee6e91b3d88298bb6951c7 | ["Apache-2.0"] | null | null | null |
from collections import namedtuple
import filecmp
import json
import os
import posixpath
import random
import tempfile
import time
import yaml
import pytest
from unittest import mock
import mlflow
from mlflow import tracking
from mlflow.entities import RunStatus, LifecycleStage, Metric, Param, RunTag, ViewType
from mlflow.exceptions import MlflowException
from mlflow.store.tracking.file_store import FileStore
from mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE
from mlflow.tracking.client import MlflowClient
from mlflow.tracking.fluent import start_run
from mlflow.utils.file_utils import local_file_uri_to_path
from mlflow.utils.mlflow_tags import (
MLFLOW_PARENT_RUN_ID,
MLFLOW_USER,
MLFLOW_SOURCE_NAME,
MLFLOW_SOURCE_TYPE,
)
from mlflow.tracking.fluent import _RUN_ID_ENV_VAR
MockExperiment = namedtuple("MockExperiment", ["experiment_id", "lifecycle_stage"])
def test_create_experiment():
with pytest.raises(TypeError):
mlflow.create_experiment() # pylint: disable=no-value-for-parameter
with pytest.raises(Exception):
mlflow.create_experiment(None)
with pytest.raises(Exception):
mlflow.create_experiment("")
exp_id = mlflow.create_experiment("Some random experiment name %d" % random.randint(1, 1e6))
assert exp_id is not None
def test_create_experiment_with_duplicate_name():
name = "popular_name"
exp_id = mlflow.create_experiment(name)
with pytest.raises(MlflowException):
mlflow.create_experiment(name)
tracking.MlflowClient().delete_experiment(exp_id)
with pytest.raises(MlflowException):
mlflow.create_experiment(name)
def test_create_experiments_with_bad_names():
# None for name
    with pytest.raises(MlflowException) as e:
        mlflow.create_experiment(None)
    assert "Invalid experiment name" in str(e.value)
# empty string name
    with pytest.raises(MlflowException) as e:
        mlflow.create_experiment("")
    assert "Invalid experiment name" in str(e.value)
@pytest.mark.parametrize("name", [123, 0, -1.2, [], ["A"], {1: 2}])
def test_create_experiments_with_bad_name_types(name):
    with pytest.raises(MlflowException) as e:
        mlflow.create_experiment(name)
    assert "Invalid experiment name" in str(e.value)
@pytest.mark.usefixtures("reset_active_experiment")
def test_set_experiment():
with pytest.raises(TypeError):
mlflow.set_experiment() # pylint: disable=no-value-for-parameter
with pytest.raises(Exception):
mlflow.set_experiment(None)
with pytest.raises(Exception):
mlflow.set_experiment("")
name = "random_exp"
exp_id = mlflow.create_experiment(name)
mlflow.set_experiment(name)
with start_run() as run:
assert run.info.experiment_id == exp_id
another_name = "another_experiment"
mlflow.set_experiment(another_name)
exp_id2 = mlflow.tracking.MlflowClient().get_experiment_by_name(another_name)
with start_run() as another_run:
assert another_run.info.experiment_id == exp_id2.experiment_id
def test_set_experiment_with_deleted_experiment_name():
name = "dead_exp"
mlflow.set_experiment(name)
with start_run() as run:
exp_id = run.info.experiment_id
tracking.MlflowClient().delete_experiment(exp_id)
with pytest.raises(MlflowException):
mlflow.set_experiment(name)
def test_list_experiments():
def _assert_exps(ids_to_lifecycle_stage, view_type_arg):
result = set(
[
(exp.experiment_id, exp.lifecycle_stage)
for exp in client.list_experiments(view_type=view_type_arg)
]
)
assert result == set([(exp_id, stage) for exp_id, stage in ids_to_lifecycle_stage.items()])
experiment_id = mlflow.create_experiment("exp_1")
assert experiment_id == "1"
client = tracking.MlflowClient()
_assert_exps({"0": LifecycleStage.ACTIVE, "1": LifecycleStage.ACTIVE}, ViewType.ACTIVE_ONLY)
_assert_exps({"0": LifecycleStage.ACTIVE, "1": LifecycleStage.ACTIVE}, ViewType.ALL)
_assert_exps({}, ViewType.DELETED_ONLY)
client.delete_experiment(experiment_id)
_assert_exps({"0": LifecycleStage.ACTIVE}, ViewType.ACTIVE_ONLY)
_assert_exps({"0": LifecycleStage.ACTIVE, "1": LifecycleStage.DELETED}, ViewType.ALL)
_assert_exps({"1": LifecycleStage.DELETED}, ViewType.DELETED_ONLY)
@pytest.mark.usefixtures("reset_active_experiment")
def test_set_experiment_with_zero_id(reset_mock):
reset_mock(
MlflowClient,
"get_experiment_by_name",
mock.Mock(
return_value=MockExperiment(experiment_id=0, lifecycle_stage=LifecycleStage.ACTIVE)
),
)
reset_mock(MlflowClient, "create_experiment", mock.Mock())
mlflow.set_experiment("my_exp")
MlflowClient.get_experiment_by_name.assert_called_once()
MlflowClient.create_experiment.assert_not_called()
def test_start_run_context_manager():
with start_run() as first_run:
first_uuid = first_run.info.run_id
# Check that start_run() causes the run information to be persisted in the store
persisted_run = tracking.MlflowClient().get_run(first_uuid)
assert persisted_run is not None
assert persisted_run.info == first_run.info
finished_run = tracking.MlflowClient().get_run(first_uuid)
assert finished_run.info.status == RunStatus.to_string(RunStatus.FINISHED)
# Launch a separate run that fails, verify the run status is FAILED and the run UUID is
# different
with pytest.raises(Exception):
with start_run() as second_run:
second_run_id = second_run.info.run_id
raise Exception("Failing run!")
assert second_run_id != first_uuid
finished_run2 = tracking.MlflowClient().get_run(second_run_id)
assert finished_run2.info.status == RunStatus.to_string(RunStatus.FAILED)
def test_start_and_end_run():
# Use the start_run() and end_run() APIs without a `with` block, verify they work.
with start_run() as active_run:
mlflow.log_metric("name_1", 25)
finished_run = tracking.MlflowClient().get_run(active_run.info.run_id)
# Validate metrics
assert len(finished_run.data.metrics) == 1
assert finished_run.data.metrics["name_1"] == 25
def test_metric_timestamp():
with mlflow.start_run() as active_run:
mlflow.log_metric("name_1", 25)
mlflow.log_metric("name_1", 30)
run_id = active_run.info.run_uuid
# Check that metric timestamps are between run start and finish
client = mlflow.tracking.MlflowClient()
history = client.get_metric_history(run_id, "name_1")
finished_run = client.get_run(run_id)
assert len(history) == 2
assert all(
[
m.timestamp >= finished_run.info.start_time
and m.timestamp <= finished_run.info.end_time
for m in history
]
)
@pytest.mark.usefixtures("tmpdir")
def test_log_batch():
expected_metrics = {"metric-key0": 1.0, "metric-key1": 4.0}
expected_params = {"param-key0": "param-val0", "param-key1": "param-val1"}
exact_expected_tags = {"tag-key0": "tag-val0", "tag-key1": "tag-val1"}
approx_expected_tags = set([MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE])
t = int(time.time())
sorted_expected_metrics = sorted(expected_metrics.items(), key=lambda kv: kv[0])
metrics = [
Metric(key=key, value=value, timestamp=t, step=i)
for i, (key, value) in enumerate(sorted_expected_metrics)
]
params = [Param(key=key, value=value) for key, value in expected_params.items()]
tags = [RunTag(key=key, value=value) for key, value in exact_expected_tags.items()]
with start_run() as active_run:
run_id = active_run.info.run_id
mlflow.tracking.MlflowClient().log_batch(
run_id=run_id, metrics=metrics, params=params, tags=tags
)
client = tracking.MlflowClient()
finished_run = client.get_run(run_id)
# Validate metrics
assert len(finished_run.data.metrics) == 2
for key, value in finished_run.data.metrics.items():
assert expected_metrics[key] == value
metric_history0 = client.get_metric_history(run_id, "metric-key0")
assert set([(m.value, m.timestamp, m.step) for m in metric_history0]) == set([(1.0, t, 0)])
metric_history1 = client.get_metric_history(run_id, "metric-key1")
assert set([(m.value, m.timestamp, m.step) for m in metric_history1]) == set([(4.0, t, 1)])
# Validate tags (for automatically-set tags)
assert len(finished_run.data.tags) == len(exact_expected_tags) + len(approx_expected_tags)
for tag_key, tag_value in finished_run.data.tags.items():
if tag_key in approx_expected_tags:
pass
else:
assert exact_expected_tags[tag_key] == tag_value
# Validate params
assert finished_run.data.params == expected_params
# test that log_batch works with fewer params
new_tags = {"1": "2", "3": "4", "5": "6"}
tags = [RunTag(key=key, value=value) for key, value in new_tags.items()]
client.log_batch(run_id=run_id, tags=tags)
finished_run_2 = client.get_run(run_id)
# Validate tags (for automatically-set tags)
assert len(finished_run_2.data.tags) == len(finished_run.data.tags) + 3
for tag_key, tag_value in finished_run_2.data.tags.items():
if tag_key in new_tags:
assert new_tags[tag_key] == tag_value
def test_log_metric():
with start_run() as active_run, mock.patch("time.time") as time_mock:
time_mock.side_effect = [123 for _ in range(100)]
run_id = active_run.info.run_id
mlflow.log_metric("name_1", 25)
mlflow.log_metric("name_2", -3)
mlflow.log_metric("name_1", 30, 5)
mlflow.log_metric("name_1", 40, -2)
mlflow.log_metric("nested/nested/name", 40)
finished_run = tracking.MlflowClient().get_run(run_id)
# Validate metrics
assert len(finished_run.data.metrics) == 3
expected_pairs = {"name_1": 30, "name_2": -3, "nested/nested/name": 40}
for key, value in finished_run.data.metrics.items():
assert expected_pairs[key] == value
client = tracking.MlflowClient()
metric_history_name1 = client.get_metric_history(run_id, "name_1")
assert set([(m.value, m.timestamp, m.step) for m in metric_history_name1]) == set(
[(25, 123 * 1000, 0), (30, 123 * 1000, 5), (40, 123 * 1000, -2)]
)
metric_history_name2 = client.get_metric_history(run_id, "name_2")
assert set([(m.value, m.timestamp, m.step) for m in metric_history_name2]) == set(
[(-3, 123 * 1000, 0)]
)
def test_log_metrics_uses_millisecond_timestamp_resolution_fluent():
with start_run() as active_run, mock.patch("time.time") as time_mock:
time_mock.side_effect = lambda: 123
mlflow.log_metrics({"name_1": 25, "name_2": -3})
mlflow.log_metrics({"name_1": 30})
mlflow.log_metrics({"name_1": 40})
run_id = active_run.info.run_id
client = tracking.MlflowClient()
metric_history_name1 = client.get_metric_history(run_id, "name_1")
assert set([(m.value, m.timestamp) for m in metric_history_name1]) == set(
[(25, 123 * 1000), (30, 123 * 1000), (40, 123 * 1000)]
)
metric_history_name2 = client.get_metric_history(run_id, "name_2")
assert set([(m.value, m.timestamp) for m in metric_history_name2]) == set([(-3, 123 * 1000)])
def test_log_metrics_uses_millisecond_timestamp_resolution_client():
with start_run() as active_run, mock.patch("time.time") as time_mock:
time_mock.side_effect = lambda: 123
mlflow_client = tracking.MlflowClient()
run_id = active_run.info.run_id
mlflow_client.log_metric(run_id=run_id, key="name_1", value=25)
mlflow_client.log_metric(run_id=run_id, key="name_2", value=-3)
mlflow_client.log_metric(run_id=run_id, key="name_1", value=30)
mlflow_client.log_metric(run_id=run_id, key="name_1", value=40)
metric_history_name1 = mlflow_client.get_metric_history(run_id, "name_1")
assert set([(m.value, m.timestamp) for m in metric_history_name1]) == set(
[(25, 123 * 1000), (30, 123 * 1000), (40, 123 * 1000)]
)
metric_history_name2 = mlflow_client.get_metric_history(run_id, "name_2")
assert set([(m.value, m.timestamp) for m in metric_history_name2]) == set([(-3, 123 * 1000)])
@pytest.mark.parametrize("step_kwarg", [None, -10, 5])
def test_log_metrics_uses_common_timestamp_and_step_per_invocation(step_kwarg):
expected_metrics = {"name_1": 30, "name_2": -3, "nested/nested/name": 40}
with start_run() as active_run:
run_id = active_run.info.run_id
mlflow.log_metrics(expected_metrics, step=step_kwarg)
finished_run = tracking.MlflowClient().get_run(run_id)
# Validate metric key/values match what we expect, and that all metrics have the same timestamp
assert len(finished_run.data.metrics) == len(expected_metrics)
for key, value in finished_run.data.metrics.items():
assert expected_metrics[key] == value
common_timestamp = finished_run.data._metric_objs[0].timestamp
expected_step = step_kwarg if step_kwarg is not None else 0
for metric_obj in finished_run.data._metric_objs:
assert metric_obj.timestamp == common_timestamp
assert metric_obj.step == expected_step
@pytest.fixture
@pytest.mark.usefixtures("tmpdir")
def get_store_mock():
with mock.patch("mlflow.store.file_store.FileStore.log_batch") as _get_store_mock:
yield _get_store_mock
def test_set_tags():
exact_expected_tags = {"name_1": "c", "name_2": "b", "nested/nested/name": 5}
approx_expected_tags = set([MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE])
with start_run() as active_run:
run_id = active_run.info.run_id
mlflow.set_tags(exact_expected_tags)
finished_run = tracking.MlflowClient().get_run(run_id)
# Validate tags
assert len(finished_run.data.tags) == len(exact_expected_tags) + len(approx_expected_tags)
for tag_key, tag_val in finished_run.data.tags.items():
if tag_key in approx_expected_tags:
pass
else:
assert str(exact_expected_tags[tag_key]) == tag_val
def test_log_metric_validation():
with start_run() as active_run:
run_id = active_run.info.run_id
with pytest.raises(MlflowException) as e:
mlflow.log_metric("name_1", "apple")
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
finished_run = tracking.MlflowClient().get_run(run_id)
assert len(finished_run.data.metrics) == 0
def test_log_param():
with start_run() as active_run:
run_id = active_run.info.run_id
mlflow.log_param("name_1", "a")
mlflow.log_param("name_2", "b")
mlflow.log_param("nested/nested/name", 5)
finished_run = tracking.MlflowClient().get_run(run_id)
# Validate params
assert finished_run.data.params == {"name_1": "a", "name_2": "b", "nested/nested/name": "5"}
def test_log_params():
expected_params = {"name_1": "c", "name_2": "b", "nested/nested/name": 5}
with start_run() as active_run:
run_id = active_run.info.run_id
mlflow.log_params(expected_params)
finished_run = tracking.MlflowClient().get_run(run_id)
# Validate params
assert finished_run.data.params == {"name_1": "c", "name_2": "b", "nested/nested/name": "5"}
def test_log_batch_validates_entity_names_and_values():
bad_kwargs = {
"metrics": [
[Metric(key="../bad/metric/name", value=0.3, timestamp=3, step=0)],
[Metric(key="ok-name", value="non-numerical-value", timestamp=3, step=0)],
[Metric(key="ok-name", value=0.3, timestamp="non-numerical-timestamp", step=0)],
],
"params": [[Param(key="../bad/param/name", value="my-val")]],
"tags": [[Param(key="../bad/tag/name", value="my-val")]],
}
with start_run() as active_run:
for kwarg, bad_values in bad_kwargs.items():
for bad_kwarg_value in bad_values:
final_kwargs = {
"run_id": active_run.info.run_id,
"metrics": [],
"params": [],
"tags": [],
}
final_kwargs[kwarg] = bad_kwarg_value
with pytest.raises(MlflowException) as e:
tracking.MlflowClient().log_batch(**final_kwargs)
assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_log_artifact_with_dirs(tmpdir):
# Test log artifact with a directory
art_dir = tmpdir.mkdir("parent")
file0 = art_dir.join("file0")
file0.write("something")
file1 = art_dir.join("file1")
file1.write("something")
sub_dir = art_dir.mkdir("child")
with start_run():
artifact_uri = mlflow.get_artifact_uri()
run_artifact_dir = local_file_uri_to_path(artifact_uri)
mlflow.log_artifact(str(art_dir))
base = os.path.basename(str(art_dir))
assert os.listdir(run_artifact_dir) == [base]
assert set(os.listdir(os.path.join(run_artifact_dir, base))) == {"child", "file0", "file1"}
with open(os.path.join(run_artifact_dir, base, "file0")) as f:
assert f.read() == "something"
# Test log artifact with directory and specified parent folder
art_dir = tmpdir.mkdir("dir")
with start_run():
artifact_uri = mlflow.get_artifact_uri()
run_artifact_dir = local_file_uri_to_path(artifact_uri)
mlflow.log_artifact(str(art_dir), "some_parent")
assert os.listdir(run_artifact_dir) == [os.path.basename("some_parent")]
assert os.listdir(os.path.join(run_artifact_dir, "some_parent")) == [
os.path.basename(str(art_dir))
]
sub_dir = art_dir.mkdir("another_dir")
with start_run():
artifact_uri = mlflow.get_artifact_uri()
run_artifact_dir = local_file_uri_to_path(artifact_uri)
mlflow.log_artifact(str(art_dir), "parent/and_child")
assert os.listdir(os.path.join(run_artifact_dir, "parent", "and_child")) == [
os.path.basename(str(art_dir))
]
assert os.listdir(
os.path.join(run_artifact_dir, "parent", "and_child", os.path.basename(str(art_dir)))
) == [os.path.basename(str(sub_dir))]
def test_log_artifact():
artifact_src_dir = tempfile.mkdtemp()
# Create artifacts
_, path0 = tempfile.mkstemp(dir=artifact_src_dir)
_, path1 = tempfile.mkstemp(dir=artifact_src_dir)
for i, path in enumerate([path0, path1]):
with open(path, "w") as handle:
handle.write("%s" % str(i))
# Log an artifact, verify it exists in the directory returned by get_artifact_uri
# after the run finishes
artifact_parent_dirs = ["some_parent_dir", None]
for parent_dir in artifact_parent_dirs:
with start_run():
artifact_uri = mlflow.get_artifact_uri()
run_artifact_dir = local_file_uri_to_path(artifact_uri)
mlflow.log_artifact(path0, parent_dir)
expected_dir = (
os.path.join(run_artifact_dir, parent_dir)
if parent_dir is not None
else run_artifact_dir
)
assert os.listdir(expected_dir) == [os.path.basename(path0)]
logged_artifact_path = os.path.join(expected_dir, path0)
assert filecmp.cmp(logged_artifact_path, path0, shallow=False)
# Log multiple artifacts, verify they exist in the directory returned by get_artifact_uri
for parent_dir in artifact_parent_dirs:
with start_run():
artifact_uri = mlflow.get_artifact_uri()
run_artifact_dir = local_file_uri_to_path(artifact_uri)
mlflow.log_artifacts(artifact_src_dir, parent_dir)
# Check that the logged artifacts match
expected_artifact_output_dir = (
os.path.join(run_artifact_dir, parent_dir)
if parent_dir is not None
else run_artifact_dir
)
dir_comparison = filecmp.dircmp(artifact_src_dir, expected_artifact_output_dir)
assert len(dir_comparison.left_only) == 0
assert len(dir_comparison.right_only) == 0
assert len(dir_comparison.diff_files) == 0
assert len(dir_comparison.funny_files) == 0
@pytest.mark.parametrize("subdir", [None, ".", "dir", "dir1/dir2", "dir/.."])
def test_log_text(subdir):
filename = "file.txt"
text = "a"
artifact_file = filename if subdir is None else posixpath.join(subdir, filename)
with mlflow.start_run():
mlflow.log_text(text, artifact_file)
artifact_path = None if subdir is None else posixpath.normpath(subdir)
artifact_uri = mlflow.get_artifact_uri(artifact_path)
run_artifact_dir = local_file_uri_to_path(artifact_uri)
assert os.listdir(run_artifact_dir) == [filename]
filepath = os.path.join(run_artifact_dir, filename)
with open(filepath) as f:
assert f.read() == text
@pytest.mark.parametrize("subdir", [None, ".", "dir", "dir1/dir2", "dir/.."])
@pytest.mark.parametrize("extension", [".json", ".yml", ".yaml", ".txt", ""])
def test_log_dict(subdir, extension):
dictionary = {"k": "v"}
filename = "data" + extension
artifact_file = filename if subdir is None else posixpath.join(subdir, filename)
with mlflow.start_run():
mlflow.log_dict(dictionary, artifact_file)
artifact_path = None if subdir is None else posixpath.normpath(subdir)
artifact_uri = mlflow.get_artifact_uri(artifact_path)
run_artifact_dir = local_file_uri_to_path(artifact_uri)
assert os.listdir(run_artifact_dir) == [filename]
filepath = os.path.join(run_artifact_dir, filename)
extension = os.path.splitext(filename)[1]
with open(filepath) as f:
            loaded = yaml.load(f, Loader=yaml.SafeLoader) if (extension in [".yml", ".yaml"]) else json.load(f)
assert loaded == dictionary
def test_with_startrun():
run_id = None
t0 = int(time.time() * 1000)
with mlflow.start_run() as active_run:
assert mlflow.active_run() == active_run
run_id = active_run.info.run_id
t1 = int(time.time() * 1000)
run_info = mlflow.tracking._get_store().get_run(run_id).info
assert run_info.status == "FINISHED"
assert t0 <= run_info.end_time and run_info.end_time <= t1
assert mlflow.active_run() is None
def test_parent_create_run():
with mlflow.start_run() as parent_run:
parent_run_id = parent_run.info.run_id
os.environ[_RUN_ID_ENV_VAR] = parent_run_id
with mlflow.start_run() as parent_run:
assert parent_run.info.run_id == parent_run_id
with pytest.raises(Exception, match="To start a nested run"):
mlflow.start_run()
with mlflow.start_run(nested=True) as child_run:
assert child_run.info.run_id != parent_run_id
with mlflow.start_run(nested=True) as grand_child_run:
pass
def verify_has_parent_id_tag(child_id, expected_parent_id):
tags = tracking.MlflowClient().get_run(child_id).data.tags
assert tags[MLFLOW_PARENT_RUN_ID] == expected_parent_id
verify_has_parent_id_tag(child_run.info.run_id, parent_run.info.run_id)
verify_has_parent_id_tag(grand_child_run.info.run_id, child_run.info.run_id)
assert mlflow.active_run() is None
def test_start_deleted_run():
run_id = None
with mlflow.start_run() as active_run:
run_id = active_run.info.run_id
tracking.MlflowClient().delete_run(run_id)
    with pytest.raises(MlflowException, match="because it is in the deleted state."):
with mlflow.start_run(run_id=run_id):
pass
assert mlflow.active_run() is None
@pytest.mark.usefixtures("reset_active_experiment")
def test_start_run_exp_id_0():
mlflow.set_experiment("some-experiment")
# Create a run and verify that the current active experiment is the one we just set
with mlflow.start_run() as active_run:
exp_id = active_run.info.experiment_id
assert exp_id != FileStore.DEFAULT_EXPERIMENT_ID
assert MlflowClient().get_experiment(exp_id).name == "some-experiment"
# Set experiment ID to 0 when creating a run, verify that the specified experiment ID is honored
with mlflow.start_run(experiment_id=0) as active_run:
assert active_run.info.experiment_id == FileStore.DEFAULT_EXPERIMENT_ID
def test_get_artifact_uri_with_artifact_path_unspecified_returns_artifact_root_dir():
with mlflow.start_run() as active_run:
assert mlflow.get_artifact_uri(artifact_path=None) == active_run.info.artifact_uri
def test_get_artifact_uri_uses_currently_active_run_id():
artifact_path = "artifact"
with mlflow.start_run() as active_run:
assert mlflow.get_artifact_uri(
artifact_path=artifact_path
) == tracking.artifact_utils.get_artifact_uri(
run_id=active_run.info.run_id, artifact_path=artifact_path
)
@pytest.mark.parametrize(
"artifact_location, expected_uri_format",
[
(
"mysql://user:password@host:port/dbname?driver=mydriver",
"mysql://user:password@host:port/dbname/{run_id}/artifacts/{path}?driver=mydriver",
),
(
"mysql+driver://user:password@host:port/dbname/subpath/#fragment",
"mysql+driver://user:password@host:port/dbname/subpath/{run_id}/artifacts/{path}#fragment", # noqa
),
("s3://bucketname/rootpath", "s3://bucketname/rootpath/{run_id}/artifacts/{path}",),
("/dirname/rootpa#th?", "/dirname/rootpa#th?/{run_id}/artifacts/{path}",),
],
)
def test_get_artifact_uri_appends_to_uri_path_component_correctly(
artifact_location, expected_uri_format
):
client = MlflowClient()
client.create_experiment("get-artifact-uri-test", artifact_location=artifact_location)
mlflow.set_experiment("get-artifact-uri-test")
with mlflow.start_run():
run_id = mlflow.active_run().info.run_id
for artifact_path in ["path/to/artifact", "/artifact/path", "arty.txt"]:
artifact_uri = mlflow.get_artifact_uri(artifact_path)
assert artifact_uri == tracking.artifact_utils.get_artifact_uri(run_id, artifact_path)
assert artifact_uri == expected_uri_format.format(
run_id=run_id, path=artifact_path.lstrip("/")
)
@pytest.mark.usefixtures("reset_active_experiment")
def test_search_runs():
mlflow.set_experiment("exp-for-search")
# Create a run and verify that the current active experiment is the one we just set
logged_runs = {}
with mlflow.start_run() as active_run:
logged_runs["first"] = active_run.info.run_id
mlflow.log_metric("m1", 0.001)
mlflow.log_metric("m2", 0.002)
mlflow.log_metric("m1", 0.002)
mlflow.log_param("p1", "a")
mlflow.set_tag("t1", "first-tag-val")
with mlflow.start_run() as active_run:
logged_runs["second"] = active_run.info.run_id
mlflow.log_metric("m1", 0.008)
mlflow.log_param("p2", "aa")
mlflow.set_tag("t2", "second-tag-val")
def verify_runs(runs, expected_set):
assert set([r.info.run_id for r in runs]) == set([logged_runs[r] for r in expected_set])
experiment_id = MlflowClient().get_experiment_by_name("exp-for-search").experiment_id
# 2 runs in this experiment
assert len(MlflowClient().list_run_infos(experiment_id, ViewType.ACTIVE_ONLY)) == 2
# 2 runs that have metric "m1" > 0.001
runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.0001")
verify_runs(runs, ["first", "second"])
# 1 run with has metric "m1" > 0.002
runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.002")
verify_runs(runs, ["second"])
# no runs with metric "m1" > 0.1
runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.1")
verify_runs(runs, [])
# 1 run with metric "m2" > 0
runs = MlflowClient().search_runs([experiment_id], "metrics.m2 > 0")
verify_runs(runs, ["first"])
# 1 run each with param "p1" and "p2"
runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ALL)
verify_runs(runs, ["first"])
runs = MlflowClient().search_runs([experiment_id], "params.p2 != 'a'", ViewType.ALL)
verify_runs(runs, ["second"])
runs = MlflowClient().search_runs([experiment_id], "params.p2 = 'aa'", ViewType.ALL)
verify_runs(runs, ["second"])
# 1 run each with tag "t1" and "t2"
runs = MlflowClient().search_runs([experiment_id], "tags.t1 = 'first-tag-val'", ViewType.ALL)
verify_runs(runs, ["first"])
runs = MlflowClient().search_runs([experiment_id], "tags.t2 != 'qwerty'", ViewType.ALL)
verify_runs(runs, ["second"])
runs = MlflowClient().search_runs([experiment_id], "tags.t2 = 'second-tag-val'", ViewType.ALL)
verify_runs(runs, ["second"])
# delete "first" run
MlflowClient().delete_run(logged_runs["first"])
runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ALL)
verify_runs(runs, ["first"])
runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.DELETED_ONLY)
verify_runs(runs, ["first"])
runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ACTIVE_ONLY)
verify_runs(runs, [])
@pytest.mark.usefixtures("reset_active_experiment")
def test_search_runs_multiple_experiments():
experiment_ids = [mlflow.create_experiment("exp__{}".format(exp_id)) for exp_id in range(1, 4)]
for eid in experiment_ids:
with mlflow.start_run(experiment_id=eid):
mlflow.log_metric("m0", 1)
mlflow.log_metric("m_{}".format(eid), 2)
assert len(MlflowClient().search_runs(experiment_ids, "metrics.m0 > 0", ViewType.ALL)) == 3
assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_1 > 0", ViewType.ALL)) == 1
assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_2 = 2", ViewType.ALL)) == 1
assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_3 < 4", ViewType.ALL)) == 1
| 41.585734 | 111 | 0.686271 |
b04e93595e8335001b59e2ad8890c85d239ddfb3 | 2,265 | py | Python |
plugin.video.hellokoko/tests/kokolib_test.py | jabiel/kodi | f1fc8b74e4313705c2b928914ca6a6bcd56df0a3 | ["MIT"] | null | null | null |
plugin.video.hellokoko/tests/kokolib_test.py | jabiel/kodi | f1fc8b74e4313705c2b928914ca6a6bcd56df0a3 | ["MIT"] | null | null | null |
plugin.video.hellokoko/tests/kokolib_test.py | jabiel/kodi | f1fc8b74e4313705c2b928914ca6a6bcd56df0a3 | ["MIT"] | null | null | null |
import unittest
from resources.lib import kokolib
class Test_kokolib_test(unittest.TestCase):
def test_decryptDefalc13(self):
zz = kokolib.decryptDefalc13('Vidto^$2^$0^$%@%ivqgb.zr/rzorq-r4ldnpru45d9-640k360.ugzy^%^Vidto^$2^$1^$%@%ivqgb.zr/rzorq-u65w347cbm3k-640k360.ugzy^%^Vidto^$0^$0^$%@%ivqgb.zr/rzorq-oqvycqder6sk-640k360.ugzy^%^Vidto^$0^$0^$^%^Fileone^$2^$0^$%@%svyrbar.gi/i/59dc690317a2a^%^Fileone^$2^$1^$%@%svyrbar.gi/i/59dcad5c9a3f3^%^Fileone^$0^$0^$%@%svyrbar.gi/i/59e36f3a17120^%^Fileone^$0^$0^$^%^Videowood^$2^$0^$%@%ivqrbjbbq.gi/rzorq/1nb4h^%^Videowood^$2^$1^$^%^Videowood^$0^$0^$^%^Videowood^$0^$0^$^%^Speedvid VIP^$0^$0^$^%^Speedvid VIP^$0^$0^$^%^Speedvid VIP^$0^$0^$^%^Speedvid VIP^$0^$0^$^%^Uptobox VIP^$0^$0^$^%^Uptobox VIP^$0^$0^$^%^Uptobox VIP^$0^$0^$^%^Uptobox VIP^$0^$0^$^%^Stormo VIP^$2^$1^$f%@%jjj.fgbezb.gi/rzorq/194690/^%^Stormo VIP^$2^$2^$f%@%jjj.fgbezb.gi/rzorq/194658/^%^Stormo VIP^$2^$3^$f%@%jjj.fgbezb.gi/rzorq/194670/^%^Stormo VIP^$0^$0^$^%^Stormo VIP^$0^$0^$^%^Stormo VIP^$0^$0^$^%^Streamango VIP^$2^$0^$f%@%fgernznatb.pbz/rzorq/xfoyyezycrbadaqb^%^Streamango VIP^$2^$1^$f%@%fgernznatb.pbz/rzorq/sgrqoobgagefexqg^%^Streamango VIP^$2^$2^$f%@%fgernznatb.pbz/rzorq/scsfnnfsbxerpgzq^%^Streamango VIP^$2^$3^$f%@%fgernznatb.pbz/rzorq/cbxdzonqyeqrrxap^%^Streamango VIP^$0^$0^$f%@%fgernznatb.pbz/rzorq/yfpcnnznsyrdaxbc/Jne_sbe_gur_Cynarg_bs_gur_Ncrf_2017_CY_OQEvc_KivQ-XvG_1_niv^%^Streamango VIP^$0^$0^$^%^Streamango VIP^$0^$0^$^%^Streamango VIP^$0^$0^$^%^Openload VIP^$2^$0^$f%@%bcraybnq.pb/rzorq/3FNTTj6IusV^%^Openload VIP^$2^$1^$f%@%bcraybnq.pb/rzorq/G8F2VGPmrBZ^%^Openload VIP^$2^$2^$f%@%bcraybnq.pb/rzorq/OWz8PqwGUhH^%^Openload VIP^$2^$3^$f%@%bcraybnq.pb/rzorq/8HubkwO3EHf^%^Openload VIP^$0^$0^$f%@%bcraybnq.pb/rzorq/aEJqe4wl4Kt/Jne.sbe.gur.Cynarg.bs.gur.Ncrf.2017.CY.OQEvc.KivQ-XvG_%281%29.niv^%^Openload VIP^$0^$0^$^%^Openload VIP^$0^$2^$f%@%bcraybnq.pb/rzorq/oVvdV2QyqHt/Jne.sbe.gur.Cynarg.bs.gur.Ncrf.2017.CY.720c.OyhEnl.k264.NP3-XvG.zxi^%^Openload [3D] VIP^$7^$3^$f%@%bcraybnq.pb/rzorq/SSzv9Ghcy_R/fxbcvhw_yvax_m_bcraybnq.gkg^%^');
self.assertTrue(isinstance(zz, list)) # is list
self.assertEqual(22, len(zz))
self.assertEqual('Vidto', zz[0][0])
if __name__ == '__main__':
unittest.main()
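# Editor's side note (not part of the original test): the host names embedded in the
# payload above appear to be ROT13-encoded, which is easy to verify by hand:
#   import codecs
#   codecs.decode('ivqgb.zr', 'rot13')  # -> 'vidto.me'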
| 161.785714 | 1,946 | 0.688742 |
a3fd58cca965e84de1056d11dd9e05676711d3cc | 161 | py | Python |
send.py | dyike/CTEmail | d94416401198393df01f143047acb1fb7c227492 | ["MIT"] | 47 | 2017-10-15T08:23:55.000Z | 2021-03-21T04:05:25.000Z |
send.py | bobo18801737494/CTEmail | d94416401198393df01f143047acb1fb7c227492 | ["MIT"] | null | null | null |
send.py | bobo18801737494/CTEmail | d94416401198393df01f143047acb1fb7c227492 | ["MIT"] | 7 | 2017-10-16T02:23:12.000Z | 2020-07-08T13:32:28.000Z |
from ctemail import CTEmail
e = CTEmail('Your email acount', 'Your password')
# " ./content/ 邮件文件的路径 "
e.send_email('Test Email', './content/', ['i@ityike.com'])
| 40.25 | 58 | 0.68323 |
d78a6d74af88e0eea959ab35722d7c8d807c00a2 | 3,091 | py | Python |
nosferatu/views.py | DanCardin/nosferatu | 7a81cfb0871e5a50649cde1a9585140e8b88d442 | ["Apache-2.0"] | null | null | null |
nosferatu/views.py | DanCardin/nosferatu | 7a81cfb0871e5a50649cde1a9585140e8b88d442 | ["Apache-2.0"] | null | null | null |
nosferatu/views.py | DanCardin/nosferatu | 7a81cfb0871e5a50649cde1a9585140e8b88d442 | ["Apache-2.0"] | null | null | null |
import logging
from flask import render_template, jsonify, request
from flask_user import login_required, current_user
from flask_user.signals import user_sent_invitation, user_registered
from . import app, cache, db
from .models import Node
from .tasks import *
log = logging.getLogger('debug')
@user_registered.connect_via(app)
def after_registered_hook(sender, user, user_invite):
log.info('USER REGISTERED')
@user_sent_invitation.connect_via(app)
def after_invitation_hook(sender, **kwargs):
log.info('USER SENT INVITATION')
@app.route('/', methods=['GET', 'POST'])
@login_required
def index():
return render_template('index.html')
@app.route('/nodes/get', methods=['GET'])
@login_required
def get_nodes():
return jsonify(get_nodes_task())
@app.route('/nodes/add', methods=['POST'])
@login_required
def add_node():
print('Beginning add node', request.json)
node = request.json
result = add_node_task(node, current_user.id)
return jsonify(id=result['id'])
@app.route('/nodes/find', methods=['POST'])
@login_required
def search_for_nodes():
return jsonify(find_nodes_task())
@app.route('/nodes/<int:node_id>', methods=['GET', 'DELETE'])
@login_required
def get_node(node_id):
if request.method == 'GET':
print('Getting the node', node_id)
return jsonify(get_node_task(node_id))
elif request.method == 'DELETE':
print('Deleting the node')
return jsonify(delete_node_task(node_id))
@app.route('/nodes/test', methods=['POST'])
@login_required
def test_start():
test_node_task(request.json)
return 'SUCCESS', 200
@app.route('/nodes/<node_id>/motion', methods=['POST'])
@login_required
def change_node_motion(node_id):
change_motion_task(node_id, request.json)
return 'SUCCESS', 200
@app.route('/nodes/<node_id>/toggle', methods=['POST'])
@login_required
def toggle_node(node_id):
toggle_node_task(node_id)
return 'SUCCESS', 200
@app.route('/nodes/<int:node_id>/status', methods=['GET'])
@login_required
def get_node_status(node_id):
return jsonify(get_node_status_task(node_id))
@app.route('/nodes/<node_id>/rules', methods=['GET', 'POST'])
@login_required
def add_rule(node_id):
if request.method == 'GET':
print('Beginning get all rules', node_id)
result = get_all_rules_task(node_id)
print(' - all the rules', result)
return jsonify(result)
elif request.method == 'POST':
print('Beginning add rule', node_id, request.json)
rule = request.json
return jsonify(add_rule_task(node_id, rule))
@app.route('/nodes/<int:node_id>/rules/<int:rule_id>', methods=['GET', 'DELETE'])
@login_required
def get_single_rule(node_id, rule_id):
if request.method == 'GET':
print('Beginning get rule', node_id)
result = get_rule_task(node_id, rule_id)
print(' - this singluar gotten node', result)
return jsonify(result)
elif request.method == 'DELETE':
print('Deleting rule', node_id, request.args)
return jsonify(delete_rule_task(node_id, rule_id))
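# Editor's client-side sketch (not part of this module): how the JSON routes above
# could be exercised with the `requests` library. The base URL and the node payload
# are assumptions, and the flask_user login step required by @login_required is omitted.
def _client_sketch():
    import requests
    base_url = 'http://localhost:5000'
    resp = requests.post(base_url + '/nodes/add', json={'name': 'porch-light'})
    node_id = resp.json()['id']
    node = requests.get('{}/nodes/{}'.format(base_url, node_id)).json()
    print(node)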
| 26.878261 | 81 | 0.697185 |
16ef6e44413a8ecbaac274b3ca1579e4f4b528ac | 1,491 | py | Python |
setup.py | jugmac00/flask-wtf | c25fdf0efabbf367a67b60706ef254e5c5dbd5b7 | ["BSD-3-Clause"] | 1 | 2015-02-28T18:48:37.000Z | 2015-02-28T18:48:37.000Z |
setup.py | jugmac00/flask-wtf | c25fdf0efabbf367a67b60706ef254e5c5dbd5b7 | ["BSD-3-Clause"] | null | null | null |
setup.py | jugmac00/flask-wtf | c25fdf0efabbf367a67b60706ef254e5c5dbd5b7 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
with open('README.rst') as f:
readme = f.read()
setup(
name='Flask-WTF',
version='0.14.3',
url='https://github.com/lepture/flask-wtf',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Hsiaoming Yang',
maintainer_email='me@lepture.com',
description='Simple integration of Flask and WTForms.',
long_description=readme,
packages=find_packages(exclude=('tests',)),
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'WTForms',
'itsdangerous',
],
extras_require={'email': ['email-validator']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 33.133333 | 70 | 0.612341 |
4ead118d321e2b6eddc74f2f91f7cadde9ba6699 | 785 | py | Python |
hhlevents/apps/hhlregistrations/migrations/0008_auto_20150412_2257.py | HelsinkiHacklab/hhlevents | 94e7092963670d7689bb975e238a0ed2b35a4692 | ["BSD-3-Clause"] | 1 | 2021-02-14T13:16:36.000Z | 2021-02-14T13:16:36.000Z |
hhlevents/apps/hhlregistrations/migrations/0008_auto_20150412_2257.py | HelsinkiHacklab/hhlevents | 94e7092963670d7689bb975e238a0ed2b35a4692 | ["BSD-3-Clause"] | 7 | 2015-12-02T11:43:49.000Z | 2019-01-05T20:41:35.000Z |
hhlevents/apps/hhlregistrations/migrations/0008_auto_20150412_2257.py | hacklab-fi/hhlevents | 94e7092963670d7689bb975e238a0ed2b35a4692 | ["BSD-3-Clause"] | 2 | 2015-11-27T20:20:46.000Z | 2015-11-28T16:34:42.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('hhlregistrations', '0007_auto_20150412_2220'),
]
operations = [
migrations.AddField(
model_name='event',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='person',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='registration',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
]
| 25.322581 | 71 | 0.588535 |
86604625f2bc1a227b767f781365c0e542589abc | 2,519 | py | Python |
imagededup/utils/image_utils.py | backwardn/imagededup | 81d383ec0774d62439eb34ca1fab21b23d83bacd | ["Apache-2.0"] | 2 | 2020-06-12T11:20:46.000Z | 2020-06-18T15:54:28.000Z |
imagededup/utils/image_utils.py | ndcuong91/imagededup | b6c62eb0f7b49c4f484963efb54c0a118288dc44 | ["Apache-2.0"] | null | null | null |
imagededup/utils/image_utils.py | ndcuong91/imagededup | b6c62eb0f7b49c4f484963efb54c0a118288dc44 | ["Apache-2.0"] | 1 | 2020-10-07T12:33:16.000Z | 2020-10-07T12:33:16.000Z |
from pathlib import PurePath
from typing import List, Union, Tuple
import numpy as np
from PIL import Image
from imagededup.utils.logger import return_logger
IMG_FORMATS = ['JPEG', 'PNG', 'BMP', 'MPO', 'PPM', 'TIFF', 'GIF']
logger = return_logger(__name__)
def preprocess_image(
image, target_size: Tuple[int, int] = None, grayscale: bool = False
) -> np.ndarray:
"""
Take as input an image as numpy array or Pillow format. Returns an array version of optionally resized and grayed
image.
Args:
image: numpy array or a pillow image.
target_size: Size to resize the input image to.
grayscale: A boolean indicating whether to grayscale the image.
Returns:
A numpy array of the processed image.
"""
if isinstance(image, np.ndarray):
image = image.astype('uint8')
image_pil = Image.fromarray(image)
elif isinstance(image, Image.Image):
image_pil = image
else:
raise ValueError('Input is expected to be a numpy array or a pillow object!')
if target_size:
image_pil = image_pil.resize(target_size, Image.ANTIALIAS)
if grayscale:
image_pil = image_pil.convert('L')
return np.array(image_pil).astype('uint8')
def load_image(
image_file: Union[PurePath, str],
target_size: Tuple[int, int] = None,
grayscale: bool = False,
img_formats: List[str] = IMG_FORMATS,
) -> np.ndarray:
"""
Load an image given its path. Returns an array version of optionally resized and grayed image. Only allows images
of types described by img_formats argument.
Args:
image_file: Path to the image file.
target_size: Size to resize the input image to.
grayscale: A boolean indicating whether to grayscale the image.
img_formats: List of allowed image formats that can be loaded.
"""
try:
img = Image.open(image_file)
# validate image format
if img.format not in img_formats:
logger.warning(f'Invalid image format {img.format}!')
return None
else:
if img.mode != 'RGB':
# convert to RGBA first to avoid warning
# we ignore alpha channel if available
img = img.convert('RGBA').convert('RGB')
img = preprocess_image(img, target_size=target_size, grayscale=grayscale)
return img
except Exception as e:
logger.warning(f'Invalid image file {image_file}:\n{e}')
return None
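# Editor's usage sketch (not part of this module): load an image from disk, resizing
# and converting to grayscale in one call. 'photo.jpg' is a placeholder path.
if __name__ == '__main__':
    arr = load_image('photo.jpg', target_size=(64, 64), grayscale=True)
    if arr is not None:
        print(arr.shape, arr.dtype)  # (64, 64) uint8 for a readable, supported image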
| 29.988095 | 117 | 0.649861 |
b8f1c83e4340e4fde78398ec4382df8029a880db | 1,048 | py | Python |
script/run_CSO.py | cyy111/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | ["Apache-2.0"] | 104 | 2020-09-07T01:24:19.000Z | 2022-03-30T13:11:21.000Z |
script/run_CSO.py | luanedge/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | ["Apache-2.0"] | 3 | 2020-05-12T03:54:16.000Z | 2020-06-06T01:12:31.000Z |
script/run_CSO.py | luanedge/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | ["Apache-2.0"] | 40 | 2020-08-30T14:29:37.000Z | 2022-03-30T17:33:26.000Z |
from models.multiple_solution.swarm_based.CSO import BaseCSO
from utils.FunctionUtil import square_function
## Setting parameters
root_paras = {
"problem_size": 30,
"domain_range": [-1, 1],
"print_train": True,
"objective_func": square_function
}
cso_paras = {
"epoch": 100,
"pop_size": 250,
"mixture_ratio": 0.15, # MR - joining seeking mode with tracing mode
"smp": 50, # seeking memory pool, 50 clones (larger is better, but need more time)
"spc": True, # self-position considering
"cdc": 0.8, # counts of dimension to change (larger is better)
"srd": 0.15, # seeking range of the selected dimension (lower is better, but need more time)
"c1": 0.4,
"w_minmax": [0.4, 0.9], # [0-1] -> [0.4-0.9] Weight of bird
"selected_strategy": 0 # 0: roulette wheel, 1: random, 2: best fitness, 3: tournament
}
## Run model
md = BaseCSO(root_algo_paras=root_paras, cso_paras=cso_paras)
md._train__()
| 34.933333 | 114 | 0.611641 |
d6d744ebe1b2efbf7246b78c9f99375198d6201b | 171 | py | Python |
src/secml/testing/__init__.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | ["Apache-2.0"] | 63 | 2020-04-20T16:31:16.000Z | 2022-03-29T01:05:35.000Z |
src/secml/testing/__init__.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | ["Apache-2.0"] | 5 | 2020-04-21T11:31:39.000Z | 2022-03-24T13:42:56.000Z |
src/secml/testing/__init__.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | ["Apache-2.0"] | 8 | 2020-04-21T09:16:42.000Z | 2022-02-23T16:28:43.000Z |
try:
import pytest
except ImportError:
raise ImportError(
"Install extra component `unittests` to use `secml.testing`")
from .c_unittest import CUnitTest
| 21.375 | 69 | 0.725146 |
3345a94c4dce308e390100fadbb1befb55a71f0e | 4,824 | py | Python |
utils/center.py | qychen13/ClusterAlignReID | 9dca1a39b7f1035c9579d80bbb73aa45480a616c | ["MIT"] | 15 | 2020-08-24T22:47:39.000Z | 2021-04-19T07:51:32.000Z |
utils/center.py | qychen13/ClusterAlignReID | 9dca1a39b7f1035c9579d80bbb73aa45480a616c | ["MIT"] | 1 | 2021-10-14T03:07:12.000Z | 2021-11-05T13:59:55.000Z |
utils/center.py | qychen13/ClusterAlignReID | 9dca1a39b7f1035c9579d80bbb73aa45480a616c | ["MIT"] | 1 | 2020-08-26T02:48:40.000Z | 2020-08-26T02:48:40.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from tqdm import tqdm
from .evaluation import fliplr
def calculate_id_features(feature_extractor, training_iterator, gpu_ids, method='avg', flips=True):
print('==> extracting training features...')
with torch.no_grad():
fets, targets, probs = extract_features_for_id(
feature_extractor, training_iterator, gpu_ids, is_test=False, flips=flips)
id_features = defaultdict(list)
prob_dict = defaultdict(list)
for i in range(fets.shape[0]):
id_features[targets['pid'][i].item()].append(fets[i])
prob_dict[targets['pid'][i].item()].append(probs[i])
id_features = [torch.stack(id_features[i], dim=0) for i in range(len(id_features))]
prob_dict = [torch.stack(prob_dict[i], dim=0) for i in range(len(prob_dict))]
if method == 'avg':
id_features = [id_fet.mean(dim=0) for id_fet in id_features]
elif method == 'weight-prob':
id_features = [(id_fet*weight.unsqueeze(1)).sum(dim=0)/weight.sum() for id_fet, weight in zip(id_features, prob_dict)]
else:
raise NotImplementedError
id_features = torch.stack(id_features, dim=0)
print('==> calculate ID features done...')
return id_features
def update_id_features(fet, target, moumentum=0.9):
if isinstance(fet, dict):
fet = fet['features']
with torch.no_grad():
id_features = target['id_feature_dict']
id_set = set(target['pid'][i] for i in range(target['pid'].shape[0]))
for pid in id_set:
            id_feature_update = fet[target['pid'] == pid].mean(0)
id_features[pid] = moumentum * id_features[pid] + (1 - moumentum) * id_feature_update
return id_features
def extract_features_for_id(feature_extractor, data_iterator, gpu_ids, is_test, flips=False):
feature_extractor.eval()
fets = []
probs = []
targets = defaultdict(list)
if gpu_ids is not None:
feature_extractor.cuda(gpu_ids[0])
with torch.no_grad():
one_hot_class = None
for ipt, target in tqdm(data_iterator):
if gpu_ids is not None:
if len(gpu_ids) == 1:
ipt = ipt.cuda(gpu_ids[0], non_blocking=True)
for key in target:
if isinstance(target[key], list):
continue
target[key] = target[key].cuda(
gpu_ids[0], non_blocking=True)
if gpu_ids is not None:
opt = nn.parallel.data_parallel(
feature_extractor, ipt, gpu_ids)
opt1 = nn.parallel.data_parallel(
feature_extractor, fliplr(ipt), gpu_ids)
else:
opt = feature_extractor(ipt)
opt1 = feature_extractor(fliplr(ipt))
if 'features_test' in opt and is_test:
#print('==> use features_test')
key = 'features_test'
else:
key = 'features'
if one_hot_class is None:
if isinstance(opt['logits'], list):
logits = opt['logits'][0]
else:
logits = opt['logits']
num_classes = logits.shape[1]
one_hot_class = torch.eye(num_classes, device=logits.device)
fet = opt[key]
fet1 = opt1[key]
labels = target['pid']
pos_mask = one_hot_class[labels].type(torch.bool)
if isinstance(opt['logits'], list):
prob = [F.softmax(opt['logits'][i], dim=1).masked_select(pos_mask) for i in range(len(opt['logits']))]
prob = torch.stack(prob, 1)
prob1 = [F.softmax(opt1['logits'][i], dim=1).masked_select(pos_mask) for i in range(len(opt['logits']))]
prob1 = torch.stack(prob1, 1)
else:
prob = F.softmax(opt['logits'], dim=1).masked_select(pos_mask)
prob1 = F.softmax(opt1['logits'], dim=1).masked_select(pos_mask)
if flips:
fet += fet1
fet /= 2
prob += prob1
prob /=2
fets.append(fet)
probs.append(prob)
for key in target:
targets[key].append(target[key])
fets = torch.cat(fets, 0)
probs = torch.cat(probs)
for key in targets:
if isinstance(targets[key][0], list):
temp = []
for t in targets[key]:
temp += t
targets[key] = temp
else:
targets[key] = torch.cat(targets[key], 0)
return fets, targets, probs
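# Editor's numeric sketch (not part of this module): the exponential moving average
# applied by update_id_features is new_center = m * old_center + (1 - m) * batch_mean.
# The tensors below are toy stand-ins for one identity's center and batch feature mean.
def _ema_update_sketch(momentum=0.9):
    old_center = torch.zeros(4)
    batch_mean = torch.ones(4)
    new_center = momentum * old_center + (1 - momentum) * batch_mean
    return new_center  # tensor([0.1, 0.1, 0.1, 0.1])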
| 39.219512 | 130 | 0.560945 |
e0fa8df2847b2754005b438eb96965db9cc8aa1f | 12,088 | py | Python |
cpas_toolbox/asmnet/asm_pcd/asm.py | roym899/pose_and_shape_evaluation | a55c9452b54b64715f817d9dd80d098472ab56bf | ["MIT"] | 5 | 2022-02-22T09:48:01.000Z | 2022-03-28T12:41:44.000Z |
cpas_toolbox/asmnet/asm_pcd/asm.py | roym899/pose_and_shape_evaluation | a55c9452b54b64715f817d9dd80d098472ab56bf | ["MIT"] | 2 | 2022-02-23T07:31:00.000Z | 2022-03-10T09:11:54.000Z |
cpas_toolbox/asmnet/asm_pcd/asm.py | roym899/pose_and_shape_evaluation | a55c9452b54b64715f817d9dd80d098472ab56bf | ["MIT"] | 1 | 2021-11-29T03:45:31.000Z | 2021-11-29T03:45:31.000Z |
"""
Active Shape Model for point cloud deformation
Shuichi Akizuki, Chukyo Univ.
Email: s-akizuki@sist.chukyo-u.ac.jp
"""
import numpy as np
from sklearn.decomposition import PCA
import copy
import os.path as osp
from math import *
import open3d as o3
import cv2
class ActiveShapeModel():
"""
Required packages:
import numpy as np
from sklearn.decomposition import PCA
import copy
"""
def __init__( self, clouds ):
"""
clouds: a list of open3d point cloud
"""
self.clouds = clouds
self.mean_size = 0.0
flag, n_points = self.check_n_points()
if flag == False:
print("Error!! Number of points in the training set are not same.")
print( n_points )
exit()
# M: number of models
M = self.get_n_pcd()
self.X = np.zeros([M,len(self.clouds[0].points)*3])
for i, c in enumerate(self.clouds):
self.X[i] = self.to_vec(c)
# Standardization
self.mean_size = self.X.mean()
self.std = self.X.std()
self.X2 = (self.X - self.mean_size)/self.std
self.pca = self.PCA(self.X2)
def get_X(self):
"""
Return features (Nx3, M)
"""
return self.X
def check_n_points( self ):
"""
Check number of points in the training set. Assume same
Return:
flag: True(same) or False (not same)
list: a list that stored points
"""
n_points = []
for c in self.clouds:
n_points.append(len(c.points))
n_points = np.array(n_points)
return n_points.all(), n_points
def get_n_pcd( self ):
"""
Return:
# point clouds in the training set
"""
return len(self.clouds)
def get_mean_size( self ):
"""
Calc the mean size of shapes
"""
# Normalize by mean size
M = self.get_n_pcd()
norms = np.zeros(M)
for i, x in enumerate(self.X):
norms[i] = np.linalg.norm(x,ord=2)
return np.mean(norms)
def to_pcd( self, vec ):
"""
Convert (Nx3,) vector to open3d pcd
"""
vec3d = vec.reshape((-1,3))
vec3d = (vec3d * self.std )+self.mean_size
pcd = o3.geometry.PointCloud()
pcd.points = o3.utility.Vector3dVector(vec3d)
return pcd
def to_vec( self, pcd ):
"""
Convert open3d pcd to (Nx3,) vector
"""
tmp = np.asarray(pcd.points)
vec = tmp.reshape(tmp.shape[0]*3)
return vec
def PCA( self, X ):
"""
Apply PCA
"""
pca = PCA()
x_pca = pca.fit(X)
return pca
def get_components(self):
return self.pca.components_
def get_mean(self):
return self.pca.mean_
def get_explained_variance_ratio(self):
return self.pca.explained_variance_ratio_
def deformation( self, param, n_dim=10 ):
"""
Shape defomation by deformation parameter
Input:
param: deformation param (ndim,)
n_dim: # dimension
"""
weight = self.get_components()
deformed = copy.deepcopy(self.get_mean())
cnt = 0
for w,p in zip(weight, param):
if n_dim == cnt:
break
deformed += w*p
cnt+=1
cloud_deformed = self.to_pcd(deformed)
return cloud_deformed
def projection(self,data):
"""
Projection data which is converted by to_vec()
to the latent space.
"""
# Standardization
data2 = (data - self.mean_size)/self.std
return self.pca.transform([data2])
def get_all_projection(self):
"""
Get all projection at a time
each row indicates a projection of data
"""
projections = np.zeros((self.X.shape[0],self.X.shape[0]))
for i, x in enumerate(self.X):
p = self.projection(x)
projections[i] = p
return projections
def get_asm_info( self ):
"""
Get a dictionary data consist of
the mean shape, components, and size info.
"""
asm_info = {}
asm_info["mean_shape"] = self.get_mean()
asm_info["components"] = self.get_components()
asm_info["size_mean"] = self.mean_size
asm_info["size_std"] = self.std
return asm_info
def save_asm_info( self, name ):
info = self.get_asm_info()
np.savez( name,
mean_shape=info["mean_shape"],
components=info["components"],
size_mean=info["size_mean"],
size_std=info["size_std"]
)
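# Editor's illustration (not part of the original module): the deformation() methods
# above implement the standard linear point-distribution model, x = x_mean + sum_i b_i * w_i,
# evaluated in the standardized space and de-standardized by to_pcd().
# A purely numeric sketch of that reconstruction with made-up numbers:
def _linear_model_sketch():
    mean_shape = np.zeros(6)                  # flattened (N*3,) mean shape, N = 2 points
    components = np.eye(2, 6)                 # two eigen-directions, one per row
    b = np.array([0.5, -0.2])                 # deformation parameters
    deformed = mean_shape + components.T @ b  # x_mean + sum_i b_i * w_i
    return deformed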
def load_asmds( root, synset_names ):
""" 複数のSSMの読み込み
Args:
root(str): データセットのルートディレクトリ
synset_names(str): クラスのリスト.冒頭はBGなので無視する.
Return:
dict:SSMDeformationの辞書変数
"""
print("Root dir:", root )
asmds = {}
for s in range(len(synset_names)-1):
paths = set_paths( root, synset_names[s+1] )
trainset_path = paths["trainset_path"]
info = np.load( osp.join(trainset_path,"info.npz"))
asmd = ASMdeformation( info )
asmds[synset_names[s+1]] = asmd
return asmds
# Sample a deformation parameter vector with probabilities based on the mean and
# standard deviation of the training parameters.
# Input
#   params: a matrix in which each row is a deformation parameter vector
def generate_parameter( params ):
param_mean = np.mean(params,axis=0)
param_std = np.std(params,axis=0)
b = np.random.normal(param_mean, param_std)
return b
# Sample a deformation parameter uniformly within the column-wise MIN-MAX
# range of the given parameters.
# Input:
#   params: matrix whose rows are deformation parameter vectors
def generate_parameter_minmax( params ):
param_min = np.min(params,axis=0)
param_max = np.max(params,axis=0)
b = np.random.uniform(param_min, param_max)
return b
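# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative only).  It assumes the enclosing training
# class is called ``ASM`` (as the docstring of generate_latent_space_image
# suggests) and that its constructor accepts a list of open3d point clouds
# with identical point counts; the .ply paths are placeholders.
def _example_train_and_sample():
    import open3d as o3

    clouds = [o3.io.read_point_cloud(p)
              for p in ("shape_0.ply", "shape_1.ply", "shape_2.ply")]
    asm = ASM(clouds)                  # fit PCA on the standardized shape vectors
    asm.save_asm_info("info")          # writes info.npz for ASMdeformation below

    # project every training shape, then sample a new deformation parameter
    params = asm.get_all_projection()
    b = generate_parameter(params)     # Gaussian sampling around the training set
    # b = generate_parameter_minmax(params)   # or uniform within the min/max range
    new_shape = asm.deformation(b, n_dim=min(5, asm.get_n_pcd()))
    o3.visualization.draw_geometries([new_shape])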
class ASMdeformation():
def __init__( self, asm_info ):
self.mean_shape = asm_info['mean_shape'] # load mean shape
self.component = asm_info['components' ] # load components
self.mean = asm_info['size_mean'] # size mean
self.std = asm_info['size_std'] # size std
def get_dp_dim( self ):
return self.component.shape[0]
def to_pcd( self, vec ):
"""
Convert (Nx3,) vector to open3d pcd
"""
vec3d = vec.reshape((-1,3))
vec3d = (vec3d * self.std )+self.mean
pcd = o3.geometry.PointCloud()
pcd.points = o3.utility.Vector3dVector(vec3d)
return pcd
def deformation( self, dp ):
"""
        Deform the mean shape by the deformation parameter vector dp.
"""
deformed = copy.deepcopy( self.mean_shape )
for c,p in zip( self.component, dp):
deformed += c*p
cloud_deformed = self.to_pcd( deformed )
return cloud_deformed
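# --------------------------------------------------------------------------
# Minimal usage sketch for ASMdeformation (illustrative): load the info.npz
# produced by save_asm_info() and deform the mean shape along one component.
def _example_load_and_deform():
    import numpy as np
    import open3d as o3

    info = np.load("info.npz")          # mean_shape, components, size_mean, size_std
    asmd = ASMdeformation(info)
    dp = np.zeros(asmd.get_dp_dim())
    dp[0] = 0.05                        # small step along the first principal component
    o3.visualization.draw_geometries([asmd.deformation(dp)])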
#####################
# Visualization tool
#####################
def generate_latent_space_image( ap, im_size=200 ):
""" Visualization function for latent spase as image. (use top 2 dimensions)
Args:
ap(ndarray): Eigen vectors generated by ASM.get_all_projection()
im_size(int): image size
Return:
ndarray(uint8,3ch): image of latent space
"""
im_latent = np.zeros([im_size,im_size,3]).astype(np.uint8)
offset = np.array([im_size/2, im_size/2])
cv2.line( im_latent, (int(im_size/2),0),(int(im_size/2),im_size),
(100,100,100),1 )
cv2.line( im_latent, (0,int(im_size/2)),(im_size,int(im_size/2)),
(100,100,100),1 )
for i in range(ap.shape[0]):
pix = ap[i,0:2] + offset
cv2.circle( im_latent,
(int(pix[0]),int(im_size-pix[1])),
2, (0,255,0), -1, cv2.LINE_AA )
return im_latent
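# Example (illustrative): render and save the latent-space scatter of the
# training set, assuming ``asm`` is a fitted instance of the training class.
def _example_plot_latent_space(asm, path="latent.png"):
    import cv2
    cv2.imwrite(path, generate_latent_space_image(asm.get_all_projection()))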
def continuous_shape_deformation( asm, pcd ):
    # projection() returns a (1, n_components) array; flatten it so the
    # element-wise comparisons and indexing below operate on scalars
    param = asm.projection(asm.get_mean()).ravel()
    d_param = copy.deepcopy(param)
direction = np.zeros(d_param.shape)
direction[0:2] = 1.0
print("copy")
d_param_id = 0
d_param_id2 = 1
cnt = 0
def deformation( vis, param ):
deformed = asm.deformation( param, asm.get_n_pcd() )
pcd.points = deformed.points
vis.update_geometry( pcd )
def shape_edit( vis ):
nonlocal param
nonlocal direction
nonlocal d_param
nonlocal d_param_id
nonlocal cnt
upper = 0.10
lower = -0.10
dim = 0
if upper < d_param[d_param_id]:
direction[d_param_id] = -1.0
elif d_param[d_param_id] < lower:
direction[d_param_id] = 1.0
if upper < d_param[d_param_id2]:
direction[d_param_id2] = -1.0
elif d_param[d_param_id2] < lower:
direction[d_param_id2] = 1.0
"""
if cnt == 300:
d_param_id +=1
if d_param_id == 5:
d_param_id = 0
cnt=0
cnt+=1
"""
step = 0.001*direction
d_param += step
print(cnt, d_param_id, " step", step )
print(d_param)
deformation( vis, d_param )
return False
o3.visualization.draw_geometries_with_animation_callback([pcd], shape_edit, width=640, height=500)
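# Interactive viewer below: the open3d window is driven by key callbacks,
# with "K"/"L" stepping PC1 up/down, ","/"." stepping PC2, "N"/"M" stepping
# PC3 and "R" resetting the deformation parameter to zero; the current
# parameter vector is overlaid on a screenshot shown in an OpenCV window.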
def deformation_with_key_callback( asm, pcd,p):
param = copy.deepcopy(p)
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
def show_image( img_, param_ ):
param = str(param_)
img = np.array( 255.0*img_, np.uint8 )
img = cv2.putText( img, param, (10, 30), cv2.FONT_HERSHEY_DUPLEX, 0.5, (20,0,0), 1, cv2.LINE_AA )
#cv2.imwrite( "hoge.png", img )
cv2.imshow( "Result", img )
        cv2.waitKey(5)
def deformation( vis, param ):
deformed = asm.deformation( param, param.shape[0] )
pcd.points = deformed.points
vis.update_geometry( pcd )
buf = vis.capture_screen_float_buffer(do_render=False)
np_buf = np.asarray( buf )
show_image( np_buf, param )
def pc1p(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[0] = 1.0
param += step
deformation( vis, param )
return False
def pc1m(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[0] = 1.0
param -= step
deformation( vis, param )
def pc2p(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[1] = 1.0
param += step
deformation( vis, param )
return False
def pc2m(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[1] = 1.0
param -= step
deformation( vis, param )
def pc3p(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[2] = 1.0
param += step
deformation( vis, param )
return False
def pc3m(vis):
nonlocal param
step = np.zeros(param.shape[0])
step[2] = 1.0
param -= step
deformation( vis, param )
return False
def reset(vis):
nonlocal param
param = np.zeros(param.shape[0])
deformation( vis, param )
return False
key_to_callback = {}
key_to_callback[ord("K")] = pc1p
key_to_callback[ord("L")] = pc1m
key_to_callback[ord(",")] = pc2p
key_to_callback[ord(".")] = pc2m
key_to_callback[ord("N")] = pc3p
key_to_callback[ord("M")] = pc3m
key_to_callback[ord("R")] = reset
o3.visualization.draw_geometries_with_key_callbacks([pcd], key_to_callback, width=640, height=500 )
cv2.destroyAllWindows()
| 26.743363
| 105
| 0.54004
|
88b8b957f94666a3db0bc12595f38e14fed83bbc
| 7,578
|
py
|
Python
|
bot.py
|
0xCN/MailBot
|
a6f9b94d6aca158f583369f55561806c50236c1c
|
[
"MIT"
] | 1
|
2022-02-07T20:05:34.000Z
|
2022-02-07T20:05:34.000Z
|
bot.py
|
0xCN/MailBot
|
a6f9b94d6aca158f583369f55561806c50236c1c
|
[
"MIT"
] | null | null | null |
bot.py
|
0xCN/MailBot
|
a6f9b94d6aca158f583369f55561806c50236c1c
|
[
"MIT"
] | 1
|
2022-01-12T04:49:50.000Z
|
2022-01-12T04:49:50.000Z
|
import os
import email
import email.message  # EmailMessage is used in send_mail()
import email.policy   # email.policy.SMTP is used when parsing fetched mail
import discord
import asyncio
import aiosmtplib
from aioimaplib import aioimaplib
from selectolax.parser import HTMLParser
from config import (
imap_host, smtp_host, user, passwd, mail_channel_id,
send_channel_id, from_mail, token, pre
)
client = discord.Client()
imap_host = imap_host.split(':')
smtp_host = smtp_host.split(':')
def get_text_selectolax(html):
"""
parsing HTML from email and returning crucial parts as TEXT
"""
tree = HTMLParser(html)
if tree.body is None:
return None
for tag in tree.css('script'):
tag.decompose()
for tag in tree.css('style'):
tag.decompose()
text = tree.body.text(separator='\n')
return text
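# Example (illustrative): extract the readable text from a small HTML snippet.
def _example_parse_html():
    html = "<html><body><style>p{}</style><p>Hello</p><p>World</p></body></html>"
    print(get_text_selectolax(html))   # roughly "Hello\nWorld"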
async def send_mail(from_, to, subject, content, reply_to=None):
message = email.message.EmailMessage()
message["From"] = from_
message["To"] = to
message["Subject"] = subject
message.set_content(content)
if reply_to:
message['reply-to'] = reply_to
await aiosmtplib.send(
message,
hostname=smtp_host[0],
port=smtp_host[1],
username=user,
password=passwd,
use_tls=True
)
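# Example (illustrative): send a one-off mail outside the bot's event loop.
# The recipient address is a placeholder; from_mail and the SMTP credentials
# come from config.py.
def _example_send_mail():
    asyncio.run(send_mail(
        from_=from_mail,
        to="recipient@example.com",
        subject="MailBot test",
        content="Hello from MailBot",
    ))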
async def idle_loop(host, port, user, password):
"""
This will loop to get new emails and send them to "mail_channel_id"
"""
imap_client = aioimaplib.IMAP4_SSL(host=host, port=port, timeout=30)
await imap_client.wait_hello_from_server()
await imap_client.login(user, password)
await imap_client.select()
while True:
# only get emails which we haven't read
status, data = await imap_client.search('(UNSEEN)')
for i in data[0].split():
typ, mail = await imap_client.fetch(i, '(RFC822)')
mail_msg = email.message_from_bytes(
mail[1],
policy=email.policy.SMTP
)
mail_channel = client.get_channel(mail_channel_id)
# sending the email as a discord message
await mail_channel.send(
"```\n------------START-OF-MAIL-----------```"
f"```ini\n[From]: {mail_msg['from']}\n"
f"[Subject]: {mail_msg['subject']}\n"
f"[To]: {mail_msg['to']}\n"
f"[Date]: {mail_msg['date']}\n```"
)
for part in mail_msg.walk():
if part.get_content_type() == "text/plain":
message = ''
for line in part.get_content().splitlines():
message += line + '\n'
message = get_text_selectolax(message.rstrip('\n'))
# removing unicode character representations
# not best practice, but works.
message = ''.join(i for i in message if ord(i) < 128)
d_msg_len = 1992
                    # cut the email content so it doesn't exceed
                    # Discord's message character limit
for i in range(0, len(message), d_msg_len):
msg_ = message[i:i+d_msg_len]+'-'
await mail_channel.send(f'```\n{msg_}```')
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
file_name = part.get_filename()
file_raw = part.get_payload(decode=True)
if bool(file_name):
file_path = os.path.join(
f'{os.getcwd()}/attachments/',
file_name)
if not os.path.isfile(file_path):
with open(file_path, 'wb') as fp:
fp.write(file_raw)
                    # won't send files that are bigger than 8 MB
if len(file_raw) <= 8000000:
await mail_channel.send(
f'`{file_name}`',
file=discord.File(file_path))
else:
await mail_channel.send(f'{file_name} file too big')
os.system('rm -r attachments/*')
await mail_channel.send(
"```\n-------------END-OF-MAIL------------```"
)
idle = await imap_client.idle_start(timeout=60)
print((await imap_client.wait_server_push()))
imap_client.idle_done()
await asyncio.wait_for(idle, 30)
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith(f'{pre}ping'):
await message.channel.send('pong')
if message.channel.id == send_channel_id:
        # the commands below are only available in the
        # "send_channel_id" Discord channel
if message.content.startswith(f'{pre}help'):
await message.channel.send(
"```ini\n[MailBot] - v1.0.0```"
f"`{pre}help` - `show this help message`\n"
f'`{pre}send` example@email.com "subject" \\`\\`\\`content'
'\\`\\`\\` - `send an email`\n'
f'`{pre}reply` 740042707807895642 \\`\\`\\`content\\`\\`\\` -'
' `reply to an email`\n'
f'[note]: `{pre}reply message_id` of a message in the '
'`mail_text_channel`\n'
'```ini\n[commands only work in send_text_channel]```'
)
if message.content.startswith(f'{pre}send'):
params = message.content.split(' ')
try:
to = message.content.split(' ')[1]
subject = message.content.split('"')[1]
content = message.content.split('```')[1]
await send_mail(from_mail, to, subject, content)
await message.channel.send(
f"```ini\n[From]: {from_mail}\n"
f"[Subject]: {subject}\n"
f"[To]: {to}\n```"
f'```\nsent email```'
)
except Exception as e:
print(e)
await message.channel.send('```\nError```')
if message.content.startswith(f'{pre}reply'):
mail_channel = client.get_channel(mail_channel_id)
params = message.content.split(' ')
try:
            # parse the original mail's info from the Discord message id
            # and reply to that email
msg = await mail_channel.fetch_message(int(params[1]))
msg = msg.content.split('\n')
content = ' '.join(params[2:])[3:-3]
to = msg[2][8:]
subject = msg[3][11:]
if 'Sent-To' in msg[4]:
to = msg[4][11:]
if 'Re: ' not in msg[3][11:]:
subject = 'Re: ' + subject
await send_mail(from_mail, to, subject, content, to)
await message.channel.send(
f"```ini\n[From]: {from_mail}\n"
f"[Subject]: {subject}\n"
f"[To]: {to}\n```"
f'```\nreplied to email```'
)
except Exception as e:
print(e)
await message.channel.send('```\nError```')
client.loop.create_task(idle_loop(
imap_host[0],
int(imap_host[1]),
user,
passwd)
)
client.run(token)
| 34.445455
| 78
| 0.510161
|
6252eeaabe90d4aeb7929c26927226d14187bb70
| 115,278
|
py
|
Python
|
saleor/graphql/product/tests/test_variant.py
|
sjkim-com/saleor
|
097592305f9340e1e72d4987b355932d335436ef
|
[
"CC-BY-4.0"
] | 1
|
2021-01-13T15:55:33.000Z
|
2021-01-13T15:55:33.000Z
|
saleor/graphql/product/tests/test_variant.py
|
Chemax911/saleor
|
4e060c03bb2457f3cd36beaffefcad0188abb2ab
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/product/tests/test_variant.py
|
Chemax911/saleor
|
4e060c03bb2457f3cd36beaffefcad0188abb2ab
|
[
"CC-BY-4.0"
] | null | null | null |
from unittest.mock import ANY, patch
from uuid import uuid4
import graphene
import pytest
from django.utils.text import slugify
from measurement.measures import Weight
from prices import Money, TaxedMoney
from ....attribute import AttributeInputType
from ....attribute.utils import associate_attribute_values_to_instance
from ....core.weight import WeightUnits
from ....order import OrderStatus
from ....order.models import OrderLine
from ....product.error_codes import ProductErrorCode
from ....product.models import Product, ProductChannelListing, ProductVariant
from ....warehouse.error_codes import StockErrorCode
from ....warehouse.models import Stock, Warehouse
from ...core.enums import WeightUnitsEnum
from ...tests.utils import assert_no_permission, get_graphql_content
def test_fetch_variant(
staff_api_client,
product,
permission_manage_products,
site_settings,
channel_USD,
):
query = """
query ProductVariantDetails($id: ID!, $countyCode: CountryCode, $channel: String) {
productVariant(id: $id, channel: $channel) {
id
stocks(countryCode: $countyCode) {
id
}
attributes {
attribute {
id
name
slug
values {
id
name
slug
}
}
values {
id
name
slug
}
}
costPrice {
currency
amount
}
images {
id
}
name
channelListings {
channel {
slug
}
price {
currency
amount
}
costPrice {
currency
amount
}
}
product {
id
}
weight {
unit
value
}
}
}
"""
# given
variant = product.variants.first()
variant.weight = Weight(kg=10)
variant.save(update_fields=["weight"])
site_settings.default_weight_unit = WeightUnits.GRAM
site_settings.save(update_fields=["default_weight_unit"])
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "countyCode": "EU", "channel": channel_USD.slug}
staff_api_client.user.user_permissions.add(permission_manage_products)
# when
response = staff_api_client.post_graphql(query, variables)
# then
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data["name"] == variant.name
assert len(data["stocks"]) == variant.stocks.count()
assert data["weight"]["value"] == 10000
assert data["weight"]["unit"] == WeightUnitsEnum.G.name
channel_listing_data = data["channelListings"][0]
channel_listing = variant.channel_listings.get()
assert channel_listing_data["channel"]["slug"] == channel_listing.channel.slug
assert channel_listing_data["price"]["currency"] == channel_listing.currency
assert channel_listing_data["price"]["amount"] == channel_listing.price_amount
assert channel_listing_data["costPrice"]["currency"] == channel_listing.currency
assert (
channel_listing_data["costPrice"]["amount"] == channel_listing.cost_price_amount
)
QUERY_PRODUCT_VARIANT_CHANNEL_LISTING = """
query ProductVariantDetails($id: ID!, $channel: String) {
productVariant(id: $id, channel: $channel) {
id
channelListings {
channel {
slug
}
price {
currency
amount
}
costPrice {
currency
amount
}
}
}
}
"""
def test_get_product_variant_channel_listing_as_staff_user(
staff_api_client,
product_available_in_many_channels,
channel_USD,
):
# given
variant = product_available_in_many_channels.variants.get()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(
QUERY_PRODUCT_VARIANT_CHANNEL_LISTING,
variables,
)
content = get_graphql_content(response)
# then
data = content["data"]["productVariant"]
channel_listings = variant.channel_listings.all()
for channel_listing in channel_listings:
assert {
"channel": {"slug": channel_listing.channel.slug},
"price": {
"currency": channel_listing.currency,
"amount": channel_listing.price_amount,
},
"costPrice": {
"currency": channel_listing.currency,
"amount": channel_listing.cost_price_amount,
},
} in data["channelListings"]
assert len(data["channelListings"]) == variant.channel_listings.count()
def test_get_product_variant_channel_listing_as_app(
app_api_client,
product_available_in_many_channels,
channel_USD,
):
# given
variant = product_available_in_many_channels.variants.get()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "channel": channel_USD.slug}
# when
response = app_api_client.post_graphql(
QUERY_PRODUCT_VARIANT_CHANNEL_LISTING,
variables,
)
content = get_graphql_content(response)
# then
data = content["data"]["productVariant"]
channel_listings = variant.channel_listings.all()
for channel_listing in channel_listings:
assert {
"channel": {"slug": channel_listing.channel.slug},
"price": {
"currency": channel_listing.currency,
"amount": channel_listing.price_amount,
},
"costPrice": {
"currency": channel_listing.currency,
"amount": channel_listing.cost_price_amount,
},
} in data["channelListings"]
assert len(data["channelListings"]) == variant.channel_listings.count()
def test_get_product_variant_channel_listing_as_customer(
user_api_client,
product_available_in_many_channels,
channel_USD,
):
# given
variant = product_available_in_many_channels.variants.get()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "channel": channel_USD.slug}
# when
response = user_api_client.post_graphql(
QUERY_PRODUCT_VARIANT_CHANNEL_LISTING,
variables,
)
# then
assert_no_permission(response)
def test_get_product_variant_channel_listing_as_anonymous(
api_client,
product_available_in_many_channels,
channel_USD,
):
# given
variant = product_available_in_many_channels.variants.get()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "channel": channel_USD.slug}
# when
response = api_client.post_graphql(
QUERY_PRODUCT_VARIANT_CHANNEL_LISTING,
variables,
)
# then
assert_no_permission(response)
CREATE_VARIANT_MUTATION = """
mutation createVariant (
$productId: ID!,
$sku: String,
$stocks: [StockInput!],
$attributes: [AttributeValueInput]!,
$weight: WeightScalar,
$trackInventory: Boolean) {
productVariantCreate(
input: {
product: $productId,
sku: $sku,
stocks: $stocks,
attributes: $attributes,
trackInventory: $trackInventory,
weight: $weight
}) {
productErrors {
field
message
attributes
code
}
productVariant {
id
name
sku
attributes {
attribute {
slug
}
values {
name
slug
reference
file {
url
contentType
}
}
}
costPrice {
currency
amount
localized
}
weight {
value
unit
}
stocks {
quantity
warehouse {
slug
}
}
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant(
updated_webhook_mock,
staff_api_client,
product,
product_type,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
weight = 10.22
variant_slug = product_type.variant_attributes.first().slug
variant_id = graphene.Node.to_global_id(
"Attribute", product_type.variant_attributes.first().pk
)
variant_value = "test-value"
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"weight": weight,
"attributes": [{"id": variant_id, "values": [variant_value]}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
assert not content["productErrors"]
data = content["productVariant"]
assert data["name"] == variant_value
assert data["sku"] == sku
assert data["attributes"][0]["attribute"]["slug"] == variant_slug
assert data["attributes"][0]["values"][0]["slug"] == variant_value
assert data["weight"]["unit"] == WeightUnitsEnum.KG.name
assert data["weight"]["value"] == weight
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_file_attribute(
updated_webhook_mock,
staff_api_client,
product,
product_type,
file_attribute,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
weight = 10.22
product_type.variant_attributes.clear()
product_type.variant_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
existing_value = file_attribute.values.first()
values_count = file_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"weight": weight,
"attributes": [{"id": file_attr_id, "file": existing_value.file_url}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
assert not content["productErrors"]
data = content["productVariant"]
assert data["name"] == sku
assert data["sku"] == sku
assert data["attributes"][0]["attribute"]["slug"] == file_attribute.slug
assert data["attributes"][0]["values"][0]["slug"] == f"{existing_value.slug}-2"
assert data["attributes"][0]["values"][0]["name"] == existing_value.name
assert data["weight"]["unit"] == WeightUnitsEnum.KG.name
assert data["weight"]["value"] == weight
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count + 1
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_file_attribute_new_value(
updated_webhook_mock,
staff_api_client,
product,
product_type,
file_attribute,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
price = 1.32
cost_price = 3.22
weight = 10.22
product_type.variant_attributes.clear()
product_type.variant_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
new_value = "new_value.txt"
values_count = file_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"costPrice": cost_price,
"price": price,
"weight": weight,
"attributes": [{"id": file_attr_id, "file": new_value}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
assert not content["productErrors"]
data = content["productVariant"]
assert data["name"] == sku
assert data["sku"] == sku
assert data["attributes"][0]["attribute"]["slug"] == file_attribute.slug
assert data["attributes"][0]["values"][0]["slug"] == slugify(new_value)
assert data["weight"]["unit"] == WeightUnitsEnum.KG.name
assert data["weight"]["value"] == weight
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count + 1
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_file_attribute_no_file_url_given(
updated_webhook_mock,
staff_api_client,
product,
product_type,
file_attribute,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
price = 1.32
cost_price = 3.22
weight = 10.22
product_type.variant_attributes.clear()
product_type.variant_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
values_count = file_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"costPrice": cost_price,
"price": price,
"weight": weight,
"attributes": [{"id": file_attr_id}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
errors = content["productErrors"]
data = content["productVariant"]
assert not errors
assert data["name"] == sku
assert data["sku"] == sku
assert data["attributes"][0]["attribute"]["slug"] == file_attribute.slug
assert len(data["attributes"][0]["values"]) == 0
assert data["weight"]["unit"] == WeightUnitsEnum.KG.name
assert data["weight"]["value"] == weight
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_page_reference_attribute(
updated_webhook_mock,
staff_api_client,
product,
product_type,
product_type_page_reference_attribute,
page_list,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_page_reference_attribute)
ref_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.id
)
page_ref_1 = graphene.Node.to_global_id("Page", page_list[0].pk)
page_ref_2 = graphene.Node.to_global_id("Page", page_list[1].pk)
values_count = product_type_page_reference_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"attributes": [{"id": ref_attr_id, "references": [page_ref_1, page_ref_2]}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
assert not content["productErrors"]
data = content["productVariant"]
assert data["sku"] == sku
variant_id = data["id"]
_, variant_pk = graphene.Node.from_global_id(variant_id)
assert (
data["attributes"][0]["attribute"]["slug"]
== product_type_page_reference_attribute.slug
)
expected_values = [
{
"slug": f"{variant_pk}_{page_list[0].pk}",
"file": None,
"reference": page_ref_1,
"name": page_list[0].title,
},
{
"slug": f"{variant_pk}_{page_list[1].pk}",
"file": None,
"reference": page_ref_2,
"name": page_list[1].title,
},
]
for value in expected_values:
assert value in data["attributes"][0]["values"]
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
product_type_page_reference_attribute.refresh_from_db()
assert product_type_page_reference_attribute.values.count() == values_count + 2
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_page_reference_attribute_no_references_given(
updated_webhook_mock,
staff_api_client,
product,
product_type,
product_type_page_reference_attribute,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_page_reference_attribute)
ref_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.id
)
values_count = product_type_page_reference_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"attributes": [{"id": ref_attr_id, "file": "test.jpg"}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
errors = content["productErrors"]
data = content["productVariant"]
assert not data
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "attributes"
assert errors[0]["attributes"] == [ref_attr_id]
product_type_page_reference_attribute.refresh_from_db()
assert product_type_page_reference_attribute.values.count() == values_count
updated_webhook_mock.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_product_reference_attribute(
updated_webhook_mock,
staff_api_client,
product,
product_type,
product_type_product_reference_attribute,
product_list,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_product_reference_attribute)
ref_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.id
)
product_ref_1 = graphene.Node.to_global_id("Product", product_list[0].pk)
product_ref_2 = graphene.Node.to_global_id("Product", product_list[1].pk)
values_count = product_type_product_reference_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"attributes": [
{"id": ref_attr_id, "references": [product_ref_1, product_ref_2]}
],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
assert not content["productErrors"]
data = content["productVariant"]
assert data["sku"] == sku
variant_id = data["id"]
_, variant_pk = graphene.Node.from_global_id(variant_id)
assert (
data["attributes"][0]["attribute"]["slug"]
== product_type_product_reference_attribute.slug
)
expected_values = [
{
"slug": f"{variant_pk}_{product_list[0].pk}",
"file": None,
"reference": product_ref_1,
"name": product_list[0].name,
},
{
"slug": f"{variant_pk}_{product_list[1].pk}",
"file": None,
"reference": product_ref_2,
"name": product_list[1].name,
},
]
for value in expected_values:
assert value in data["attributes"][0]["values"]
assert len(data["stocks"]) == 1
assert data["stocks"][0]["quantity"] == stocks[0]["quantity"]
assert data["stocks"][0]["warehouse"]["slug"] == warehouse.slug
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count + 2
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_variant_with_product_reference_attribute_no_references_given(
updated_webhook_mock,
staff_api_client,
product,
product_type,
product_type_product_reference_attribute,
permission_manage_products,
warehouse,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_product_reference_attribute)
ref_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.id
)
values_count = product_type_product_reference_attribute.values.count()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"attributes": [{"id": ref_attr_id, "file": "test.jpg"}],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)["data"]["productVariantCreate"]
errors = content["productErrors"]
data = content["productVariant"]
assert not data
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "attributes"
assert errors[0]["attributes"] == [ref_attr_id]
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count
updated_webhook_mock.assert_not_called()
def test_create_product_variant_with_negative_weight(
staff_api_client, product, product_type, permission_manage_products
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
variant_id = graphene.Node.to_global_id(
"Attribute", product_type.variant_attributes.first().pk
)
variant_value = "test-value"
variables = {
"productId": product_id,
"weight": -1,
"attributes": [{"id": variant_id, "values": [variant_value]}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productVariantCreate"]
error = data["productErrors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
def test_create_product_variant_without_attributes(
staff_api_client, product, permission_manage_products
):
# given
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"productId": product_id,
"sku": "test-sku",
"price": 0,
"attributes": [],
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productVariantCreate"]
error = data["productErrors"][0]
assert error["field"] == "attributes"
assert error["code"] == ProductErrorCode.REQUIRED.name
def test_create_product_variant_not_all_attributes(
staff_api_client, product, product_type, color_attribute, permission_manage_products
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
variant_id = graphene.Node.to_global_id(
"Attribute", product_type.variant_attributes.first().pk
)
variant_value = "test-value"
product_type.variant_attributes.add(color_attribute)
variables = {
"productId": product_id,
"sku": sku,
"attributes": [{"id": variant_id, "values": [variant_value]}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["productVariantCreate"]["productErrors"]
assert content["data"]["productVariantCreate"]["productErrors"][0] == {
"field": "attributes",
"code": ProductErrorCode.REQUIRED.name,
"message": ANY,
"attributes": None,
}
assert not product.variants.filter(sku=sku).exists()
def test_create_product_variant_duplicated_attributes(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
query = CREATE_VARIANT_MUTATION
product = product_with_variant_with_two_attributes
product_id = graphene.Node.to_global_id("Product", product.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
sku = str(uuid4())[:12]
variables = {
"productId": product_id,
"sku": sku,
"attributes": [
{"id": color_attribute_id, "values": ["red"]},
{"id": size_attribute_id, "values": ["small"]},
],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["productVariantCreate"]["productErrors"]
assert content["data"]["productVariantCreate"]["productErrors"][0] == {
"field": "attributes",
"code": ProductErrorCode.DUPLICATED_INPUT_ITEM.name,
"message": ANY,
"attributes": None,
}
assert not product.variants.filter(sku=sku).exists()
def test_create_variant_invalid_variant_attributes(
staff_api_client,
product,
product_type,
permission_manage_products,
warehouse,
color_attribute,
weight_attribute,
):
query = CREATE_VARIANT_MUTATION
product_id = graphene.Node.to_global_id("Product", product.pk)
sku = "1"
price = 1.32
cost_price = 3.22
weight = 10.22
# Default attribute defined in product_type fixture
size_attribute = product_type.variant_attributes.get(name="Size")
size_value_slug = size_attribute.values.first().slug
size_attr_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
# Add second attribute
product_type.variant_attributes.add(color_attribute)
color_attr_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
non_existent_attr_value = "The cake is a lie"
# Add third attribute
product_type.variant_attributes.add(weight_attribute)
weight_attr_id = graphene.Node.to_global_id("Attribute", weight_attribute.id)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
}
]
variables = {
"productId": product_id,
"sku": sku,
"stocks": stocks,
"costPrice": cost_price,
"price": price,
"weight": weight,
"attributes": [
{"id": color_attr_id, "values": [" "]},
{"id": weight_attr_id, "values": [None]},
{"id": size_attr_id, "values": [non_existent_attr_value, size_value_slug]},
],
"trackInventory": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productVariantCreate"]
errors = data["productErrors"]
assert not data["productVariant"]
assert len(errors) == 2
expected_errors = [
{
"attributes": [color_attr_id, weight_attr_id],
"code": ProductErrorCode.REQUIRED.name,
"field": "attributes",
"message": ANY,
},
{
"attributes": [size_attr_id],
"code": ProductErrorCode.INVALID.name,
"field": "attributes",
"message": ANY,
},
]
for error in expected_errors:
assert error in errors
def test_create_product_variant_update_with_new_attributes(
staff_api_client, permission_manage_products, product, size_attribute
):
query = """
mutation VariantUpdate(
$id: ID!
$attributes: [AttributeValueInput]
$sku: String
$trackInventory: Boolean!
) {
productVariantUpdate(
id: $id
input: {
attributes: $attributes
sku: $sku
trackInventory: $trackInventory
}
) {
errors {
field
message
}
productVariant {
id
attributes {
attribute {
id
name
slug
values {
id
name
slug
__typename
}
__typename
}
__typename
}
}
}
}
"""
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
variant_id = graphene.Node.to_global_id(
"ProductVariant", product.variants.first().pk
)
variables = {
"attributes": [{"id": size_attribute_id, "values": ["XXXL"]}],
"id": variant_id,
"sku": "21599567",
"trackInventory": True,
}
data = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["productVariantUpdate"]
assert not data["errors"]
assert data["productVariant"]["id"] == variant_id
attributes = data["productVariant"]["attributes"]
assert len(attributes) == 1
assert attributes[0]["attribute"]["id"] == size_attribute_id
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_variant(
updated_webhook_mock,
staff_api_client,
product,
size_attribute,
permission_manage_products,
):
query = """
mutation updateVariant (
$id: ID!,
$sku: String!,
$trackInventory: Boolean!,
$attributes: [AttributeValueInput]) {
productVariantUpdate(
id: $id,
input: {
sku: $sku,
trackInventory: $trackInventory,
attributes: $attributes,
}) {
productVariant {
name
sku
channelListings {
channel {
slug
}
}
costPrice {
currency
amount
localized
}
}
}
}
"""
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
sku = "test sku"
variables = {
"id": variant_id,
"sku": sku,
"trackInventory": True,
"attributes": [{"id": attribute_id, "values": ["S"]}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
variant.refresh_from_db()
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]["productVariant"]
assert data["name"] == variant.name
assert data["sku"] == sku
updated_webhook_mock.assert_called_once_with(product)
def test_update_product_variant_with_negative_weight(
staff_api_client, product, permission_manage_products
):
query = """
mutation updateVariant (
$id: ID!,
$weight: WeightScalar
) {
productVariantUpdate(
id: $id,
input: {
weight: $weight,
}
){
productVariant {
name
}
productErrors {
field
message
code
}
}
}
"""
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id, "weight": -1}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
variant.refresh_from_db()
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
error = data["productErrors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
QUERY_UPDATE_VARIANT_ATTRIBUTES = """
mutation updateVariant (
$id: ID!,
$sku: String,
$attributes: [AttributeValueInput]!) {
productVariantUpdate(
id: $id,
input: {
sku: $sku,
attributes: $attributes
}) {
productVariant {
sku
attributes {
attribute {
slug
}
values {
slug
name
file {
url
contentType
}
reference
}
}
}
errors {
field
message
}
productErrors {
field
code
}
}
}
"""
def test_update_product_variant_not_all_attributes(
staff_api_client, product, product_type, color_attribute, permission_manage_products
):
"""Ensures updating a variant with missing attributes (all attributes must
be provided) raises an error. We expect the color attribute
to be flagged as missing."""
query = QUERY_UPDATE_VARIANT_ATTRIBUTES
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
sku = "test sku"
attr_id = graphene.Node.to_global_id(
"Attribute", product_type.variant_attributes.first().id
)
variant_value = "test-value"
product_type.variant_attributes.add(color_attribute)
variables = {
"id": variant_id,
"sku": sku,
"attributes": [{"id": attr_id, "values": [variant_value]}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
variant.refresh_from_db()
content = get_graphql_content(response)
assert len(content["data"]["productVariantUpdate"]["errors"]) == 1
assert content["data"]["productVariantUpdate"]["errors"][0] == {
"field": "attributes",
"message": "All variant selection attributes must take a value.",
}
assert not product.variants.filter(sku=sku).exists()
def test_update_product_variant_with_current_attribute(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
product = product_with_variant_with_two_attributes
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
assert variant.attributes.first().values.first().slug == "red"
assert variant.attributes.last().values.first().slug == "small"
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
variables = {
"id": variant_id,
"sku": sku,
"attributes": [
{"id": color_attribute_id, "values": ["red"]},
{"id": size_attribute_id, "values": ["small"]},
],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant.refresh_from_db()
assert variant.sku == sku
assert variant.attributes.first().values.first().slug == "red"
assert variant.attributes.last().values.first().slug == "small"
def test_update_product_variant_with_new_attribute(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
product = product_with_variant_with_two_attributes
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
assert variant.attributes.first().values.first().slug == "red"
assert variant.attributes.last().values.first().slug == "small"
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
variables = {
"id": variant_id,
"sku": sku,
"attributes": [
{"id": color_attribute_id, "values": ["red"]},
{"id": size_attribute_id, "values": ["big"]},
],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant.refresh_from_db()
assert variant.sku == sku
assert variant.attributes.first().values.first().slug == "red"
assert variant.attributes.last().values.first().slug == "big"
def test_update_product_variant_with_duplicated_attribute(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
product = product_with_variant_with_two_attributes
variant = product.variants.first()
variant2 = product.variants.first()
variant2.pk = None
variant2.sku = str(uuid4())[:12]
variant2.save()
associate_attribute_values_to_instance(
variant2, color_attribute, color_attribute.values.last()
)
associate_attribute_values_to_instance(
variant2, size_attribute, size_attribute.values.last()
)
assert variant.attributes.first().values.first().slug == "red"
assert variant.attributes.last().values.first().slug == "small"
assert variant2.attributes.first().values.first().slug == "blue"
assert variant2.attributes.last().values.first().slug == "big"
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
variables = {
"id": variant_id,
"attributes": [
{"id": color_attribute_id, "values": ["blue"]},
{"id": size_attribute_id, "values": ["big"]},
],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert data["productErrors"][0] == {
"field": "attributes",
"code": ProductErrorCode.DUPLICATED_INPUT_ITEM.name,
}
def test_update_product_variant_with_current_file_attribute(
staff_api_client,
product_with_variant_with_file_attribute,
file_attribute,
permission_manage_products,
):
product = product_with_variant_with_file_attribute
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
assert set(variant.attributes.first().values.values_list("slug", flat=True)) == {
"test_filetxt"
}
second_value = file_attribute.values.last()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
file_attribute_id = graphene.Node.to_global_id("Attribute", file_attribute.pk)
variables = {
"id": variant_id,
"sku": sku,
"price": 15,
"attributes": [{"id": file_attribute_id, "file": second_value.file_url}],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant_data = data["productVariant"]
assert variant_data
assert variant_data["sku"] == sku
assert len(variant_data["attributes"]) == 1
assert variant_data["attributes"][0]["attribute"]["slug"] == file_attribute.slug
assert len(variant_data["attributes"][0]["values"]) == 1
assert (
variant_data["attributes"][0]["values"][0]["slug"]
== f"{slugify(second_value)}-2"
)
def test_update_product_variant_with_duplicated_file_attribute(
staff_api_client,
product_with_variant_with_file_attribute,
file_attribute,
permission_manage_products,
):
product = product_with_variant_with_file_attribute
variant = product.variants.first()
variant2 = product.variants.first()
variant2.pk = None
variant2.sku = str(uuid4())[:12]
variant2.save()
file_attr_value = file_attribute.values.last()
associate_attribute_values_to_instance(variant2, file_attribute, file_attr_value)
sku = str(uuid4())[:12]
assert not variant.sku == sku
assert set(variant.attributes.first().values.values_list("slug", flat=True)) == {
"test_filetxt"
}
assert set(variant2.attributes.first().values.values_list("slug", flat=True)) == {
"test_filejpeg"
}
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
file_attribute_id = graphene.Node.to_global_id("Attribute", file_attribute.pk)
variables = {
"id": variant_id,
"price": 15,
"attributes": [{"id": file_attribute_id, "file": file_attr_value.file_url}],
"sku": sku,
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert data["productErrors"][0] == {
"field": "attributes",
"code": ProductErrorCode.DUPLICATED_INPUT_ITEM.name,
}
def test_update_product_variant_with_file_attribute_new_value_is_not_created(
staff_api_client,
product_with_variant_with_file_attribute,
file_attribute,
permission_manage_products,
):
product = product_with_variant_with_file_attribute
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
existing_value = file_attribute.values.first()
assert variant.attributes.filter(
assignment__attribute=file_attribute, values=existing_value
).exists()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
file_attribute_id = graphene.Node.to_global_id("Attribute", file_attribute.pk)
variables = {
"id": variant_id,
"sku": sku,
"price": 15,
"attributes": [{"id": file_attribute_id, "file": existing_value.file_url}],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant_data = data["productVariant"]
assert variant_data
assert variant_data["sku"] == sku
assert len(variant_data["attributes"]) == 1
assert variant_data["attributes"][0]["attribute"]["slug"] == file_attribute.slug
assert len(variant_data["attributes"][0]["values"]) == 1
value_data = variant_data["attributes"][0]["values"][0]
assert value_data["slug"] == existing_value.slug
assert value_data["name"] == existing_value.name
assert value_data["file"]["url"] == existing_value.file_url
assert value_data["file"]["contentType"] == existing_value.content_type
def test_update_product_variant_with_page_reference_attribute(
staff_api_client,
product,
page,
product_type_page_reference_attribute,
permission_manage_products,
):
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
product_type = product.product_type
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_page_reference_attribute)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
ref_attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
reference = graphene.Node.to_global_id("Page", page.pk)
variables = {
"id": variant_id,
"sku": sku,
"attributes": [{"id": ref_attribute_id, "references": [reference]}],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant_data = data["productVariant"]
assert variant_data
assert variant_data["sku"] == sku
assert len(variant_data["attributes"]) == 1
assert (
variant_data["attributes"][0]["attribute"]["slug"]
== product_type_page_reference_attribute.slug
)
assert len(variant_data["attributes"][0]["values"]) == 1
assert (
variant_data["attributes"][0]["values"][0]["slug"] == f"{variant.pk}_{page.pk}"
)
assert variant_data["attributes"][0]["values"][0]["reference"] == reference
def test_update_product_variant_with_product_reference_attribute(
staff_api_client,
product_list,
product_type_product_reference_attribute,
permission_manage_products,
):
product = product_list[0]
product_ref = product_list[1]
variant = product.variants.first()
sku = str(uuid4())[:12]
assert not variant.sku == sku
product_type = product.product_type
product_type.variant_attributes.clear()
product_type.variant_attributes.add(product_type_product_reference_attribute)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
ref_attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.pk
)
reference = graphene.Node.to_global_id("Product", product_ref.pk)
variables = {
"id": variant_id,
"sku": sku,
"attributes": [{"id": ref_attribute_id, "references": [reference]}],
}
response = staff_api_client.post_graphql(
QUERY_UPDATE_VARIANT_ATTRIBUTES,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert not data["errors"]
variant_data = data["productVariant"]
assert variant_data
assert variant_data["sku"] == sku
assert len(variant_data["attributes"]) == 1
assert (
variant_data["attributes"][0]["attribute"]["slug"]
== product_type_product_reference_attribute.slug
)
assert len(variant_data["attributes"][0]["values"]) == 1
assert (
variant_data["attributes"][0]["values"][0]["slug"]
== f"{variant.pk}_{product_ref.pk}"
)
assert variant_data["attributes"][0]["values"][0]["reference"] == reference
@pytest.mark.parametrize(
"values, message",
(
([], "Attribute expects a value but none were given"),
(["one", "two"], "Attribute must take only one value"),
([" "], "Attribute values cannot be blank"),
([None], "Attribute values cannot be blank"),
),
)
def test_update_product_variant_requires_values(
staff_api_client, variant, product_type, permission_manage_products, values, message
):
"""Ensures updating a variant with invalid values raise an error.
- No values
- Blank value
- None as value
- More than one value
"""
sku = "updated"
query = QUERY_UPDATE_VARIANT_ATTRIBUTES
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
attr_id = graphene.Node.to_global_id(
"Attribute", product_type.variant_attributes.first().id
)
variables = {
"id": variant_id,
"attributes": [{"id": attr_id, "values": values}],
"sku": sku,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
variant.refresh_from_db()
content = get_graphql_content(response)
assert (
len(content["data"]["productVariantUpdate"]["errors"]) == 1
), f"expected: {message}"
assert content["data"]["productVariantUpdate"]["errors"][0] == {
"field": "attributes",
"message": message,
}
assert not variant.product.variants.filter(sku=sku).exists()
def test_update_product_variant_with_price_does_not_raise_price_validation_error(
staff_api_client, variant, size_attribute, permission_manage_products
):
mutation = """
mutation updateVariant ($id: ID!, $attributes: [AttributeValueInput]) {
productVariantUpdate(
id: $id,
input: {
attributes: $attributes,
}) {
productVariant {
id
}
productErrors {
field
code
}
}
}
"""
# given a product variant and an attribute
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
# when running the updateVariant mutation without price input field
variables = {
"id": variant_id,
"attributes": [{"id": attribute_id, "values": ["S"]}],
}
response = staff_api_client.post_graphql(
mutation, variables, permissions=[permission_manage_products]
)
# then mutation passes without validation errors
content = get_graphql_content(response)
assert not content["data"]["productVariantUpdate"]["productErrors"]
DELETE_VARIANT_MUTATION = """
mutation variantDelete($id: ID!) {
productVariantDelete(id: $id) {
productVariant {
sku
id
}
}
}
"""
def test_delete_variant(staff_api_client, product, permission_manage_products):
query = DELETE_VARIANT_MUTATION
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["productVariant"]["sku"] == variant.sku
with pytest.raises(variant._meta.model.DoesNotExist):
variant.refresh_from_db()
def test_delete_variant_in_draft_order(
staff_api_client,
order_line,
permission_manage_products,
order_list,
channel_USD,
):
query = DELETE_VARIANT_MUTATION
draft_order = order_line.order
draft_order.status = OrderStatus.DRAFT
draft_order.save(update_fields=["status"])
variant = order_line.variant
variant_channel_listing = variant.channel_listings.get(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id}
product = variant.product
net = variant.get_price(product, [], channel_USD, variant_channel_listing, None)
gross = Money(amount=net.amount, currency=net.currency)
order_not_draft = order_list[-1]
unit_price = TaxedMoney(net=net, gross=gross)
quantity = 3
order_line_not_in_draft = OrderLine.objects.create(
variant=variant,
order=order_not_draft,
product_name=str(product),
variant_name=str(variant),
product_sku=variant.sku,
is_shipping_required=variant.is_shipping_required(),
unit_price=unit_price,
total_price=unit_price * quantity,
quantity=quantity,
)
order_line_not_in_draft_pk = order_line_not_in_draft.pk
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["productVariant"]["sku"] == variant.sku
with pytest.raises(order_line._meta.model.DoesNotExist):
order_line.refresh_from_db()
assert OrderLine.objects.filter(pk=order_line_not_in_draft_pk).exists()
def test_delete_default_variant(
staff_api_client, product_with_two_variants, permission_manage_products
):
# given
query = DELETE_VARIANT_MUTATION
product = product_with_two_variants
default_variant = product.variants.first()
second_variant = product.variants.last()
product.default_variant = default_variant
product.save(update_fields=["default_variant"])
assert second_variant.pk != default_variant.pk
variant_id = graphene.Node.to_global_id("ProductVariant", default_variant.pk)
variables = {"id": variant_id}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["productVariant"]["sku"] == default_variant.sku
with pytest.raises(default_variant._meta.model.DoesNotExist):
default_variant.refresh_from_db()
product.refresh_from_db()
assert product.default_variant.pk == second_variant.pk
def test_delete_not_default_variant_left_default_variant_unchanged(
staff_api_client, product_with_two_variants, permission_manage_products
):
# given
query = DELETE_VARIANT_MUTATION
product = product_with_two_variants
default_variant = product.variants.first()
second_variant = product.variants.last()
product.default_variant = default_variant
product.save(update_fields=["default_variant"])
assert second_variant.pk != default_variant.pk
variant_id = graphene.Node.to_global_id("ProductVariant", second_variant.pk)
variables = {"id": variant_id}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["productVariant"]["sku"] == second_variant.sku
with pytest.raises(second_variant._meta.model.DoesNotExist):
second_variant.refresh_from_db()
product.refresh_from_db()
assert product.default_variant.pk == default_variant.pk
def test_delete_default_all_product_variant_left_product_default_variant_unset(
staff_api_client, product, permission_manage_products
):
# given
query = DELETE_VARIANT_MUTATION
default_variant = product.variants.first()
product.default_variant = default_variant
product.save(update_fields=["default_variant"])
assert product.variants.count() == 1
variant_id = graphene.Node.to_global_id("ProductVariant", default_variant.pk)
variables = {"id": variant_id}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["productVariant"]["sku"] == default_variant.sku
with pytest.raises(default_variant._meta.model.DoesNotExist):
default_variant.refresh_from_db()
product.refresh_from_db()
assert not product.default_variant
def _fetch_all_variants(client, variables=None, permissions=None):
    # Use None instead of a mutable default argument; fall back to an empty dict.
    if variables is None:
        variables = {}
query = """
query fetchAllVariants($channel: String) {
productVariants(first: 10, channel: $channel) {
totalCount
edges {
node {
id
}
}
}
}
"""
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariants"]
def test_fetch_all_variants_staff_user(
staff_api_client, unavailable_product_with_variant, permission_manage_products
):
variant = unavailable_product_with_variant.variants.first()
data = _fetch_all_variants(
staff_api_client, permissions=[permission_manage_products]
)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
assert data["totalCount"] == 1
assert data["edges"][0]["node"]["id"] == variant_id
def test_fetch_all_variants_staff_user_with_channel(
staff_api_client,
product_list_with_variants_many_channel,
permission_manage_products,
channel_PLN,
):
variables = {"channel": channel_PLN.slug}
data = _fetch_all_variants(
staff_api_client, variables, permissions=[permission_manage_products]
)
assert data["totalCount"] == 2
def test_fetch_all_variants_staff_user_without_channel(
staff_api_client,
product_list_with_variants_many_channel,
permission_manage_products,
):
data = _fetch_all_variants(
staff_api_client, permissions=[permission_manage_products]
)
assert data["totalCount"] == 3
def test_fetch_all_variants_customer(
user_api_client, unavailable_product_with_variant, channel_USD
):
data = _fetch_all_variants(user_api_client, variables={"channel": channel_USD.slug})
assert data["totalCount"] == 0
def test_fetch_all_variants_anonymous_user(
api_client, unavailable_product_with_variant, channel_USD
):
data = _fetch_all_variants(api_client, variables={"channel": channel_USD.slug})
assert data["totalCount"] == 0
def test_product_variants_by_ids(user_api_client, variant, channel_USD):
query = """
query getProduct($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
}
}
}
}
"""
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert data["edges"][0]["node"]["id"] == variant_id
assert len(data["edges"]) == 1
def test_product_variants_visible_in_listings_by_customer(
user_api_client, product_list, channel_USD
):
# given
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
# when
data = _fetch_all_variants(user_api_client, variables={"channel": channel_USD.slug})
assert data["totalCount"] == product_count - 1
def test_product_variants_visible_in_listings_by_staff_without_manage_products(
staff_api_client, product_list, channel_USD
):
# given
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
# when
data = _fetch_all_variants(
staff_api_client, variables={"channel": channel_USD.slug}
)
assert data["totalCount"] == product_count
def test_product_variants_visible_in_listings_by_staff_with_perm(
staff_api_client, product_list, permission_manage_products, channel_USD
):
# given
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
# when
data = _fetch_all_variants(
staff_api_client,
variables={"channel": channel_USD.slug},
permissions=[permission_manage_products],
)
assert data["totalCount"] == product_count
def test_product_variants_visible_in_listings_by_app_without_manage_products(
app_api_client, product_list, channel_USD
):
# given
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
# when
data = _fetch_all_variants(app_api_client, variables={"channel": channel_USD.slug})
assert data["totalCount"] == product_count
def test_product_variants_visible_in_listings_by_app_with_perm(
app_api_client, product_list, permission_manage_products, channel_USD
):
# given
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
# when
data = _fetch_all_variants(
app_api_client,
variables={"channel": channel_USD.slug},
permissions=[permission_manage_products],
)
assert data["totalCount"] == product_count
def _fetch_variant(client, variant, channel_slug=None, permissions=None):
query = """
query ProductVariantDetails($variantId: ID!, $channel: String) {
productVariant(id: $variantId, channel: $channel) {
id
product {
id
}
}
}
"""
variables = {"variantId": graphene.Node.to_global_id("ProductVariant", variant.id)}
if channel_slug:
variables["channel"] = channel_slug
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariant"]
def test_fetch_unpublished_variant_staff_user(
staff_api_client, unavailable_product_with_variant, permission_manage_products
):
variant = unavailable_product_with_variant.variants.first()
data = _fetch_variant(
staff_api_client,
variant,
permissions=[permission_manage_products],
)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
product_id = graphene.Node.to_global_id(
"Product", unavailable_product_with_variant.pk
)
assert data["id"] == variant_id
assert data["product"]["id"] == product_id
def test_fetch_unpublished_variant_customer(
user_api_client, unavailable_product_with_variant, channel_USD
):
variant = unavailable_product_with_variant.variants.first()
data = _fetch_variant(user_api_client, variant, channel_slug=channel_USD.slug)
assert data is None
def test_fetch_unpublished_variant_anonymous_user(
api_client, unavailable_product_with_variant, channel_USD
):
variant = unavailable_product_with_variant.variants.first()
data = _fetch_variant(api_client, variant, channel_slug=channel_USD.slug)
assert data is None
PRODUCT_VARIANT_BULK_CREATE_MUTATION = """
mutation ProductVariantBulkCreate(
$variants: [ProductVariantBulkCreateInput]!, $productId: ID!
) {
productVariantBulkCreate(variants: $variants, product: $productId) {
bulkProductErrors {
field
message
code
index
warehouses
channels
}
productVariants{
id
name
sku
stocks {
warehouse {
slug
}
quantity
}
channelListings {
channel {
slug
}
price {
currency
amount
}
costPrice {
currency
amount
}
}
}
count
}
}
"""
def test_product_variant_bulk_create_by_attribute_id(
staff_api_client, product, size_attribute, permission_manage_products
):
product_variant_count = ProductVariant.objects.count()
attribute_value_count = size_attribute.values.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
sku = str(uuid4())[:12]
variants = [
{
"sku": sku,
"weight": 2.5,
"trackInventory": True,
"attributes": [{"id": attribut_id, "values": [attribute_value.name]}],
}
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 1
assert data["productVariants"][0]["name"] == attribute_value.name
assert product_variant_count + 1 == ProductVariant.objects.count()
assert attribute_value_count == size_attribute.values.count()
product_variant = ProductVariant.objects.get(sku=sku)
product.refresh_from_db()
assert product.default_variant == product_variant
def test_product_variant_bulk_create_only_not_variant_selection_attributes(
staff_api_client, product, size_attribute, permission_manage_products
):
"""Ensure that sku is set as variant name when only variant selection attributes
are assigned.
"""
product_variant_count = ProductVariant.objects.count()
attribute_value_count = size_attribute.values.count()
size_attribute.input_type = AttributeInputType.MULTISELECT
size_attribute.save(update_fields=["input_type"])
product_id = graphene.Node.to_global_id("Product", product.pk)
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
sku = str(uuid4())[:12]
variants = [
{
"sku": sku,
"weight": 2.5,
"trackInventory": True,
"attributes": [{"id": attribut_id, "values": [attribute_value.name]}],
}
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 1
assert data["productVariants"][0]["name"] == sku
assert product_variant_count + 1 == ProductVariant.objects.count()
assert attribute_value_count == size_attribute.values.count()
product_variant = ProductVariant.objects.get(sku=sku)
product.refresh_from_db()
assert product.default_variant == product_variant
def test_product_variant_bulk_create_empty_attribute(
staff_api_client, product, size_attribute, permission_manage_products
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
variants = [{"sku": str(uuid4())[:12], "attributes": []}]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 1
assert product_variant_count + 1 == ProductVariant.objects.count()
def test_product_variant_bulk_create_with_new_attribute_value(
staff_api_client, product, size_attribute, permission_manage_products
):
product_variant_count = ProductVariant.objects.count()
attribute_value_count = size_attribute.values.count()
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_value = size_attribute.values.last()
variants = [
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-attribute"]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 2
assert product_variant_count + 2 == ProductVariant.objects.count()
assert attribute_value_count + 1 == size_attribute.values.count()
def test_product_variant_bulk_create_variant_selection_and_other_attributes(
staff_api_client,
product,
size_attribute,
file_attribute,
permission_manage_products,
):
"""Ensure that only values for variant selection attributes are required."""
product_type = product.product_type
product_type.variant_attributes.add(file_attribute)
product_variant_count = ProductVariant.objects.count()
attribute_value_count = size_attribute.values.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
sku = str(uuid4())[:12]
variants = [
{
"sku": sku,
"weight": 2.5,
"trackInventory": True,
"attributes": [{"id": attribute_id, "values": [attribute_value.name]}],
}
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 1
assert product_variant_count + 1 == ProductVariant.objects.count()
assert attribute_value_count == size_attribute.values.count()
product_variant = ProductVariant.objects.get(sku=sku)
product.refresh_from_db()
assert product.default_variant == product_variant
def test_product_variant_bulk_create_stocks_input(
staff_api_client, product, permission_manage_products, warehouses, size_attribute
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_value_count = size_attribute.values.count()
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
variants = [
{
"sku": str(uuid4())[:12],
"stocks": [
{
"quantity": 10,
"warehouse": graphene.Node.to_global_id(
"Warehouse", warehouses[0].pk
),
}
],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-attribute"]}],
"stocks": [
{
"quantity": 15,
"warehouse": graphene.Node.to_global_id(
"Warehouse", warehouses[0].pk
),
},
{
"quantity": 15,
"warehouse": graphene.Node.to_global_id(
"Warehouse", warehouses[1].pk
),
},
],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 2
assert product_variant_count + 2 == ProductVariant.objects.count()
assert attribute_value_count + 1 == size_attribute.values.count()
expected_result = {
variants[0]["sku"]: {
"sku": variants[0]["sku"],
"stocks": [
{
"warehouse": {"slug": warehouses[0].slug},
"quantity": variants[0]["stocks"][0]["quantity"],
}
],
},
variants[1]["sku"]: {
"sku": variants[1]["sku"],
"stocks": [
{
"warehouse": {"slug": warehouses[0].slug},
"quantity": variants[1]["stocks"][0]["quantity"],
},
{
"warehouse": {"slug": warehouses[1].slug},
"quantity": variants[1]["stocks"][1]["quantity"],
},
],
},
}
for variant_data in data["productVariants"]:
variant_data.pop("id")
assert variant_data["sku"] in expected_result
expected_variant = expected_result[variant_data["sku"]]
expected_stocks = expected_variant["stocks"]
assert all([stock in expected_stocks for stock in variant_data["stocks"]])
def test_product_variant_bulk_create_duplicated_warehouses(
staff_api_client, product, permission_manage_products, warehouses, size_attribute
):
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
warehouse1_id = graphene.Node.to_global_id("Warehouse", warehouses[0].pk)
variants = [
{
"sku": str(uuid4())[:12],
"stocks": [
{
"quantity": 10,
"warehouse": graphene.Node.to_global_id(
"Warehouse", warehouses[1].pk
),
}
],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-attribute"]}],
"stocks": [
{"quantity": 15, "warehouse": warehouse1_id},
{"quantity": 15, "warehouse": warehouse1_id},
],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
errors = data["bulkProductErrors"]
assert not data["productVariants"]
assert len(errors) == 1
error = errors[0]
assert error["field"] == "stocks"
assert error["index"] == 1
assert error["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
assert error["warehouses"] == [warehouse1_id]
def test_product_variant_bulk_create_channel_listings_input(
staff_api_client,
product_available_in_many_channels,
permission_manage_products,
warehouses,
size_attribute,
channel_USD,
channel_PLN,
):
product = product_available_in_many_channels
ProductChannelListing.objects.filter(product=product, channel=channel_PLN).update(
is_published=False
)
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_value_count = size_attribute.values.count()
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
variants = [
{
"sku": str(uuid4())[:12],
"channelListings": [
{
"price": 10.0,
"costPrice": 11.0,
"channelId": graphene.Node.to_global_id("Channel", channel_USD.pk),
}
],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-attribute"]}],
"channelListings": [
{
"price": 15.0,
"costPrice": 16.0,
"channelId": graphene.Node.to_global_id("Channel", channel_USD.pk),
},
{
"price": 12.0,
"costPrice": 13.0,
"channelId": graphene.Node.to_global_id("Channel", channel_PLN.pk),
},
],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 2
assert product_variant_count + 2 == ProductVariant.objects.count()
assert attribute_value_count + 1 == size_attribute.values.count()
expected_result = {
variants[0]["sku"]: {
"sku": variants[0]["sku"],
"channelListings": [
{
"channel": {"slug": channel_USD.slug},
"price": {
"amount": variants[0]["channelListings"][0]["price"],
"currency": channel_USD.currency_code,
},
"costPrice": {
"amount": variants[0]["channelListings"][0]["costPrice"],
"currency": channel_USD.currency_code,
},
}
],
},
variants[1]["sku"]: {
"sku": variants[1]["sku"],
"channelListings": [
{
"channel": {"slug": channel_USD.slug},
"price": {
"amount": variants[1]["channelListings"][0]["price"],
"currency": channel_USD.currency_code,
},
"costPrice": {
"amount": variants[1]["channelListings"][0]["costPrice"],
"currency": channel_USD.currency_code,
},
},
{
"channel": {"slug": channel_PLN.slug},
"price": {
"amount": variants[1]["channelListings"][1]["price"],
"currency": channel_PLN.currency_code,
},
"costPrice": {
"amount": variants[1]["channelListings"][1]["costPrice"],
"currency": channel_PLN.currency_code,
},
},
],
},
}
for variant_data in data["productVariants"]:
variant_data.pop("id")
assert variant_data["sku"] in expected_result
expected_variant = expected_result[variant_data["sku"]]
expected_channel_listing = expected_variant["channelListings"]
assert all(
[
channelListing in expected_channel_listing
for channelListing in variant_data["channelListings"]
]
)
def test_product_variant_bulk_create_duplicated_channels(
staff_api_client,
product_available_in_many_channels,
permission_manage_products,
warehouses,
size_attribute,
channel_USD,
):
product = product_available_in_many_channels
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
channel_id = graphene.Node.to_global_id("Channel", channel_USD.pk)
variants = [
{
"sku": str(uuid4())[:12],
"channelListings": [
{"price": 10.0, "channelId": channel_id},
{"price": 10.0, "channelId": channel_id},
],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 1
error = data["bulkProductErrors"][0]
assert error["field"] == "channelListings"
assert error["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
assert error["index"] == 0
assert error["channels"] == [channel_id]
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_too_many_decimal_places_in_price(
staff_api_client,
product_available_in_many_channels,
permission_manage_products,
size_attribute,
channel_USD,
channel_PLN,
):
product = product_available_in_many_channels
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
channel_id = graphene.Node.to_global_id("Channel", channel_USD.pk)
channel_pln_id = graphene.Node.to_global_id("Channel", channel_PLN.pk)
variants = [
{
"sku": str(uuid4())[:12],
"channelListings": [
{"price": 10.1234, "costPrice": 10.1234, "channelId": channel_id},
{"price": 10.12345, "costPrice": 10.12345, "channelId": channel_pln_id},
],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 4
errors = data["bulkProductErrors"]
assert errors[0]["field"] == "price"
assert errors[0]["code"] == ProductErrorCode.INVALID.name
assert errors[0]["index"] == 0
assert errors[0]["channels"] == [channel_id]
assert errors[1]["field"] == "price"
assert errors[1]["code"] == ProductErrorCode.INVALID.name
assert errors[1]["index"] == 0
assert errors[1]["channels"] == [channel_pln_id]
assert errors[2]["field"] == "costPrice"
assert errors[2]["code"] == ProductErrorCode.INVALID.name
assert errors[2]["index"] == 0
assert errors[2]["channels"] == [channel_id]
assert errors[3]["field"] == "costPrice"
assert errors[3]["code"] == ProductErrorCode.INVALID.name
assert errors[3]["index"] == 0
assert errors[3]["channels"] == [channel_pln_id]
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_product_not_assigned_to_channel(
staff_api_client,
product,
permission_manage_products,
warehouses,
size_attribute,
channel_PLN,
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
assert not ProductChannelListing.objects.filter(
product=product, channel=channel_PLN
).exists()
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
attribute_value = size_attribute.values.last()
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.pk)
variants = [
{
"sku": str(uuid4())[:12],
"channelListings": [{"price": 10.0, "channelId": channel_id}],
"attributes": [{"id": size_attribute_id, "values": [attribute_value.name]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 1
error = data["bulkProductErrors"][0]
assert error["field"] == "channelId"
assert error["code"] == ProductErrorCode.PRODUCT_NOT_ASSIGNED_TO_CHANNEL.name
assert error["index"] == 0
assert error["channels"] == [channel_id]
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_duplicated_sku(
staff_api_client,
product,
product_with_default_variant,
size_attribute,
permission_manage_products,
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
sku = product.variants.first().sku
sku2 = product_with_default_variant.variants.first().sku
    assert sku != sku2
variants = [
{
"sku": sku,
"attributes": [{"id": size_attribute_id, "values": ["Test-value"]}],
},
{
"sku": sku2,
"attributes": [{"id": size_attribute_id, "values": ["Test-valuee"]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 2
errors = data["bulkProductErrors"]
for index, error in enumerate(errors):
assert error["field"] == "sku"
assert error["code"] == ProductErrorCode.UNIQUE.name
assert error["index"] == index
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_duplicated_sku_in_input(
staff_api_client, product, size_attribute, permission_manage_products
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
sku = str(uuid4())[:12]
variants = [
{
"sku": sku,
"attributes": [{"id": size_attribute_id, "values": ["Test-value"]}],
},
{
"sku": sku,
"attributes": [{"id": size_attribute_id, "values": ["Test-value2"]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 1
error = data["bulkProductErrors"][0]
assert error["field"] == "sku"
assert error["code"] == ProductErrorCode.UNIQUE.name
assert error["index"] == 1
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_many_errors(
staff_api_client, product, size_attribute, permission_manage_products
):
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
non_existent_attribute_pk = 0
invalid_attribute_id = graphene.Node.to_global_id(
"Attribute", non_existent_attribute_pk
)
sku = product.variants.first().sku
variants = [
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-value1"]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": size_attribute_id, "values": ["Test-value4"]}],
},
{
"sku": sku,
"attributes": [{"id": size_attribute_id, "values": ["Test-value2"]}],
},
{
"sku": str(uuid4())[:12],
"attributes": [{"id": invalid_attribute_id, "values": ["Test-value3"]}],
},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 2
errors = data["bulkProductErrors"]
expected_errors = [
{
"field": "sku",
"index": 2,
"code": ProductErrorCode.UNIQUE.name,
"message": ANY,
"warehouses": None,
"channels": None,
},
{
"field": "attributes",
"index": 3,
"code": ProductErrorCode.NOT_FOUND.name,
"message": ANY,
"warehouses": None,
"channels": None,
},
]
for expected_error in expected_errors:
assert expected_error in errors
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_two_variants_duplicated_attribute_value(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
product = product_with_variant_with_two_attributes
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
variants = [
{
"sku": str(uuid4())[:12],
"attributes": [
{"id": color_attribute_id, "values": ["red"]},
{"id": size_attribute_id, "values": ["small"]},
],
}
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 1
error = data["bulkProductErrors"][0]
assert error["field"] == "attributes"
assert error["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
assert error["index"] == 0
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_two_variants_duplicated_attribute_value_in_input(
staff_api_client,
product_with_variant_with_two_attributes,
permission_manage_products,
color_attribute,
size_attribute,
):
product = product_with_variant_with_two_attributes
product_id = graphene.Node.to_global_id("Product", product.pk)
product_variant_count = ProductVariant.objects.count()
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
attributes = [
{"id": color_attribute_id, "values": [color_attribute.values.last().slug]},
{"id": size_attribute_id, "values": [size_attribute.values.last().slug]},
]
variants = [
{"sku": str(uuid4())[:12], "attributes": attributes},
{"sku": str(uuid4())[:12], "attributes": attributes},
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert len(data["bulkProductErrors"]) == 1
error = data["bulkProductErrors"][0]
assert error["field"] == "attributes"
assert error["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
assert error["index"] == 1
assert product_variant_count == ProductVariant.objects.count()
def test_product_variant_bulk_create_two_variants_duplicated_one_attribute_value(
staff_api_client,
product_with_variant_with_two_attributes,
color_attribute,
size_attribute,
permission_manage_products,
):
product = product_with_variant_with_two_attributes
product_variant_count = ProductVariant.objects.count()
product_id = graphene.Node.to_global_id("Product", product.pk)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
variants = [
{
"sku": str(uuid4())[:12],
"attributes": [
{"id": color_attribute_id, "values": ["red"]},
{"id": size_attribute_id, "values": ["big"]},
],
}
]
variables = {"productId": product_id, "variants": variants}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_BULK_CREATE_MUTATION, variables
)
content = get_graphql_content(response)
data = content["data"]["productVariantBulkCreate"]
assert not data["bulkProductErrors"]
assert data["count"] == 1
assert product_variant_count + 1 == ProductVariant.objects.count()
VARIANT_STOCKS_CREATE_MUTATION = """
mutation ProductVariantStocksCreate($variantId: ID!, $stocks: [StockInput!]!){
productVariantStocksCreate(variantId: $variantId, stocks: $stocks){
productVariant{
id
stocks {
quantity
quantityAllocated
id
warehouse{
slug
}
}
}
bulkStockErrors{
code
field
message
index
}
}
}
"""
def test_variant_stocks_create(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.id),
"quantity": 20,
},
{
"warehouse": graphene.Node.to_global_id("Warehouse", second_warehouse.id),
"quantity": 100,
},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_CREATE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksCreate"]
expected_result = [
{
"quantity": stocks[0]["quantity"],
"quantityAllocated": 0,
"warehouse": {"slug": warehouse.slug},
},
{
"quantity": stocks[1]["quantity"],
"quantityAllocated": 0,
"warehouse": {"slug": second_warehouse.slug},
},
]
assert not data["bulkStockErrors"]
assert len(data["productVariant"]["stocks"]) == len(stocks)
result = []
for stock in data["productVariant"]["stocks"]:
stock.pop("id")
result.append(stock)
for res in result:
assert res in expected_result
def test_variant_stocks_create_empty_stock_input(
staff_api_client, variant, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {"variantId": variant_id, "stocks": []}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_CREATE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksCreate"]
assert not data["bulkStockErrors"]
assert len(data["productVariant"]["stocks"]) == variant.stocks.count()
assert data["productVariant"]["id"] == variant_id
def test_variant_stocks_create_stock_already_exists(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
Stock.objects.create(product_variant=variant, warehouse=warehouse, quantity=10)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.id),
"quantity": 20,
},
{
"warehouse": graphene.Node.to_global_id("Warehouse", second_warehouse.id),
"quantity": 100,
},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_CREATE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksCreate"]
errors = data["bulkStockErrors"]
assert errors
assert errors[0]["code"] == StockErrorCode.UNIQUE.name
assert errors[0]["field"] == "warehouse"
assert errors[0]["index"] == 0
def test_variant_stocks_create_stock_duplicated_warehouse(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
second_warehouse_id = graphene.Node.to_global_id("Warehouse", second_warehouse.id)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.id),
"quantity": 20,
},
{"warehouse": second_warehouse_id, "quantity": 100},
{"warehouse": second_warehouse_id, "quantity": 120},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_CREATE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksCreate"]
errors = data["bulkStockErrors"]
assert errors
assert errors[0]["code"] == StockErrorCode.UNIQUE.name
assert errors[0]["field"] == "warehouse"
assert errors[0]["index"] == 2
def test_variant_stocks_create_stock_duplicated_warehouse_and_warehouse_already_exists(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
second_warehouse_id = graphene.Node.to_global_id("Warehouse", second_warehouse.id)
Stock.objects.create(
product_variant=variant, warehouse=second_warehouse, quantity=10
)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.id),
"quantity": 20,
},
{"warehouse": second_warehouse_id, "quantity": 100},
{"warehouse": second_warehouse_id, "quantity": 120},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_CREATE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksCreate"]
errors = data["bulkStockErrors"]
assert len(errors) == 3
assert {error["code"] for error in errors} == {
StockErrorCode.UNIQUE.name,
}
assert {error["field"] for error in errors} == {
"warehouse",
}
assert {error["index"] for error in errors} == {1, 2}
VARIANT_STOCKS_UPDATE_MUTATIONS = """
mutation ProductVariantStocksUpdate($variantId: ID!, $stocks: [StockInput!]!){
productVariantStocksUpdate(variantId: $variantId, stocks: $stocks){
productVariant{
stocks{
quantity
quantityAllocated
id
warehouse{
slug
}
}
}
bulkStockErrors{
code
field
message
index
}
}
}
"""
def test_product_variant_stocks_update(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
Stock.objects.create(product_variant=variant, warehouse=warehouse, quantity=10)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.id),
"quantity": 20,
},
{
"warehouse": graphene.Node.to_global_id("Warehouse", second_warehouse.id),
"quantity": 100,
},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_UPDATE_MUTATIONS,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksUpdate"]
expected_result = [
{
"quantity": stocks[0]["quantity"],
"quantityAllocated": 0,
"warehouse": {"slug": warehouse.slug},
},
{
"quantity": stocks[1]["quantity"],
"quantityAllocated": 0,
"warehouse": {"slug": second_warehouse.slug},
},
]
assert not data["bulkStockErrors"]
assert len(data["productVariant"]["stocks"]) == len(stocks)
result = []
for stock in data["productVariant"]["stocks"]:
stock.pop("id")
result.append(stock)
for res in result:
assert res in expected_result
def test_product_variant_stocks_update_with_empty_stock_list(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
stocks = []
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_UPDATE_MUTATIONS,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksUpdate"]
assert not data["bulkStockErrors"]
assert len(data["productVariant"]["stocks"]) == len(stocks)
def test_variant_stocks_update_stock_duplicated_warehouse(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
Stock.objects.create(product_variant=variant, warehouse=warehouse, quantity=10)
stocks = [
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 20,
},
{
"warehouse": graphene.Node.to_global_id("Warehouse", second_warehouse.pk),
"quantity": 100,
},
{
"warehouse": graphene.Node.to_global_id("Warehouse", warehouse.pk),
"quantity": 150,
},
]
variables = {"variantId": variant_id, "stocks": stocks}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_UPDATE_MUTATIONS,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksUpdate"]
errors = data["bulkStockErrors"]
assert errors
assert errors[0]["code"] == StockErrorCode.UNIQUE.name
assert errors[0]["field"] == "warehouse"
assert errors[0]["index"] == 2
VARIANT_STOCKS_DELETE_MUTATION = """
mutation ProductVariantStocksDelete($variantId: ID!, $warehouseIds: [ID!]!){
productVariantStocksDelete(
variantId: $variantId, warehouseIds: $warehouseIds
){
productVariant{
stocks{
id
quantity
warehouse{
slug
}
}
}
stockErrors{
field
code
message
}
}
}
"""
def test_product_variant_stocks_delete_mutation(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
Stock.objects.bulk_create(
[
Stock(product_variant=variant, warehouse=warehouse, quantity=10),
Stock(product_variant=variant, warehouse=second_warehouse, quantity=140),
]
)
stocks_count = variant.stocks.count()
warehouse_ids = [graphene.Node.to_global_id("Warehouse", second_warehouse.id)]
variables = {"variantId": variant_id, "warehouseIds": warehouse_ids}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_DELETE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksDelete"]
variant.refresh_from_db()
assert not data["stockErrors"]
assert (
len(data["productVariant"]["stocks"])
== variant.stocks.count()
== stocks_count - 1
)
assert data["productVariant"]["stocks"][0]["quantity"] == 10
assert data["productVariant"]["stocks"][0]["warehouse"]["slug"] == warehouse.slug
def test_product_variant_stocks_delete_mutation_invalid_warehouse_id(
staff_api_client, variant, warehouse, permission_manage_products
):
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
Stock.objects.bulk_create(
[Stock(product_variant=variant, warehouse=warehouse, quantity=10)]
)
stocks_count = variant.stocks.count()
warehouse_ids = [graphene.Node.to_global_id("Warehouse", second_warehouse.id)]
variables = {"variantId": variant_id, "warehouseIds": warehouse_ids}
response = staff_api_client.post_graphql(
VARIANT_STOCKS_DELETE_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantStocksDelete"]
variant.refresh_from_db()
assert not data["stockErrors"]
assert (
len(data["productVariant"]["stocks"]) == variant.stocks.count() == stocks_count
)
assert data["productVariant"]["stocks"][0]["quantity"] == 10
assert data["productVariant"]["stocks"][0]["warehouse"]["slug"] == warehouse.slug
avg_line_length: 33.697165 | max_line_length: 88 | alphanum_fraction: 0.636609

hexsha: 8ca24806898a75ed633e54bd29a7c6011d3d66ee | size: 6,745 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: django_pypayzen/tests/data.py | SamambaMan/django-payzen @ 88b2df368bb7afe32a33ae398a8c858531647068 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null

import collections
url_exemple = "http://www.google.com/"
cards = [
{
'type': 'CB',
'card_number': '4970100000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000007',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
]
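# A minimal sketch (assuming pytest is available) of how these card fixtures might
# be consumed in a test; ``make_payment`` is a hypothetical helper, not something
# provided by this package:
#
#     @pytest.mark.parametrize("card", cards)
#     def test_card_behaviour(card):
#         assert make_payment(card["card_number"]).result == card["result"]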
theme_args = collections.OrderedDict([
("success_footer_msg_return", "Success footer msg test"),
("cancel_footer_msg_return", "Cancel footer msg test"),
("secure_message", "Secure message test"),
("secure_message_register", "Secure message register test"),
("site_id_label", "Site ID label test"),
("css_for_payment", url_exemple+"payment.css"),
("css_for_payment_mobile", url_exemple+"mobile_payment.css"),
("header_for_mail", url_exemple+"mail_header.html"),
("footer_for_mail", url_exemple+"footer_mail.html"),
("shop_logo", url_exemple+"logo.png"),
])
payment_config_args = {
"first": 5000,
"count": 2,
"period": 5
}
payment_args = {
# Base fields
"vads_amount": "10000",
"vads_capture_delay": "2",
"vads_payment_cards": "CB;Visa",
"vads_return_mode": "NONE",
"vads_validation_mode": "1",
"vads_url_success": url_exemple,
"vads_url_referral": url_exemple,
"vads_url_refused": url_exemple,
"vads_url_cancel": url_exemple,
"vads_url_error": url_exemple,
"vads_url_return": url_exemple,
"vads_user_info": "Abbath Doom Occulta",
"vads_shop_name": "Immortal",
"vads_redirect_success_timeout": "1",
"vads_redirect_success_message": "Tragedies Blows At Horizon",
"vads_redirect_error_timeout": "1",
"vads_redirect_error_message": "At The Heart Of Winter",
# customer fields
"vads_cust_address": "Oeschstr.",
"vads_cust_address_number": "9",
"vads_cust_country": "GE",
"vads_cust_email": "test@nuclearblast.de",
"vads_cust_id": "1",
"vads_cust_name": "NUCLEAR BLAST",
"vads_cust_cell_phone": "+49 7162 9280-0",
"vads_cust_phone": "+49 7162 9280 26",
"vads_cust_title": "Guitarist",
"vads_cust_city": "Donzdorf",
"vads_cust_state": "Donzdorf",
"vads_cust_zip": "73072",
"vads_language": "fr",
# order fields
"vads_order_id": "1234567890",
"vads_order_info": "Order test info 1",
"vads_order_info2": "Order test info 2",
"vads_order_info3": "Order test info 3",
# shipping fields
"vads_ship_to_name": "NUCLEAR BLAST",
"vads_ship_to_street_number": "9",
"vads_ship_to_street": "Oeschstr. 9",
"vads_ship_to_street2": "...",
"vads_ship_to_zip": "73072",
"vads_ship_to_city": "Donzdorf",
"vads_ship_to_country": "GE",
"vads_ship_to_phone_num": "+49 7162 9280-0",
"vads_ship_to_state": "Donzdorf"
}
avg_line_length: 28.340336 | max_line_length: 66 | alphanum_fraction: 0.564566

hexsha: 49636c152799df9bc543f985a35be57a327cd8d8 | size: 3,062 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: atomate/vasp/builders/file_materials.py | Zhuoying/atomate @ 067023f0f740d3abac47b7ae7743c1c31eff8a06 | licenses: ["BSD-3-Clause-LBNL"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null

from pymatgen.core import Composition
from tqdm import tqdm
from atomate.utils.utils import get_database, get_logger
from atomate.vasp.builders.base import AbstractBuilder
logger = get_logger(__name__)
__author__ = "Anubhav Jain <ajain@lbl.gov>"
class FileMaterialsBuilder(AbstractBuilder):
def __init__(self, materials_write, data_file, delimiter=",", header_lines=0):
"""
Updates the database using a data file. Format of file must be:
<material_id or formula>, <property>, <value>
Comment lines should *start* with '#'.
Args:
materials_write: mongodb collection for materials (write access needed)
data_file (str): path to data file
delimiter (str): delimiter for file parsing
header_lines (int): number of header lines to skip in data file
"""
self._materials = materials_write
self._data_file = data_file
self._delimiter = delimiter
self.header_lines = header_lines
def run(self):
logger.info("Starting FileMaterials Builder.")
with open(self._data_file) as f:
line_no = 0
lines = [line for line in f] # only good for smaller files
pbar = tqdm(lines)
for line in pbar:
line = line.strip()
if line and not line.startswith("#"):
line_no += 1
if line_no > self.header_lines:
line = line.split(self._delimiter)
if "-" in line[0]:
search_val = line[0]
search_key = "material_id"
else:
search_key = "formula_reduced_abc"
search_val = Composition(
line[0]
).reduced_composition.alphabetical_formula
key = line[1]
val = line[2]
try:
val = float(val)
except Exception:
pass
self._materials.update(
{search_key: search_val}, {"$set": {key: val}}
)
logger.info("FileMaterials Builder finished processing")
def reset(self):
logger.warning("Cannot reset FileMaterials Builder!")
@classmethod
def from_file(cls, db_file, data_file=None, m="materials", **kwargs):
"""
Get a FileMaterialsBuilder using only a db file.
Args:
db_file (str): path to db file
data_file (str): path to data file
m (str): name of "materials" collection
**kwargs: other parameters to feed into the builder, e.g. mapi_key
"""
db_write = get_database(db_file, admin=True)
if data_file:
return cls(db_write[m], data_file, **kwargs)
else:
raise ValueError("data_file must be provided")
avg_line_length: 36.452381 | max_line_length: 83 | alphanum_fraction: 0.536577

hexsha: 14c018900b7ce5ce1932e16127e786f2e297b8fc | size: 13,975 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: napari/utils/events/evented_model.py | chili-chiu/napari @ eb6e672975ce105ac0125f71da3d0970d17cefb9 | licenses: ["BSD-3-Clause"]
max_stars_count: 7 | stars events: 2018-07-03T17:35:46.000Z to 2018-11-07T15:48:58.000Z
max_issues_count: 120 | issues events: 2018-09-04T22:05:13.000Z to 2019-03-02T01:13:57.000Z
max_forks_count: 8 | forks events: 2018-09-04T21:48:26.000Z to 2019-01-29T04:48:30.000Z

import operator
import sys
import warnings
from contextlib import contextmanager
from typing import Any, Callable, ClassVar, Dict, Set, Union
import numpy as np
from pydantic import BaseModel, PrivateAttr, main, utils
from ...utils.misc import pick_equality_operator
from ..translations import trans
from .event import EmitterGroup, Event
# encoders for non-napari specific field types. To declare a custom encoder
# for a napari type, add a `_json_encode` method to the class itself.
# it will be added to the model json_encoders in :func:`EventedMetaclass.__new__`
_BASE_JSON_ENCODERS = {np.ndarray: lambda arr: arr.tolist()}
@contextmanager
def no_class_attributes():
"""Context in which pydantic.main.ClassAttribute just passes value 2.
Due to a very annoying decision by PySide2, all class ``__signature__``
attributes may only be assigned **once**. (This seems to be regardless of
whether the class has anything to do with PySide2 or not). Furthermore,
the PySide2 ``__signature__`` attribute seems to break the python
descriptor protocol, which means that class attributes that have a
``__get__`` method will not be able to successfully retrieve their value
(instead, the descriptor object itself will be accessed).
This plays terribly with Pydantic, which assigns a ``ClassAttribute``
object to the value of ``cls.__signature__`` in ``ModelMetaclass.__new__``
in order to avoid masking the call signature of object instances that have
a ``__call__`` method (https://github.com/samuelcolvin/pydantic/pull/1466).
So, because we only get to set the ``__signature__`` once, this context
manager basically "opts-out" of pydantic's ``ClassAttribute`` strategy,
thereby directly setting the ``cls.__signature__`` to an instance of
``inspect.Signature``.
For additional context, see:
- https://github.com/napari/napari/issues/2264
- https://github.com/napari/napari/pull/2265
- https://bugreports.qt.io/browse/PYSIDE-1004
- https://codereview.qt-project.org/c/pyside/pyside-setup/+/261411
"""
if "PySide2" not in sys.modules:
yield
return
# monkey patch the pydantic ClassAttribute object
# the second argument to ClassAttribute is the inspect.Signature object
def _return2(x, y):
return y
main.ClassAttribute = _return2
try:
yield
finally:
# undo our monkey patch
main.ClassAttribute = utils.ClassAttribute
class EventedMetaclass(main.ModelMetaclass):
"""pydantic ModelMetaclass that preps "equality checking" operations.
A metaclass is the thing that "constructs" a class, and ``ModelMetaclass``
    is where pydantic puts a lot of its type introspection and ``ModelField``
    creation logic. Here, we simply tack on one more function that builds a
    ``cls.__eq_operators__`` dict, which is a mapping of field name to a function
that can be called to check equality of the value of that field with some
other object. (used in ``EventedModel.__eq__``)
This happens only once, when an ``EventedModel`` class is created (and not
when each instance of an ``EventedModel`` is instantiated).
"""
def __new__(mcs, name, bases, namespace, **kwargs):
with no_class_attributes():
cls = super().__new__(mcs, name, bases, namespace, **kwargs)
cls.__eq_operators__ = {}
for n, f in cls.__fields__.items():
cls.__eq_operators__[n] = pick_equality_operator(f.type_)
# If a field type has a _json_encode method, add it to the json
# encoders for this model.
# NOTE: a _json_encode field must return an object that can be
# passed to json.dumps ... but it needn't return a string.
if hasattr(f.type_, '_json_encode'):
encoder = f.type_._json_encode
cls.__config__.json_encoders[f.type_] = encoder
# also add it to the base config
# required for pydantic>=1.8.0 due to:
# https://github.com/samuelcolvin/pydantic/pull/2064
EventedModel.__config__.json_encoders[f.type_] = encoder
# check for @_.setters defined on the class, so we can allow them
# in EventedModel.__setattr__
cls.__property_setters__ = {}
for name, attr in namespace.items():
if isinstance(attr, property) and attr.fset is not None:
cls.__property_setters__[name] = attr
cls.__field_dependents__ = _get_field_dependents(cls)
return cls
def _get_field_dependents(cls: 'EventedModel') -> Dict[str, Set[str]]:
"""Return mapping of field name -> dependent set of property names.
Dependencies may be declared in the Model Config to emit an event
for a computed property when a model field that it depends on changes
e.g. (@property 'c' depends on model fields 'a' and 'b')
Examples
--------
class MyModel(EventedModel):
a: int = 1
b: int = 1
@property
def c(self) -> List[int]:
return [self.a, self.b]
@c.setter
def c(self, val: Sequence[int]):
self.a, self.b = val
class Config:
dependencies={'c': ['a', 'b']}
"""
if not cls.__property_setters__:
return {}
deps: Dict[str, Set[str]] = {}
_deps = getattr(cls.__config__, 'dependencies', None)
if _deps:
for prop, fields in _deps.items():
if prop not in cls.__property_setters__:
raise ValueError(
'Fields with dependencies must be property.setters. '
f'{prop!r} is not.'
)
for field in fields:
if field not in cls.__fields__:
warnings.warn(f"Unrecognized field dependency: {field}")
deps.setdefault(field, set()).add(prop)
else:
# if dependencies haven't been explicitly defined, we can glean
# them from the property.fget code object:
for prop, setter in cls.__property_setters__.items():
for name in setter.fget.__code__.co_names:
if name in cls.__fields__:
deps.setdefault(name, set()).add(prop)
return deps
class EventedModel(BaseModel, metaclass=EventedMetaclass):
"""A Model subclass that emits an event whenever a field value is changed.
Note: As per the standard pydantic behavior, default Field values are
not validated (#4138) and should be correctly typed.
"""
# add private attributes for event emission
_events: EmitterGroup = PrivateAttr(default_factory=EmitterGroup)
# mapping of name -> property obj for methods that are property setters
__property_setters__: ClassVar[Dict[str, property]]
# mapping of field name -> dependent set of property names
# when field is changed, an event for dependent properties will be emitted.
__field_dependents__: ClassVar[Dict[str, Set[str]]]
__eq_operators__: ClassVar[Dict[str, Callable[[Any, Any], bool]]]
__slots__: ClassVar[Set[str]] = {"__weakref__"} # type: ignore
# pydantic BaseModel configuration. see:
# https://pydantic-docs.helpmanual.io/usage/model_config/
class Config:
# whether to allow arbitrary user types for fields (they are validated
# simply by checking if the value is an instance of the type). If
# False, RuntimeError will be raised on model declaration
arbitrary_types_allowed = True
# whether to perform validation on assignment to attributes
validate_assignment = True
# whether to treat any underscore non-class var attrs as private
# https://pydantic-docs.helpmanual.io/usage/models/#private-model-attributes
underscore_attrs_are_private = True
# whether to validate field defaults (default: False)
# see https://github.com/napari/napari/pull/4138 before changing.
validate_all = False
# https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeljson
# NOTE: json_encoders are also added EventedMetaclass.__new__ if the
# field declares a _json_encode method.
json_encoders = _BASE_JSON_ENCODERS
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._events.source = self
# add event emitters for each field which is mutable
event_names = [
name
for name, field in self.__fields__.items()
if field.field_info.allow_mutation
]
event_names.extend(self.__property_setters__)
self._events.add(**dict.fromkeys(event_names))
def _super_setattr_(self, name: str, value: Any) -> None:
# pydantic will raise a ValueError if extra fields are not allowed
# so we first check to see if this field has a property.setter.
# if so, we use it instead.
if name in self.__property_setters__:
self.__property_setters__[name].fset(self, value)
else:
super().__setattr__(name, value)
def __setattr__(self, name: str, value: Any) -> None:
if name not in getattr(self, 'events', {}):
# fallback to default behavior
self._super_setattr_(name, value)
return
# grab current value
before = getattr(self, name, object())
# set value using original setter
self._super_setattr_(name, value)
# if different we emit the event with new value
after = getattr(self, name)
are_equal = self.__eq_operators__.get(name, operator.eq)
if not are_equal(after, before):
getattr(self.events, name)(value=after) # emit event
# emit events for any dependent computed property setters as well
for dep in self.__field_dependents__.get(name, {}):
getattr(self.events, dep)(value=getattr(self, dep))
    # expose the private EmitterGroup publicly
@property
def events(self) -> EmitterGroup:
return self._events
@property
def _defaults(self):
return get_defaults(self)
def reset(self):
"""Reset the state of the model to default values."""
for name, value in self._defaults.items():
if isinstance(value, EventedModel):
getattr(self, name).reset()
elif (
self.__config__.allow_mutation
and self.__fields__[name].field_info.allow_mutation
):
setattr(self, name, value)
def update(
self, values: Union['EventedModel', dict], recurse: bool = True
) -> None:
"""Update a model in place.
Parameters
----------
values : dict, napari.utils.events.EventedModel
Values to update the model with. If an EventedModel is passed it is
first converted to a dictionary. The keys of this dictionary must
be found as attributes on the current model.
recurse : bool
If True, recursively update fields that are EventedModels.
Otherwise, just update the immediate fields of this EventedModel,
which is useful when the declared field type (e.g. ``Union``) can have
different realized types with different fields.
"""
if isinstance(values, self.__class__):
values = values.dict()
if not isinstance(values, dict):
raise ValueError(
trans._(
"Unsupported update from {values}",
deferred=True,
values=type(values),
)
)
with self.events.blocker() as block:
for key, value in values.items():
field = getattr(self, key)
if isinstance(field, EventedModel) and recurse:
field.update(value, recurse=recurse)
else:
setattr(self, key, value)
if block.count:
self.events(Event(self))
def __eq__(self, other) -> bool:
"""Check equality with another object.
We override the pydantic approach (which just checks
``self.dict() == other.dict()``) to accommodate more complicated types
like arrays, whose truth value is often ambiguous. ``__eq_operators__``
        is constructed in ``EventedMetaclass.__new__``
"""
if not isinstance(other, EventedModel):
return self.dict() == other
for f_name, eq in self.__eq_operators__.items():
if f_name not in other.__eq_operators__:
return False
if (
hasattr(self, f_name)
and hasattr(other, f_name)
and not eq(getattr(self, f_name), getattr(other, f_name))
):
return False
return True
@contextmanager
def enums_as_values(self, as_values: bool = True):
"""Temporarily override how enums are retrieved.
Parameters
----------
as_values : bool, optional
Whether enums should be shown as values (or as enum objects),
by default `True`
"""
null = object()
before = getattr(self.Config, 'use_enum_values', null)
self.Config.use_enum_values = as_values
try:
yield
finally:
if before is not null:
self.Config.use_enum_values = before
else:
delattr(self.Config, 'use_enum_values')
def get_defaults(obj: BaseModel):
"""Get possibly nested default values for a Model object."""
dflt = {}
for k, v in obj.__fields__.items():
d = v.get_default()
if d is None and isinstance(v.type_, main.ModelMetaclass):
d = get_defaults(v.type_)
dflt[k] = d
return dflt
| 39.701705
| 84
| 0.632558
|
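As a quick illustration of the behaviour implemented in ``__setattr__`` above (an event is emitted only when the field's equality operator reports an actual change), here is a hedged sketch; it assumes EventedModel is re-exported from napari.utils.events, and the model and field names are invented for the demo.

# a minimal sketch, assuming napari is installed; EventedModel is assumed to be
# importable from napari.utils.events, and Box/width/height are made up
from napari.utils.events import EventedModel

class Box(EventedModel):
    width: int = 1
    height: int = 2

box = Box()
box.events.width.connect(lambda event: print("width changed to", event.value))

box.width = 5   # value differs from 1 -> the 'width' event is emitted
box.width = 5   # same value -> the equality operator suppresses the event

A model that also declared Config.dependencies (as in the _get_field_dependents docstring above) would additionally emit events for the dependent computed properties when their underlying fields change.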
c4640e711161c9c853adfba25701bfdeffebbf13
| 170
|
py
|
Python
|
docs/api-examples-source/widget.selectbox.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | null | null | null |
docs/api-examples-source/widget.selectbox.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | 9
|
2021-03-01T20:47:52.000Z
|
2022-02-12T20:49:50.000Z
|
docs/api-examples-source/widget.selectbox.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | null | null | null |
import streamlit as st
option = st.selectbox(
'How would you like to be contacted?',
('Email', 'Home phone', 'Mobile phone'))
st.write('You selected:', option)
| 21.25
| 44
| 0.664706
|
313c5625129b7c2fa4bf487115157cc796a548bd
| 2,401
|
py
|
Python
|
ensemble.py
|
JobQiu/kaggle_bowl18
|
cc62b5d406b0accbdbcf2357b70ee377e0344b3b
|
[
"MIT"
] | null | null | null |
ensemble.py
|
JobQiu/kaggle_bowl18
|
cc62b5d406b0accbdbcf2357b70ee377e0344b3b
|
[
"MIT"
] | null | null | null |
ensemble.py
|
JobQiu/kaggle_bowl18
|
cc62b5d406b0accbdbcf2357b70ee377e0344b3b
|
[
"MIT"
] | null | null | null |
import model as modellib
import pandas as pd
import cv2
import os
import numpy as np
from tqdm import tqdm
from inference_config import inference_config,inference_config101
from bowl_dataset import BowlDataset
from utils import rle_encode, rle_decode, rle_to_string
import functions as f
from u_net import *
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
model_path = 'weights/mask_rcnn_1.h5'
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
model2 = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
model2_path = 'weights/mask_rcnn_2.h5'
model2.load_weights(model2_path, by_name=True)
model_res101 = modellib.MaskRCNN(mode="inference",
config=inference_config101,
model_dir=MODEL_DIR)
model101_path = 'weights/mask_rcnn_101.h5'
model_res101.load_weights(model101_path, by_name=True)
u_net = get_unet()
u_net.load_weights('u-net/u-net.h5')
dataset_test = BowlDataset()
dataset_test.load_bowl('stage2_test_final')
dataset_test.prepare()
output = []
sample_submission = pd.read_csv('stage2_sample_submission_final.csv')
ImageId = []
EncodedPixels = []
print('start predicting')
for image_id in tqdm(sample_submission.ImageId):
image_path = os.path.join('stage2_test_final', image_id, 'images', image_id + '.png')
original_image = cv2.imread(image_path)
results = model.detect([original_image], verbose=0, probablymask=True)
    # NOTE: the second Mask R-CNN, the ResNet-101 model and the U-Net below are
    # all run on the image, but their outputs are not combined with the primary
    # model's masks; only `results` from the first model feeds the submission
    results2 = model2.detect([original_image], verbose=0, probablymask=True)
    results101 = model_res101.detect([original_image], verbose=0, probablymask=True)
    temp = []
    temp.append(original_image)
    u_net.predict(np.array(temp))
r = results[0]
masks = r['masks']
probablymasks = r['probablymasks']
ImageId_batch, EncodedPixels_batch = f.numpy2encoding_no_overlap2(masks, image_id, r['scores'])
ImageId += ImageId_batch
EncodedPixels += EncodedPixels_batch
f.write2csv('submission_v2.csv', ImageId, EncodedPixels)
| 30.392405
| 99
| 0.723032
|
38ccbf34a904a78b83d817bedc56491d3114b009
| 1,467
|
py
|
Python
|
neurolang/probabilistic/cplogic/tests/test_problog.py
|
hndgzkn/NeuroLang
|
a3178d47f80bc0941440d9bb09e06c2f217b9566
|
[
"BSD-3-Clause"
] | 1
|
2021-01-07T02:00:22.000Z
|
2021-01-07T02:00:22.000Z
|
neurolang/probabilistic/cplogic/tests/test_problog.py
|
hndgzkn/NeuroLang
|
a3178d47f80bc0941440d9bb09e06c2f217b9566
|
[
"BSD-3-Clause"
] | 207
|
2020-11-04T12:51:10.000Z
|
2022-03-30T13:42:26.000Z
|
neurolang/probabilistic/cplogic/tests/test_problog.py
|
hndgzkn/NeuroLang
|
a3178d47f80bc0941440d9bb09e06c2f217b9566
|
[
"BSD-3-Clause"
] | 6
|
2020-11-04T13:59:35.000Z
|
2021-03-19T05:28:10.000Z
|
import problog.core
import problog.logic
import problog.sdd_formula
from ....datalog.expressions import Fact
from ....expressions import Constant, Symbol
from ....logic import Implication
from ..problog_solver import cplogic_to_problog
from ..program import CPLogicProgram
P = Symbol("P")
a = Constant("a")
def test_convert_cpl_to_pl():
cpl_program = CPLogicProgram()
cpl_program.add_probabilistic_facts_from_tuples(
P, {(0.2, "a"), (1.0, "b")}
)
pl = cplogic_to_problog(cpl_program)
query = problog.logic.Term("query")
query_pred = problog.logic.Term("P", problog.logic.Var("v"))
pl += query(query_pred)
res = problog.core.ProbLog.convert(pl, problog.sdd_formula.SDD).evaluate()
expected = {
problog.logic.Term("P")(problog.logic.Constant("a")): 0.2,
problog.logic.Term("P")(problog.logic.Constant("b")): 1.0,
}
assert res == expected
def test_zero_arity():
cpl_program = CPLogicProgram()
cpl_program.walk(Fact(P()))
cplogic_to_problog(cpl_program)
cpl_program = CPLogicProgram()
cpl_program.walk(Fact(P(a)))
cpl_program.walk(Implication(Symbol("yes")(), P(a)))
pl = cplogic_to_problog(cpl_program)
query = problog.logic.Term("query")
query_pred = problog.logic.Term("yes")
pl += query(query_pred)
res = problog.core.ProbLog.convert(pl, problog.sdd_formula.SDD).evaluate()
expected = {problog.logic.Term("yes"): 1.0}
assert res == expected
| 31.891304
| 78
| 0.68848
|
517fbbef43f7d5a5f1a855971dfa18ed564385d7
| 3,771
|
py
|
Python
|
fastparquet/test/test_pd_optional_types.py
|
lithomas1/fastparquet
|
089a592ebf9eca72b7ef16134d89749ff5454936
|
[
"Apache-2.0"
] | null | null | null |
fastparquet/test/test_pd_optional_types.py
|
lithomas1/fastparquet
|
089a592ebf9eca72b7ef16134d89749ff5454936
|
[
"Apache-2.0"
] | null | null | null |
fastparquet/test/test_pd_optional_types.py
|
lithomas1/fastparquet
|
089a592ebf9eca72b7ef16134d89749ff5454936
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import fastparquet as fp
from fastparquet.test.util import tempdir
from fastparquet import write, parquet_thrift
from fastparquet.parquet_thrift.parquet import ttypes as tt
import numpy.random as random
EXPECTED_SERIES_INT8 = pd.Series(random.uniform(low=-128, high=127,size=100)).round()
EXPECTED_SERIES_INT16 = pd.Series(random.uniform(low=-32768, high=32767,size=100)).round()
EXPECTED_SERIES_INT32 = pd.Series(random.uniform(low=-2147483648, high=2147483647,size=100)).round()
EXPECTED_SERIES_INT64 = pd.Series(random.uniform(low=-9223372036854775808, high=9223372036854775807,size=100)).round()
EXPECTED_SERIES_UINT8 = pd.Series(random.uniform(low=0, high=255,size=100)).round()
EXPECTED_SERIES_UINT16 = pd.Series(random.uniform(low=0, high=65535,size=100)).round()
EXPECTED_SERIES_UINT32 = pd.Series(random.uniform(low=0, high=4294967295,size=100)).round()
EXPECTED_SERIES_UINT64 = pd.Series(random.uniform(low=0, high=18446744073709551615,size=100)).round()
EXPECTED_SERIES_BOOL = pd.Series(random.choice([False, True], 100))
EXPECTED_SERIES_STRING = pd.Series(random.choice([
'You', 'are', 'my', 'fire',
'The', 'one', 'desire',
'Believe', 'when', 'I', 'say',
'I', 'want', 'it', 'that', 'way'
], 100))
EXPECTED_SERIES_INT8.loc[20:30] = np.nan
EXPECTED_SERIES_INT16.loc[20:30] = np.nan
EXPECTED_SERIES_INT32.loc[20:30] = np.nan
EXPECTED_SERIES_INT64.loc[20:30] = np.nan
EXPECTED_SERIES_UINT8.loc[20:30] = np.nan
EXPECTED_SERIES_UINT16.loc[20:30] = np.nan
EXPECTED_SERIES_UINT32.loc[20:30] = np.nan
EXPECTED_SERIES_UINT64.loc[20:30] = np.nan
EXPECTED_SERIES_BOOL.loc[20:30] = np.nan
EXPECTED_SERIES_STRING.loc[20:30] = np.nan
TEST = pd.DataFrame({
'int8': EXPECTED_SERIES_INT8.astype('Int8'),
'int16': EXPECTED_SERIES_INT16.astype('Int16'),
'int32': EXPECTED_SERIES_INT32.astype('Int32'),
'int64': EXPECTED_SERIES_INT64.astype('Int64'),
'uint8': EXPECTED_SERIES_UINT8.astype('UInt8'),
'uint16': EXPECTED_SERIES_UINT16.astype('UInt16'),
'uint32': EXPECTED_SERIES_UINT32.astype('UInt32'),
'uint64': EXPECTED_SERIES_UINT64.astype('UInt64'),
'bool': EXPECTED_SERIES_BOOL.astype('boolean'),
'string': EXPECTED_SERIES_STRING.astype('string')
})
EXPECTED = pd.DataFrame({
'int8': EXPECTED_SERIES_INT8.astype('float16'),
'int16': EXPECTED_SERIES_INT16.astype('float32'),
'int32': EXPECTED_SERIES_INT32.astype('float64'),
'int64': EXPECTED_SERIES_INT64.astype('float64'),
'uint8': EXPECTED_SERIES_UINT8.astype('float16'),
'uint16': EXPECTED_SERIES_UINT16.astype('float32'),
'uint32': EXPECTED_SERIES_UINT32.astype('float64'),
'uint64': EXPECTED_SERIES_UINT64.astype('float64'),
'bool': EXPECTED_SERIES_BOOL.astype('float16'),
'string': EXPECTED_SERIES_STRING
})
EXPECTED_PARQUET_TYPES = {
'int8': 'INT32',
'int16': 'INT32',
'int32': 'INT32',
'int64': 'INT64',
'uint8': 'INT32',
'uint16': 'INT32',
'uint32': 'INT32',
'uint64': 'INT64',
'bool': 'BOOLEAN',
'string': 'BYTE_ARRAY'
}
@pytest.mark.parametrize('comp', (None,'snappy', 'gzip'))
@pytest.mark.parametrize('scheme', ('simple', 'hive'))
def test_write_nullable_columns(tempdir, scheme, comp):
fname = os.path.join(tempdir, 'test_write_nullable_columns.parquet')
write(fname, TEST, file_scheme=scheme, compression=comp)
pf = fp.ParquetFile(fname)
df = pf.to_pandas()
pq_types = {
se.name: tt.Type._VALUES_TO_NAMES[se.type]
for se in pf.schema.schema_elements
if se.type is not None
}
assert_frame_equal(EXPECTED, df, check_index_type=False, check_dtype=False)
assert pq_types == EXPECTED_PARQUET_TYPES
| 38.876289
| 118
| 0.72315
|
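The test above exercises pandas' nullable extension dtypes end to end through fastparquet. Below is a hedged sketch of the same round trip outside the test harness; the file name is arbitrary, and the read-back dtypes may come back as plain float/object columns (as the EXPECTED frame above assumes) depending on the fastparquet version.

# a minimal sketch, assuming fastparquet is installed; 'demo.parquet' is illustrative
import pandas as pd
import fastparquet as fp
from fastparquet import write

df = pd.DataFrame({
    "counts": pd.array([1, None, 3], dtype="Int64"),      # nullable integer column
    "flags": pd.array([True, None, False], dtype="boolean"),
})
write("demo.parquet", df)                  # nulls are preserved via the definition levels
back = fp.ParquetFile("demo.parquet").to_pandas()
print(back.dtypes)                         # float-like columns on read, as in EXPECTED above
print(back)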
5396740e2baba123b54f1cec8f2f28868c4fcffb
| 5,042
|
py
|
Python
|
utility/db.py
|
Andinoriel/diploma
|
a17060b1f0d54e03ba915e8c483bdb3df9e8787d
|
[
"MIT"
] | 3
|
2021-05-01T10:57:01.000Z
|
2021-05-06T16:36:35.000Z
|
utility/db.py
|
andinoriel/diploma
|
a17060b1f0d54e03ba915e8c483bdb3df9e8787d
|
[
"MIT"
] | null | null | null |
utility/db.py
|
andinoriel/diploma
|
a17060b1f0d54e03ba915e8c483bdb3df9e8787d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# =================================================================
#
# MODULE: utility:db
# LOCAL ENTRY POINT: .
#
# utility
# |-- common.sh
# |-- db.py *CURRENT*
# |-- utility.sh
#
# COMMENT: python DB log accessor
#
# =================================================================
import argparse
import datetime
import sqlite3
class DB:
conn = None
@staticmethod
def connect(path):
DB.conn = sqlite3.connect(path)
@staticmethod
def cursor():
return DB.conn.cursor()
@staticmethod
def close():
DB.conn.close()
@staticmethod
def init_db():
query = f'''
CREATE TABLE IF NOT EXISTS diploma_module (
id INTEGER PRIMARY KEY,
name TEXT,
description TEXT,
UNIQUE(name)
)
'''
DB.cursor().execute(query)
query = f'''
CREATE TABLE IF NOT EXISTS diploma_log (
id INTEGER PRIMARY KEY,
datetime DATETIME,
message TEXT,
status TEXT,
module_id INTEGER,
FOREIGN KEY(module_id) REFERENCES diploma_module(id)
)
'''
DB.cursor().execute(query)
DB.conn.commit()
@staticmethod
def log(module, datetime, message, status):
query = f'''
INSERT OR IGNORE INTO diploma_module(name) VALUES('{module}')
'''
DB.cursor().execute(query)
query = f'''
INSERT INTO diploma_log (module_id,datetime,message,status)
SELECT id,'{datetime}','{message}','{status}' FROM diploma_module
WHERE name='{module}'
'''
DB.cursor().execute(query)
DB.conn.commit()
@staticmethod
def get_all():
cursor = DB.cursor()
        # join the module table so each row is (id, module, datetime, message, status),
        # which is the layout the CLI printing code below expects
        query = f'''
            SELECT l.id, m.name, l.datetime, l.message, l.status
            FROM diploma_log l
            JOIN diploma_module m ON m.id = l.module_id
            ORDER BY l.datetime DESC
        '''
cursor.execute(query)
return cursor.fetchall()
@staticmethod
def get_by_date_range(date_start, date_end):
cursor = DB.cursor()
        query = f'''
            SELECT l.id, m.name, l.datetime, l.message, l.status
            FROM diploma_log l
            JOIN diploma_module m ON m.id = l.module_id
            WHERE l.datetime BETWEEN '{date_start}' AND '{date_end}'
            ORDER BY l.datetime DESC
        '''
cursor.execute(query)
return cursor.fetchall()
@staticmethod
def get_by_module(module):
cursor = DB.cursor()
        # diploma_log has no 'module' column; filter on the module name via a join
        query = f'''
            SELECT l.id, m.name, l.datetime, l.message, l.status
            FROM diploma_log l
            JOIN diploma_module m ON m.id = l.module_id
            WHERE m.name='{module}'
            ORDER BY l.datetime DESC
        '''
cursor.execute(query)
return cursor.fetchall()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="DBMS sqlite wrapper for diploma")
parser.add_argument('-f', '--file', help='path to database', type=str)
parser.add_argument('-l', '--log', help='save log to database', nargs='+')
parser.add_argument('-a', '--all', help='select all', action='store_true')
parser.add_argument('-d', '--by-date-range',
help='select by datetime range', nargs='+')
parser.add_argument('-m', '--by-module',
help='select by module', nargs='+')
args = parser.parse_args()
DB.connect(args.file)
DB.init_db()
if args.log:
module = args.log[0]
datetime = datetime.datetime.now().isoformat()
message = args.log[1]
status = args.log[2]
DB.log(module, datetime, message, status)
if args.by_date_range:
date_start = args.by_date_range[0]
date_end = args.by_date_range[1]
print('%-10s | %-30s | %-40s | %-10s' %
('id', 'module', 'datetime', 'status'))
print('-'*120)
for elem in DB.get_by_date_range(date_start, date_end):
print('-'*120)
print('%-10s | %-30s | %-40s | %-10s' %
(elem[0], elem[1], elem[2], elem[4]))
print('-'*120)
print(elem[3])
print('='*120)
print()
if args.by_module:
module = args.by_module[0]
print('\n' + '-'*120)
print(f'\n{module} - SUMMARY')
print('\n' + '-'*120)
print('%-10s | %-40s | %-10s' % ('id', 'datetime', 'status'))
print('-'*120)
for elem in DB.get_by_module(module):
print('-'*120)
print('%-10s | %-40s | %-10s' % (elem[0], elem[2], elem[4]))
print('-'*120)
print('\n' + elem[3] + '\n')
print('='*120)
print()
if args.all:
print('\n' + '-'*120)
print(f'\nSUMMARY')
print('\n' + '-'*120)
print('%-10s | %-30s | %-40s | %-10s' %
('id', 'module', 'datetime', 'status'))
print('-'*120)
for elem in DB.get_all():
print('-'*120)
print('%-10s | %-30s | %-40s | %-10s' %
(elem[0], elem[1], elem[2], elem[4]))
print('-'*120)
print('\n' + elem[3] + '\n')
print('='*120)
print()
else:
pass
| 26.536842
| 78
| 0.493257
|
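For orientation, here is a hedged sketch of driving the DB helper above directly from Python rather than through its argparse CLI; the database path, module name and message are placeholders, and the import name assumes the file is importable as a module named db.

# a minimal sketch; 'from db import DB' is a hypothetical import name for the
# file above, and the path/values are placeholders
import datetime
from db import DB

DB.connect("diploma_log.sqlite3")
DB.init_db()                                   # creates diploma_module / diploma_log if missing
DB.log("utility", datetime.datetime.now().isoformat(), "example message", "OK")
for row in DB.get_all():                       # rows ordered by datetime, newest first
    print(row)
DB.close()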
59e89aba12dc2fbd739e7aa010d5d86c99a4f976
| 8,649
|
py
|
Python
|
tests/test_graph.py
|
altdeep/y0
|
3e9e8d47b08b51f64216000db31d8f4c0fd388a3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-14T01:36:50.000Z
|
2021-09-14T01:36:50.000Z
|
tests/test_graph.py
|
altdeep/y0
|
3e9e8d47b08b51f64216000db31d8f4c0fd388a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_graph.py
|
altdeep/y0
|
3e9e8d47b08b51f64216000db31d8f4c0fd388a3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-14T01:36:57.000Z
|
2021-09-14T01:36:57.000Z
|
# -*- coding: utf-8 -*-
"""Test graph construction and conversion."""
import unittest
from textwrap import dedent
from typing import Set, Tuple
import networkx as nx
from ananke.graphs import ADMG
from y0.examples import verma_1
from y0.graph import DEFAULT_TAG, DEFULT_PREFIX, NxMixedGraph
from y0.resources import VIRAL_PATHOGENESIS_PATH
class TestGraph(unittest.TestCase):
"""Test graph construction and conversion."""
def setUp(self) -> None:
"""Set up the test case."""
self.addTypeEqualityFunc(NxMixedGraph, self.assert_graph_equal)
def assert_graph_equal(self, a: NxMixedGraph, b: NxMixedGraph, msg=None) -> None:
"""Check the graphs are equal (more nice than the builtin :meth:`NxMixedGraph.__eq__` for testing)."""
self.assertEqual(set(a.directed.nodes()), set(b.directed.nodes()), msg=msg)
self.assertEqual(set(a.undirected.nodes()), set(b.undirected.nodes()), msg=msg)
self.assertEqual(set(a.directed.edges()), set(b.directed.edges()), msg=msg)
self.assertEqual(
set(map(frozenset, a.undirected.edges())),
set(map(frozenset, b.undirected.edges())),
msg=msg,
)
def test_causaleffect_str_verma_1(self):
"""Test generating R code for the figure 1A graph for causaleffect."""
expected = dedent(
"""
g <- graph.formula(V1 -+ V2, V2 -+ V3, V3 -+ V4, V2 -+ V4, V4 -+ V2, simplify = FALSE)
g <- set.edge.attribute(graph = g, name = "description", index = c(4, 5), value = "U")
"""
).strip()
self.assertEqual(expected, verma_1.to_causaleffect_str())
def assert_labeled_convertable(
self, graph: NxMixedGraph, labeled_edges: Set[Tuple[str, str]]
) -> None:
"""Test that the graph can be converted to a DAG, then back to an ADMG."""
prefix = DEFULT_PREFIX
tag = DEFAULT_TAG
labeled_dag = graph.to_latent_variable_dag(prefix=prefix, tag=tag)
for node in labeled_dag:
self.assertIn(tag, labeled_dag.nodes[node], msg=f"Node: {node}")
self.assertEqual(node.startswith(prefix), labeled_dag.nodes[node][tag])
self.assertEqual(labeled_edges, set(labeled_dag.edges()))
reconstituted = NxMixedGraph.from_latent_variable_dag(labeled_dag, tag=tag)
self.assertEqual(graph, reconstituted)
def test_convertable(self):
"""Test graphs are convertable."""
for graph, labeled_edges in [
(
verma_1,
{
("V1", "V2"),
("V2", "V3"),
("V3", "V4"),
(f"{DEFULT_PREFIX}0", "V2"),
(f"{DEFULT_PREFIX}0", "V4"),
},
),
]:
with self.subTest():
self.assert_labeled_convertable(graph, labeled_edges)
def test_from_causalfusion(self):
"""Test importing a CausalFusion graph."""
graph = NxMixedGraph.from_causalfusion_path(VIRAL_PATHOGENESIS_PATH)
self.assertIsInstance(graph, NxMixedGraph)
def test_from_admg(self):
"""Test that all ADMGs can be converted to NxMixedGraph."""
expected = NxMixedGraph.from_adj(
directed={"W": [], "X": ["Y"], "Y": ["Z"], "Z": []},
undirected={"W": [], "X": ["Z"], "Y": [], "Z": []},
)
admg = ADMG(
vertices=["W", "X", "Y", "Z"],
di_edges=[["X", "Y"], ["Y", "Z"]],
bi_edges=[["X", "Z"]],
)
self.assertEqual(expected, NxMixedGraph.from_admg(admg))
def test_from_adj(self):
"""Test the adjacency graph is not a multigraph."""
directed = dict([("a", ["b", "c"]), ("b", ["a"]), ("c", [])])
expected = NxMixedGraph.from_edges(directed=[("a", "b"), ("a", "c"), ("b", "a")])
self.assertEqual(expected, NxMixedGraph.from_adj(directed=directed))
def test_is_acyclic(self):
"""Test the directed edges are acyclic."""
example = NxMixedGraph.from_edges(directed=[("a", "b"), ("a", "c"), ("b", "a")])
self.assertFalse(nx.algorithms.dag.is_directed_acyclic_graph(example.directed))
def test_is_not_multigraph(self):
"""Test the undirected edges are not inverses of each other."""
redundant_edges = [("a", "b"), ("b", "a")]
directed_edges = [("a", "b")]
expected = NxMixedGraph.from_edges(directed=[("a", "b")], undirected=[("a", "b")])
actual = NxMixedGraph.from_edges(directed=directed_edges, undirected=redundant_edges)
self.assertEqual(expected, actual)
def test_subgraph(self):
"""Test generating a subgraph from a set of vertices."""
graph = NxMixedGraph()
graph.add_directed_edge("X", "Y")
graph.add_directed_edge("Y", "Z")
graph.add_undirected_edge("X", "Z")
self.assertEqual(graph, graph.subgraph({"X", "Y", "Z"}))
subgraph = NxMixedGraph()
subgraph.add_directed_edge("X", "Y")
self.assertEqual(subgraph, graph.subgraph({"X", "Y"}))
def test_intervention(self):
"""Test generating a subgraph based on an intervention."""
graph = NxMixedGraph()
graph.add_directed_edge("X", "Y")
graph.add_directed_edge("Z", "X")
graph.add_undirected_edge("X", "Z")
graph.add_undirected_edge("X", "Y")
graph.add_undirected_edge("Y", "Z")
self.assertEqual(graph, graph.intervene(set()))
intervened_graph = NxMixedGraph()
intervened_graph.add_directed_edge("X", "Y")
intervened_graph.add_undirected_edge("Z", "Y")
self.assertEqual(intervened_graph, graph.intervene({"X"}))
def test_remove_nodes_from(self):
"""Test generating a new graph without the given nodes."""
graph = NxMixedGraph()
graph.add_directed_edge("X", "Y")
graph.add_directed_edge("Z", "X")
graph.add_undirected_edge("X", "Z")
graph.add_undirected_edge("X", "Y")
graph.add_undirected_edge("Y", "Z")
self.assertEqual(graph, graph.remove_nodes_from(set()))
subgraph = NxMixedGraph()
subgraph.add_undirected_edge("Z", "Y")
self.assertEqual(subgraph, graph.remove_nodes_from({"X"}))
def test_remove_outgoing_edges_from(self):
"""Test generating a new graph without the outgoing edgs from the given nodes."""
graph = NxMixedGraph()
graph.add_directed_edge("X", "Y")
self.assertEqual(graph, graph.remove_outgoing_edges_from(set()))
graph = NxMixedGraph()
graph.add_undirected_edge("X", "Y")
self.assertEqual(graph, graph.remove_outgoing_edges_from(set()))
graph = NxMixedGraph()
graph.add_directed_edge("W", "X")
graph.add_directed_edge("X", "Y")
graph.add_directed_edge("Y", "Z")
expected = NxMixedGraph()
expected.add_node("X")
expected.add_directed_edge("W", "X")
expected.add_directed_edge("Y", "Z")
self.assertEqual(expected, graph.remove_outgoing_edges_from({"X"}))
def test_ancestors_inclusive(self):
"""Test getting ancestors, inclusive."""
graph = NxMixedGraph()
graph.add_directed_edge("C", "A")
graph.add_directed_edge("C", "B")
graph.add_directed_edge("D", "C")
graph.add_directed_edge("A", "X")
graph.add_directed_edge("A", "Y")
graph.add_directed_edge("B", "Z")
self.assertEqual({"A", "B", "C", "D"}, graph.ancestors_inclusive({"A", "B"}))
graph = NxMixedGraph()
graph.add_directed_edge("X", "Z")
graph.add_directed_edge("Z", "Y")
graph.add_undirected_edge("X", "Y")
self.assertEqual({"X", "Y", "Z"}, graph.ancestors_inclusive({"Y"}))
self.assertEqual({"X", "Z"}, graph.ancestors_inclusive({"Z"}))
self.assertEqual({"X"}, graph.ancestors_inclusive({"X"}))
def test_get_c_components(self):
"""Test that get_c_components works correctly."""
g1 = NxMixedGraph().from_edges(directed=[("X", "Y"), ("Z", "X"), ("Z", "Y")])
c1 = [frozenset(["X"]), frozenset(["Y"]), frozenset(["Z"])]
g2 = NxMixedGraph().from_edges(directed=[("X", "Y")], undirected=[("X", "Y")])
c2 = [frozenset(["X", "Y"])]
g3 = NxMixedGraph().from_edges(directed=[("X", "M"), ("M", "Y")], undirected=[("X", "Y")])
c3 = [frozenset(["X", "Y"]), frozenset(["M"])]
for graph, components in [(g1, c1), (g2, c2), (g3, c3)]:
self.assertIsInstance(graph, NxMixedGraph)
self.assertEqual(components, graph.get_c_components())
| 41.782609
| 110
| 0.594866
|
a598cd637c458da80382d89e9c24d4c6b567dd29
| 22,391
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_load_balancers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_load_balancers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_load_balancers_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations:
"""LoadBalancersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.LoadBalancer":
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_08_01.models.LoadBalancer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "models.LoadBalancer",
**kwargs
) -> "models.LoadBalancer":
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LoadBalancer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "models.LoadBalancer",
**kwargs
) -> AsyncLROPoller["models.LoadBalancer"]:
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load balancer operation.
:type parameters: ~azure.mgmt.network.v2017_08_01.models.LoadBalancer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LoadBalancer or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_08_01.models.LoadBalancer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.LoadBalancerListResult"]:
"""Gets all the load balancers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_08_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.LoadBalancerListResult"]:
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_08_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
| 48.049356
| 195
| 0.662811
|
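These operations are normally reached through the service client rather than instantiated directly, as the class docstring notes. Below is a hedged sketch of listing and deleting a load balancer with the async client; the credential, subscription id, resource names and the exact client import are placeholders and may differ by package version.

# a minimal sketch, not a verbatim SDK sample; identifiers below are placeholders
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def main():
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, "<subscription-id>") as client:
        # paged listing iterates the AsyncItemPaged returned by list()
        async for lb in client.load_balancers.list("my-resource-group"):
            print(lb.name)
        # long-running delete: begin_delete() returns an AsyncLROPoller
        poller = await client.load_balancers.begin_delete(
            "my-resource-group", "my-load-balancer"
        )
        await poller.result()
    await credential.close()

asyncio.run(main())

begin_create_or_update follows the same poller pattern, taking a LoadBalancer parameters object as documented above.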
30cacbdd070dfc6921a7d853332f74425cca7c62
| 1,470
|
py
|
Python
|
aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetRepoListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetRepoListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetRepoListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class GetRepoListRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'cr', '2016-06-07', 'GetRepoList','acr')
self.set_uri_pattern('/repos')
self.set_method('GET')
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Page(self):
return self.get_query_params().get('Page')
def set_Page(self,Page):
self.add_query_param('Page',Page)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status)
| 32.666667
| 69
| 0.748299
|
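A hedged sketch of issuing this request through the core SDK client follows; the credentials, region and paging values are placeholders.

# a minimal sketch, assuming aliyun-python-sdk-core and aliyun-python-sdk-cr are
# installed; credentials and region below are placeholders
from aliyunsdkcore.client import AcsClient
from aliyunsdkcr.request.v20160607.GetRepoListRequest import GetRepoListRequest

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = GetRepoListRequest()
request.set_Page(1)          # query parameters map directly onto the setters above
request.set_PageSize(30)

response = client.do_action_with_exception(request)   # raw JSON bytes from the ROA endpoint
print(response)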
b681fd955cb1f63d675b56b1590efb544ca05038
| 28,205
|
py
|
Python
|
Providers/Scripts/3.x/Scripts/nxFirewall.py
|
amitsara/PowerShell-DSC-for-Linux
|
22694d09f1fe61228210aae9bdd53b6f3da4c2d1
|
[
"MIT"
] | 2
|
2020-05-19T20:07:32.000Z
|
2020-08-08T00:58:15.000Z
|
Providers/Scripts/3.x/Scripts/nxFirewall.py
|
amitsara/PowerShell-DSC-for-Linux
|
22694d09f1fe61228210aae9bdd53b6f3da4c2d1
|
[
"MIT"
] | null | null | null |
Providers/Scripts/3.x/Scripts/nxFirewall.py
|
amitsara/PowerShell-DSC-for-Linux
|
22694d09f1fe61228210aae9bdd53b6f3da4c2d1
|
[
"MIT"
] | 4
|
2019-10-31T19:10:42.000Z
|
2022-03-15T07:42:03.000Z
|
#!/bin/env python
# =======================
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# See license.txt for license information.
# =======================
import subprocess
import imp
import os
import sys
import socket
import re
from functools import reduce
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# [ClassVersion("1.0.0"), FriendlyName("nxFirewallResource")]
# class MSFT_nxFirewallResource:OMI_BaseResource
# {
# [Key] string Name;
# [Write] string InterfaceName;
# [Write] string FirewallType;  # Iptables, Ip6tables, yast, ufw, susefirewall2
# [Write, ValueMap{"tcp", "udp", "icmp"}] string Protocol;
# [Write, ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] string Ensure;
# [Write, ValueMap{"IPv4", "IPv6"}, Values{"IPv4", "IPv6"}] string AddressFamily;
# [Write, ValueMap{"allow", "block"}, Values{"allow", "block"}] string Access;
# [Write, ValueMap{"new", "related", "established"}] string State;
# [Write, ValueMap{"input", "output", "forward"}, Values{"input", "output", "forward"}] string Direction;
# [Write, ValueMap{"top", "after-top", "before-end", "end"}, Values{"top", "after-top", "before-end", "end"}] string Position;
# [Write] string SourceHost;
# [Write] string SourcePort;
# [Write] string DestinationHost;
# [Write] string DestinationPort;
# };
def init_vars(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort):
if Name is None or Name == '':
print('Error: "Name" must be specified.', file=sys.stderr)
LG().Log('ERROR', 'Error: "Name" must be specified.')
raise Exception('Name must be specified.')
Name = Name
if InterfaceName is None or InterfaceName == '':
InterfaceName = 'eth0'
else:
InterfaceName = InterfaceName
if FirewallType is None or FirewallType == '':
print('Error: "FirewallType" must be specified.', file=sys.stderr)
LG().Log('ERROR', 'Error: "FirewallType" must be specified.')
raise Exception('FirewallType must be specified.')
FirewallType = FirewallType.lower()
if Protocol is None or Protocol == '':
Protocol = 'tcp'
Protocol = Protocol
if Ensure is None or Ensure == '':
Ensure = 'present'
Ensure = Ensure.lower()
if AddressFamily is None or AddressFamily == '':
AddressFamily = 'ipv4'
AddressFamily = AddressFamily.lower()
if Access is None or Access == '':
print(
'Error: "Access" must be specified.', \
file=sys.stderr)
LG().Log(
'ERROR', 'Error: "Access" must be specified.')
raise Exception('Access must be specified.')
Access = Access.lower()
if State is None:
State = ''
if Position is None or Position == '':
Position = 'top'
Position = Position
if SourceHost is None:
SourceHost = ''
else :
SourceHost = SourceHost
if ValidateAddress(SourceHost, AddressFamily) is False:
print(
'Error: Invalid address for "SourceHost".', file=sys.stderr)
LG().Log('ERROR', 'Error: Invalid address for "SourceHost".')
raise Exception('Error: Invalid address for "SourceHost".')
        if AddressFamily == 'ipv6': # ip6tables only looks up to the first ':'
if '/' in SourceHost:
pfx=SourceHost.split('/')[1]
SourceHost = SourceHost.split(':')[0]+'::/'+pfx
else:
SourceHost = SourceHost.split(':')[0]+'::'
if SourcePort is None:
SourcePort = ''
else :
SourcePort = SourcePort
if ValidatePort(SourcePort) is False:
print(
'Error: Invalid address for "SourcePort".', file=sys.stderr)
LG().Log('ERROR', 'Error: Invalid address for "SourcePort".')
raise Exception('Error: Invalid address for "SourcePort".')
if DestinationHost is None:
DestinationHost = ''
else :
DestinationHost = DestinationHost
if ValidateAddress(DestinationHost, AddressFamily) is False:
print(
'Error: Invalid address for "DestinationHost".', file=sys.stderr)
LG().Log('ERROR', 'Error: Invalid address for "DestinationHost".')
raise Exception('Error: Invalid address for "DestinationHost".')
        if AddressFamily == 'ipv6': # ip6tables only looks up to the first ':'
if '/' in DestinationHost:
pfx=DestinationHost.split('/')[1]
DestinationHost = DestinationHost.split(':')[0]+'::/'+pfx
else:
DestinationHost = DestinationHost.split(':')[0]+'::'
if DestinationPort is None:
DestinationPort = ''
else :
DestinationPort = DestinationPort
if ValidatePort(DestinationPort) is False:
print(
'Error: Invalid address for "DestinationPort".', file=sys.stderr)
LG().Log('ERROR', 'Error: Invalid address for "DestinationPort".')
raise Exception('Error: Invalid address for "DestinationPort".')
if Direction is None or Direction == '':
Direction = 'input'
Direction = Direction
return Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily, \
Access, State, Direction, Position, SourceHost, SourcePort, \
DestinationHost, DestinationPort
def Set_Marshall(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort):
(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort) = init_vars(Name, InterfaceName, FirewallType, Protocol, Ensure,
AddressFamily, Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort)
Rule = RuleBag(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort)
retval = Set(Rule)
return retval
def Test_Marshall(Name, InterfaceName, FirewallType, Protocol, Ensure,
AddressFamily, Access, State, Direction, Position, SourceHost,
SourcePort, DestinationHost, DestinationPort):
(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily, Access,
State, Direction, Position, SourceHost, SourcePort, DestinationHost,
DestinationPort) = init_vars(Name, InterfaceName, FirewallType, Protocol, Ensure,
AddressFamily, Access, State, Direction, Position, SourceHost,
SourcePort, DestinationHost, DestinationPort)
Rule = RuleBag(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort)
if Ensure == 'present':
if Test(Rule) == 0:
return [0]
else:
return [-1]
else:
if Test(Rule) == 0:
return [-1]
else:
return [0]
def Get_Marshall(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort):
arg_names = list(locals().keys())
(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily, Access, State,
Direction, Position, SourceHost, SourcePort, DestinationHost,
DestinationPort) = init_vars(Name, InterfaceName, FirewallType, Protocol, Ensure,
AddressFamily, Access, State, Direction, Position, SourceHost,
SourcePort, DestinationHost, DestinationPort)
Rule = RuleBag(Name, InterfaceName, FirewallType, Protocol, Ensure,
AddressFamily, Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort)
(Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort) = Get(Rule)
Name = protocol.MI_String(Name)
InterfaceName = protocol.MI_String(InterfaceName)
FirewallType = protocol.MI_String(FirewallType)
Protocol = protocol.MI_String(Protocol)
Ensure = protocol.MI_String(Ensure)
AddressFamily = protocol.MI_String(AddressFamily)
Access = protocol.MI_String(Access)
State = protocol.MI_StringA(State)
Direction = protocol.MI_String(Direction)
Position = protocol.MI_String(Position)
SourceHost = protocol.MI_String(SourceHost)
SourcePort = protocol.MI_String(SourcePort)
DestinationHost = protocol.MI_String(DestinationHost)
DestinationPort = protocol.MI_String(DestinationPort)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return 0, retd
# ############################
# ## Begin user defined DSC functions
# ############################
def RunGetOutput(cmd, no_output, chk_err=True):
"""
Wrapper for subprocess.check_output.
Execute 'cmd'. Returns return code and STDOUT,
trapping expected exceptions.
Reports exceptions to Error if chk_err parameter is True
"""
def check_output(no_output, *popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
if no_output:
out_file = None
else:
out_file = subprocess.PIPE
process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" \
% (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
output = ''
try:
output = subprocess.check_output(
no_output, cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
if chk_err:
print('CalledProcessError. Error Code is ' +
str(e.returncode), file=sys.stdout)
LG().Log('ERROR', 'CalledProcessError. Error Code is ' +
str(e.returncode))
print(
'CalledProcessError. Command string was ' + e.cmd, \
file=sys.stdout)
LG().Log('ERROR',
'CalledProcessError. Command string was ' + e.cmd, \
)
print('CalledProcessError. Command result was ' +
(e.output[:-1]).decode('ascii', 'ignore'), \
file=sys.stdout)
LG().Log('ERROR', 'CalledProcessError. Command result was ' +
(e.output[:-1]).decode('ascii', 'ignore'))
if no_output:
return e.returncode, None
else:
return e.returncode, e.output.decode('ascii', 'ignore')
if no_output:
return 0, None
else:
return 0, output.decode('ascii', 'ignore')
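# Illustrative usage: RunGetOutput('iptables -L', False) returns a tuple of
# (return_code, stdout_text); when no_output is True the second element is None.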
def ValidateAddress(IPAddress, AddressFamily):
    if IPAddress is None or len(IPAddress) == 0: # allow empty or None.
        return True
    if ':' not in IPAddress and IPAddress[1].isalpha(): # don't try to validate a hostname.
return True
if '/' in IPAddress:
IPAddress=IPAddress.split('/')[0]
if 'ipv4' in AddressFamily:
ptype = socket.AF_INET
elif 'ipv6' in AddressFamily:
ptype = socket.AF_INET6
else:
return False
try:
socket.inet_pton(ptype, IPAddress)
except:
return False
return True
def ValidatePort(Port):
try:
socket.getaddrinfo(None, Port, 0, 0, socket.IPPROTO_TCP)
except:
return False
return True
def IsFirewallRunning(rule):
if rule.FirewallType == 'iptables':
code, out = RunGetOutput('iptables -L',False)
if code == 0:
return True
if rule.FirewallType == 'ip6tables':
code, out = RunGetOutput('ip6tables -L',False)
if code == 0:
return True
elif rule.FirewallType == 'firewalld':
code, out = RunGetOutput('ps -ef | grep -v grep | grep firewalld',False)
if code == 0:
return True
elif rule.FirewallType == 'ufw':
code, out = RunGetOutput(
'iptables -L | grep -v grep | grep ufw-before-input',False)
if code == 0:
return True
elif rule.FirewallType == 'yast' or rule.FirewallType == 'susefirewall2':
code, out = RunGetOutput('iptables -L | grep -v grep | grep SFW2',False)
if code == 0:
return True
return False
def RuleExists(rule):
if 'instancemethod' in repr(type(rule.cmds[rule.FirewallType]['check'])) :
return rule.cmds[rule.FirewallType]['check']()
if 'method' in repr(type(rule.cmds[rule.FirewallType]['check'])) :
return rule.cmds[rule.FirewallType]['check']()
print('REPR IS '+ repr(type(rule.cmds[rule.FirewallType]['check'])))
cmd = rule.fmt(rule.cmds[rule.FirewallType]['check'])
code, out = RunGetOutput(cmd,False)
print('Check rule exists: ' + cmd + ' result code is: ' + str(code))
LG().Log('INFO', 'Check rule exists: ' +
cmd + ' result code is: ' + str(code))
return code
def GetRuleCountInChain(rule):
rule.cmds[rule.FirewallType]['chain']()
cmd = rule.iptbls+' -L ' + rule.Direction
code, out = RunGetOutput(cmd,False)
if code != 0:
return 0
if out is not None and len(out) > 0:
Val = None
try:
Val = len(out.splitlines())-2
except:
print('ERROR: Rule count is not numeric in Check rule exists: ' +
cmd + ' result code is: ' + str(code))
LG().Log('ERROR', 'Rule count is not numeric in Check rule exists: ' +
cmd + ' result code is: ' + str(code))
print('Count Rules in chain: ' + cmd + ' result code is: ' + str(code))
LG().Log('INFO', 'Count Rules in chain: ' +
cmd + ' result code is: ' + str(code))
if Val is not None:
return Val
else:
return 0
def DoAddRemove(rule):
count = GetRuleCountInChain(rule)
rule.Index = 0
p = rule.Position
if p != 'end':
p = 'ind'
if rule.Position == 'top':
rule.Index = 1
elif rule.Position == 'after-top':
if count > 1:
rule.Index = 2
else:
rule.Index = 1
elif rule.Position == 'before-end':
if count > 1:
rule.Index = count
else:
p = 'end'
cmd = rule.fmt(rule.cmds[rule.FirewallType][rule.Ensure][p])
code, out = RunGetOutput(cmd,False)
print('Set rule ' + rule.Ensure + ': ' +
cmd + ' result code is: ' + str(code))
LG().Log('INFO', 'Set rule ' + rule.Ensure +
': ' + cmd + ' result code is: ' + str(code))
if code == 0:
rule.cmds[rule.FirewallType]['post']()
return code
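# Note: DoAddRemove() above translates the symbolic Position value ('top',
# 'after-top', 'before-end', 'end') into a numeric rule index for the chain
# before formatting and running the add/remove command of the firewall type.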
def Test(rule):
if IsFirewallRunning(rule) is False:
print('Error ' + rule.FirewallType + ' is not running.')
LG().Log('ERROR','Error ' + rule.FirewallType + ' is not running.')
return -1
if RuleExists(rule) == 0:
return 0
return -1
def Set(rule):
if IsFirewallRunning(rule) is False:
print('Error ' + rule.FirewallType + ' is not running.')
LG().Log('ERROR','Error ' + rule.FirewallType + ' is not running.')
return [-1]
ret = DoAddRemove(rule)
if ret == 0:
return [0]
return [-1]
def Get(rule):
if Test(rule) == 0:
Ensure = 'Present'
else:
Ensure = 'Absent'
return rule.Name, rule.OrigInterfaceName, rule.FirewallType, rule.Protocol, Ensure, \
rule.AddressFamily, rule.Access, rule.OrigDirection, \
rule.Position, rule.SourceHost, rule.SourcePort, \
rule.DestinationHost, rule.DestinationPort
iptables_regex=r"""
^-A[ ]+(?P<Direction>.*?)[ ]+
((?:-s[ ]+)(?P<SourceHost>.*?)
(((?:/)(?P<Spfx>.*?)(?:[ ]+))|(?:[ ]+)))?
((?:-d[ ]+)(?P<DestinationHost>.*?)
(((?:/)(?P<Dpfx>.*?)(?:[ ]+))|(?:[ ]+)))?
((?:-i[ ]+)(?P<InterfaceName>.*?)(?:[ ]+))?
((?:-p[ ]+)(?P<proto>.*?)(?:[ ]+))?
((?:-m[ ]+)(?P<matchport>.*?)(?:[ ]+))?
((?:--sport[ ]+)(?P<SourcePort>.*?)(?:[ ]+))?
((?:--dport[ ]+)(?P<DestinationPort>.*?)(?:[ ]+))?
((?:-m[ ]+)(?P<matchstate>.*?)(?:[ ]+))?
((?:--state[ ]+)(?P<State>.*?)(?:[ ]+))?
((?:-j[ ]+)(?P<Access>.*?)((?:[ ]+)|(?:$)))?
"""
class RuleBag(object):
def __init__(self, Name, InterfaceName, FirewallType, Protocol, Ensure, AddressFamily,
Access, State, Direction, Position, SourceHost, SourcePort,
DestinationHost, DestinationPort):
self.Name = Name
self.FirewallType = FirewallType
self.Protocol = Protocol
self.Ensure = Ensure
self.AddressFamily = AddressFamily
self.iptbls='iptables'
if self.AddressFamily == 'ipv6' :
self.iptbls = 'ip6tables'
if 'allow' == Access :
self.Access = 'ACCEPT'
else:
self.Access = 'DROP'
if len(State)>0:
self.State=reduce(lambda x, y: x + ',' + y, State)
else:
self.State=''
self.Direction = Direction
self.Position = Position
self.SourceHost = SourceHost
self.SourcePort = SourcePort
self.DestinationHost = DestinationHost
self.DestinationPort = DestinationPort
self.Index = 0
self.InterfaceName = InterfaceName
self.OrigInterfaceName = InterfaceName
if self.Direction.lower() == 'output':
self.InterfaceName = ''
self.OrigDirection = Direction
self.cmds = {}
# iptables
self.cmds['iptables'] = {}
self.cmds['iptables']['present'] = {}
self.cmds['iptables']['present']['end'] = self.iptbls + ' -A {Direction} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['iptables']['present']['ind'] = self.iptbls + ' -I {Direction} {Index} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['iptables']['absent'] = {}
self.cmds['iptables']['absent']['end'] = self.iptbls + ' -D {Direction} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['iptables']['absent']['ind'] = self.iptbls + ' -D {Direction} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['iptables']['check'] = self.iptables_check
self.cmds['iptables']['post'] = self.iptables_post
self.cmds['iptables']['chain'] = self.iptables_chain_translate
# ip6tables
self.cmds['ip6tables'] = self.cmds['iptables']
# firewalld firewall-cmd [--permanent] --direct --add-rule { ipv4 | ipv6 | eb } <table> <chain> <priority> <args>
self.cmds['firewalld'] = {}
self.cmds['firewalld']['present'] = {}
self.cmds['firewalld']['present']['ind'] = 'firewall-cmd --direct --add-rule ' + ' {AddressFamily} filter {Direction} {Index} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['firewalld']['present']['end'] = 'firewall-cmd --direct --add-rule ' + ' {AddressFamily} filter {Direction} {Index} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['firewalld']['absent'] = {}
self.cmds['firewalld']['absent']['ind'] = 'firewall-cmd --direct --remove-rule ' + ' {AddressFamily} filter {Direction} {Index} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['firewalld']['absent']['end'] = 'firewall-cmd --direct --remove-rule ' + ' {AddressFamily} filter {Direction} {Index} -i {InterfaceName} -p {Protocol} -s {SourceHost} --sport {SourcePort} -d {DestinationHost} --dport {DestinationPort} -m state --state {State} -j {Access}'
self.cmds['firewalld']['check'] = self.iptables_check
self.cmds['firewalld']['post'] = self.firewalld_post
self.cmds['firewalld']['chain'] = self.firewalld_chain_translate
# SuSEfirewall2
self.cmds['susefirewall2'] = {}
self.cmds['susefirewall2']['present'] = self.cmds['iptables']['present']
self.cmds['susefirewall2']['absent'] = self.cmds['iptables']['absent']
self.cmds['susefirewall2']['check'] = self.cmds['iptables']['check']
self.cmds['susefirewall2']['post'] = self.susefirewall2_post
self.cmds['susefirewall2']['chain'] = self.susefirewall2_chain_translate
# SuSEfirewall2 - yast
self.cmds['yast']=self.cmds['susefirewall2']
# ufw
self.cmds['ufw'] = {}
self.cmds['ufw']['present'] = self.cmds['iptables']['present']
self.cmds['ufw']['absent'] = self.cmds['iptables']['absent']
self.cmds['ufw']['check'] = self.iptables_check
self.cmds['ufw']['post'] = self.ufw_post
self.cmds['ufw']['chain'] = self.ufw_chain_translate
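    # self.cmds maps every supported firewall type to its 'present'/'absent'
    # command templates plus the 'check', 'post' and 'chain' callables; fmt()
    # below substitutes the {Token} placeholders with the instance attributes.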
def iptables_check(self):
self.cmds[self.FirewallType]['chain']()
r=re.compile(iptables_regex,re.VERBOSE)
code,out = RunGetOutput(self.iptbls + '-save ', False)
mykeys=self.__dict__.keys()
for line in out.splitlines():
m=r.search(line)
if m == None:
continue
found=True
groupd=dict(m.groupdict())
for k in groupd.keys():
if k in mykeys:
if groupd[k] == None :
groupd[k] = ''
if k[-4:] == 'Host':
if self.__dict__[k] != None and '/' in self.__dict__[k]:
groupd[k]+= '/' + m.group(k[0:1]+'pfx')
if groupd[k] == '::':
groupd[k] = ''
if groupd[k] != self.__dict__[k]:
found=False
break
if found == True:
return 0
return 1
def iptables_chain_translate(self):
self.Direction = self.OrigDirection.upper()
def iptables_post(self):
self.update_iptables_rules()
def update_iptables_rules(self):
rules_file = '/etc/sysconfig/' + self.iptbls
code = os.system(self.iptbls + '-save > ' + rules_file)
if code != 0 :
print('Error: '+ self.iptbls +'-save > ' + rules_file + ' failed.', file=sys.stderr)
LG().Log('ERROR', 'Error: '+ self.iptbls +'-save > ' + rules_file + ' failed.')
return
def firewalld_chain_translate(self):
self.Direction = self.OrigDirection.upper() + '_direct'
def firewalld_post(self):
self.update_firewalld_rules()
def update_firewalld_rules(self):
p = self.Position
if p != 'end':
p = 'ind'
rule = self.fmt(self.cmds[self.FirewallType][self.Ensure][p])
cmd = rule.replace(
'firewall-cmd', 'firewall-cmd --permanent ')
code, out = RunGetOutput(cmd,False)
print('Set permanent rule ' + self.Ensure +
': ' + cmd + ' result code is: ' + str(code))
LG().Log('INFO', 'Set permanent rule ' + self.Ensure +
': ' + cmd + ' result code is: ' + str(code))
return
def ufw_chain_translate(self):
if self.Position == 'top':
p = 'before'
elif self.Position == 'after-top' or self.Position == 'before-end':
p = 'user'
else:
p = 'after'
ufw = 'ufw-'
if self.AddressFamily == 'ipv6':
ufw = 'ufw6-'
self.Direction = ufw + p + '-' + self.OrigDirection.lower()
def ufw_post(self):
self.update_ufw_rules()
def update_ufw_rules(self):
rules_file = {}
p=''
if self.iptbls == 'ip6tables':
p='6'
rules_file['top'] = '/etc/ufw/before'+p+'.rules'
rules_file['after-top'] = '/lib/ufw/user'+p+'.rules'
rules_file['before-end'] = '/lib/ufw/user'+p+'.rules'
rules_file['end'] = '/etc/ufw/after'+p+'.rules'
p='end'
if self.Position != 'end':
p = 'ind'
search_str = \
r'^(.filter)(.*)((:.*?\n\n)|(:.*?\n#.*?\n\n))'
rule = self.fmt(self.cmds[self.FirewallType]['present'][p])
rule=re.sub(self.iptbls+r'.*? ','',rule)
rplace_str = r'\1\2\3' + rule + '\n\n'
text = ''
with open(rules_file[self.Position], 'r') as F:
text = F.read()
text=text.replace(rule+'\n','') # remove rule
if self.Ensure == 'present':
srch = re.compile(search_str, re.M | re.S)
text = srch.sub(rplace_str, text)
with open(rules_file[self.Position], 'w') as F:
F.write(text)
def susefirewall2_chain_translate(self):
self.Direction = self.OrigDirection.upper()
def susefirewall2_post(self):
self.update_susefirewall2_rules()
def update_susefirewall2_rules(self):
rules_file = '/etc/sysconfig/scripts/SuSEfirewall2-custom'
pos = {}
pos['top'] = 'fw_custom_before_antispoofing'
pos['after-top'] = 'fw_custom_after_antispoofing'
pos['before-end'] = 'fw_custom_before_masq'
pos['end'] = 'fw_custom_before_denyall'
pos['anchor'] = 'true\n'
search_str = r'^.*(fw_custom_before_antispoofing)(.*?[{].*?)(\n[ ]+true\n}\n)'
rule = self.fmt(self.cmds[self.FirewallType]['present']['end'])
rplace_str = r'\1\2' + rule + r'\n\3'
text = ''
with open(rules_file, 'r') as F:
text = F.read()
text=text.replace(rule+'\n','') # remove rule
if self.Ensure == 'present':
srch = re.compile(search_str, re.M| re.S)
text = srch.sub(rplace_str, text)
with open(rules_file, 'w') as F:
F.write(text)
def fmt(self, st):
for k in self.__dict__.keys():
if 'cmds' in k:
continue
if k == 'Direction':
self.cmds[self.FirewallType]['chain']()
if type(self.__dict__[k]) == int :
st = st.replace('{' + k + '}', str(self.__dict__[k]))
elif self.__dict__[k]==None or (type(self.__dict__[k]) == str and len(self.__dict__[k]) == 0) :
st = re.sub(r'}'+k[::-1]+'{.*?[-]+', '', st[::-1])[::-1]
if k == 'State':
st = st.replace('-m state ','')
else :
st = st.replace('{' + k + '}', self.__dict__[k])
return st
| 40.350501
| 290
| 0.583159
|
b26e115ce9f2358cca2523a9f605cb5aa50a072e
| 2,272
|
py
|
Python
|
tools/fuchsia/gather_flutter_runner_artifacts.py
|
adazh/engine
|
1f3013163d48f46cc967b31aac193691ef28f7d9
|
[
"BSD-3-Clause"
] | null | null | null |
tools/fuchsia/gather_flutter_runner_artifacts.py
|
adazh/engine
|
1f3013163d48f46cc967b31aac193691ef28f7d9
|
[
"BSD-3-Clause"
] | null | null | null |
tools/fuchsia/gather_flutter_runner_artifacts.py
|
adazh/engine
|
1f3013163d48f46cc967b31aac193691ef28f7d9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Gather all the fuchsia artifacts to a destination directory.
"""
import argparse
import errno
import json
import os
import platform
import shutil
import subprocess
import sys
_ARTIFACT_PATH_TO_DST = {
'flutter_runner': 'flutter_runner',
'icudtl.dat': 'data/icudtl.dat',
'dart_runner': 'dart_runner',
'flutter_patched_sdk': 'flutter_patched_sdk'
}
def EnsureParentExists(path):
dir_name, _ = os.path.split(path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def CopyPath(src, dst):
try:
EnsureParentExists(dst)
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else:
raise
def CreateMetaPackage(dst_root):
meta = os.path.join(dst_root, 'meta')
if not os.path.isdir(meta):
os.makedirs(meta)
content = {}
content['name'] = 'flutter_runner'
content['version'] = '0.0.1'
package = os.path.join(meta, 'package')
with open(package, 'w') as out_file:
json.dump(content, out_file)
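# Per the code above, CreateMetaPackage() writes <dst_root>/meta/package
# containing the JSON document {"name": "flutter_runner", "version": "0.0.1"}.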
def GatherArtifacts(src_root, dst_root, create_meta_package=True):
if not os.path.exists(dst_root):
os.makedirs(dst_root)
else:
shutil.rmtree(dst_root)
for src_rel, dst_rel in _ARTIFACT_PATH_TO_DST.iteritems():
src_full = os.path.join(src_root, src_rel)
dst_full = os.path.join(dst_root, dst_rel)
if not os.path.exists(src_full):
print 'Unable to find artifact: ', str(src_full)
sys.exit(1)
CopyPath(src_full, dst_full)
if create_meta_package:
CreateMetaPackage(dst_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--artifacts-root', dest='artifacts_root', action='store', required=True)
parser.add_argument(
'--dest-dir', dest='dst_dir', action='store', required=True)
args = parser.parse_args()
assert os.path.exists(args.artifacts_root)
dst_parent = os.path.abspath(os.path.join(args.dst_dir, os.pardir))
assert os.path.exists(dst_parent)
GatherArtifacts(args.artifacts_root, args.dst_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
| 24.430108
| 79
| 0.707306
|
562c69f25298f5645b5b0fb7f451780b59ae5b3e
| 8,103
|
py
|
Python
|
binance-spot/impl/websocketconnection.py
|
AbdeenM/binance-spot
|
f48ab28dd837dd66bb8373e5e4b1bf24379e46ad
|
[
"MIT"
] | 2
|
2021-05-05T00:25:11.000Z
|
2021-08-07T23:26:55.000Z
|
binance-spot/impl/websocketconnection.py
|
AbdeenM/binance-spot
|
f48ab28dd837dd66bb8373e5e4b1bf24379e46ad
|
[
"MIT"
] | null | null | null |
binance-spot/impl/websocketconnection.py
|
AbdeenM/binance-spot
|
f48ab28dd837dd66bb8373e5e4b1bf24379e46ad
|
[
"MIT"
] | null | null | null |
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse
from common.scripts.binance_spot.impl.utils.timeservice import get_current_timestamp
from common.scripts.binance_spot.impl.utils.urlparamsbuilder import UrlParamsBuilder
from common.scripts.binance_spot.impl.utils.apisignature import create_signature
from common.scripts.binance_spot.exception.binanceapiexception import BinanceApiException
from common.scripts.binance_spot.impl.utils import *
from common.scripts.binance_spot.base.printobject import *
from common.scripts.binance_spot.model.constant import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
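# Connection lifecycle as implemented below: IDLE -> CONNECTED after a
# successful on_open(), back to IDLE when the event loop exits while still
# connected, and CLOSED_ON_ERROR when close_on_error() is called.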
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info(
'[Sub][' + str(connection_instance.id) + '] Connecting...')
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE})
connection_instance.logger.info(
'[Sub][' + str(connection_instance.id) + '] Connection event loop down')
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
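# websocket_func() runs on the background thread started by
# WebsocketConnection.connect(); run_forever() blocks until the connection
# drops, after which the state is reset from CONNECTED back to IDLE.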
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
self.__thread = None
self.url = uri
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger('algo-trading')
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
def in_delay_connection(self):
return self.delay_in_second != -1
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
self.logger.warning('[Sub][' + str(self.id) + '] Reconnecting after '
+ str(self.delay_in_second) + ' seconds later')
def re_connect(self):
if self.delay_in_second != 0:
self.delay_in_second -= 1
self.logger.warning('In delay connection: ' +
str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info('[Sub][' + str(self.id) + '] Already connected')
else:
self.__thread = threading.Thread(
target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
self.logger.error('[Sub][' + str(self.id) + '] Closing normally')
def on_open(self, ws):
self.logger.info('[Sub][' + str(self.id) + '] Connected to server')
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
print('error')
exception = BinanceApiException(
BinanceApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error('[Sub][' + str(self.id) + '] ' + str(error_message))
def on_failure(self, error):
print('on_failure')
self.on_error('Unexpected error: ' + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
json_wrapper = parse_json_from_string(message)
if json_wrapper.contain_key('status') and json_wrapper.get_string('status') != 'ok':
error_code = json_wrapper.get_string_or_default(
'err-code', 'Unknown error')
error_msg = json_wrapper.get_string_or_default(
'err-msg', 'Unknown error')
self.on_error(error_code + ': ' + error_msg)
elif json_wrapper.contain_key('err-code') and json_wrapper.get_int('err-code') != 0:
error_code = json_wrapper.get_string_or_default(
'err-code', 'Unknown error')
error_msg = json_wrapper.get_string_or_default(
'err-msg', 'Unknown error')
self.on_error(error_code + ': ' + error_msg)
elif json_wrapper.contain_key('result') and json_wrapper.contain_key('id'):
self.__on_receive_response(json_wrapper)
else:
self.__on_receive_payload(json_wrapper)
def __on_receive_response(self, json_wrapper):
res = None
try:
res = json_wrapper.get_int('id')
except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(
SubscribeMessageType.RESPONSE, res)
except Exception as e:
self.on_error('Process error: ' + str(e)
+ ' You should capture the exception in your error handler')
def __on_receive_payload(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(SubscribeMessageType.PAYLOAD, res)
except Exception as e:
self.on_error('Process error: ' + str(e)
+ ' You should capture the exception in your error handler')
if self.request.auto_close:
self.close()
def __process_ping_on_trading_line(self, ping_ts):
self.send('{\'op\':\'pong\',\'ts\':' + str(ping_ts) + '}')
return
def __process_ping_on_market_line(self, ping_ts):
self.send('{\'pong\':' + str(ping_ts) + '}')
return
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error(
'[Sub][' + str(self.id) + '] Connection is closing due to error')
| 37.864486
| 93
| 0.622362
|
bdf2811a1e637f7c7453e0a14fee15df3d055d03
| 7,392
|
py
|
Python
|
google/ads/google_ads/v3/proto/resources/group_placement_view_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/resources/group_placement_view_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/resources/group_placement_view_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | 1
|
2020-03-13T00:14:31.000Z
|
2020-03-13T00:14:31.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/resources/group_placement_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.enums import placement_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placement__type__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/resources/group_placement_view.proto',
package='google.ads.googleads.v3.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v3.resourcesB\027GroupPlacementViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V3.Resources\312\002!Google\\Ads\\GoogleAds\\V3\\Resources\352\002%Google::Ads::GoogleAds::V3::Resources'),
serialized_pb=_b('\nBgoogle/ads/googleads_v3/proto/resources/group_placement_view.proto\x12!google.ads.googleads.v3.resources\x1a\x38google/ads/googleads_v3/proto/enums/placement_type.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\x8d\x03\n\x12GroupPlacementView\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12/\n\tplacement\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x64isplay_name\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ntarget_url\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12V\n\x0eplacement_type\x18\x05 \x01(\x0e\x32>.google.ads.googleads.v3.enums.PlacementTypeEnum.PlacementType:q\xea\x41n\n+googleads.googleapis.com/GroupPlacementView\x12?customers/{customer}/groupPlacementViews/{group_placement_view}B\x84\x02\n%com.google.ads.googleads.v3.resourcesB\x17GroupPlacementViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V3.Resources\xca\x02!Google\\Ads\\GoogleAds\\V3\\Resources\xea\x02%Google::Ads::GoogleAds::V3::Resourcesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placement__type__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GROUPPLACEMENTVIEW = _descriptor.Descriptor(
name='GroupPlacementView',
full_name='google.ads.googleads.v3.resources.GroupPlacementView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.resources.GroupPlacementView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='placement', full_name='google.ads.googleads.v3.resources.GroupPlacementView.placement', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.ads.googleads.v3.resources.GroupPlacementView.display_name', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_url', full_name='google.ads.googleads.v3.resources.GroupPlacementView.target_url', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='placement_type', full_name='google.ads.googleads.v3.resources.GroupPlacementView.placement_type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352An\n+googleads.googleapis.com/GroupPlacementView\022?customers/{customer}/groupPlacementViews/{group_placement_view}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=650,
)
_GROUPPLACEMENTVIEW.fields_by_name['placement'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_GROUPPLACEMENTVIEW.fields_by_name['display_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_GROUPPLACEMENTVIEW.fields_by_name['target_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_GROUPPLACEMENTVIEW.fields_by_name['placement_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placement__type__pb2._PLACEMENTTYPEENUM_PLACEMENTTYPE
DESCRIPTOR.message_types_by_name['GroupPlacementView'] = _GROUPPLACEMENTVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GroupPlacementView = _reflection.GeneratedProtocolMessageType('GroupPlacementView', (_message.Message,), dict(
DESCRIPTOR = _GROUPPLACEMENTVIEW,
__module__ = 'google.ads.googleads_v3.proto.resources.group_placement_view_pb2'
,
__doc__ = """A group placement view.
Attributes:
resource_name:
The resource name of the group placement view. Group placement
view resource names have the form: ``customers/{customer_id}/
groupPlacementViews/{ad_group_id}~{base64_placement}``
placement:
The automatic placement string at group level, e. g. web
domain, mobile app ID, or a YouTube channel ID.
display_name:
Domain name for websites and YouTube channel name for YouTube
channels.
target_url:
URL of the group placement, e.g. domain, link to the mobile
application in app store, or a YouTube channel URL.
placement_type:
Type of the placement, e.g. Website, YouTube Channel, Mobile
Application.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.resources.GroupPlacementView)
))
_sym_db.RegisterMessage(GroupPlacementView)
DESCRIPTOR._options = None
_GROUPPLACEMENTVIEW._options = None
# @@protoc_insertion_point(module_scope)
| 56
| 1,159
| 0.789502
|
68e865b3e056ad03b6f05bf5cc0a7de987f6cfd7
| 34,915
|
py
|
Python
|
pype/plugins/maya/publish/collect_render.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/collect_render.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/collect_render.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
"""
This collector will go through render layers in maya and prepare all data
needed to create instances and their representations for submission and
publishing on farm.
Requires:
instance -> families
instance -> setMembers
context -> currentFile
context -> workspaceDir
context -> user
session -> AVALON_ASSET
Optional:
Provides:
instance -> label
instance -> subset
instance -> attachTo
instance -> setMembers
instance -> publish
instance -> frameStart
instance -> frameEnd
instance -> byFrameStep
instance -> renderer
instance -> family
instance -> families
instance -> asset
instance -> time
instance -> author
instance -> source
instance -> expectedFiles
instance -> resolutionWidth
instance -> resolutionHeight
instance -> pixelAspect
"""
import re
import os
import types
import six
import json
from abc import ABCMeta, abstractmethod
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import maya, api
import pype.maya.lib as lib
R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
R_LAYER_TOKEN = re.compile(
r".*%l.*|.*<layer>.*|.*<renderlayer>.*", re.IGNORECASE
)
R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r"_%a|_<aov>|_<renderpass>", re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r"%l|<layer>|<renderlayer>", re.IGNORECASE
)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
RENDERER_NAMES = {
"mentalray": "MentalRay",
"vray": "V-Ray",
"arnold": "Arnold",
"renderman": "Renderman",
"redshift": "Redshift",
}
# not sure about the renderman image prefix
ImagePrefixes = {
"mentalray": "defaultRenderGlobals.imageFilePrefix",
"vray": "vraySettings.fileNamePrefix",
"arnold": "defaultRenderGlobals.imageFilePrefix",
"renderman": "rmanGlobals.imageFileFormat",
"redshift": "defaultRenderGlobals.imageFilePrefix",
}
class CollectMayaRender(pyblish.api.ContextPlugin):
"""Gather all publishable render layers from renderSetup"""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
label = "Collect Render Layers"
def process(self, context):
render_instance = None
for instance in context:
if "rendering" in instance.data["families"]:
render_instance = instance
render_instance.data["remove"] = True
# make sure workfile instance publishing is enabled
if "workfile" in instance.data["families"]:
instance.data["publish"] = True
if not render_instance:
self.log.info(
"No render instance found, skipping render "
"layer collection."
)
return
render_globals = render_instance
collected_render_layers = render_instance.data["setMembers"]
filepath = context.data["currentFile"].replace("\\", "/")
asset = api.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
self._rs = renderSetup.instance()
maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
self.maya_layers = maya_render_layers
for layer in collected_render_layers:
# every layer in set should start with `LAYER_` prefix
try:
expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1)
            except (IndexError, AttributeError):
                # re.search() returns None when the "LAYER_" prefix is missing,
                # so .group(1) raises AttributeError rather than IndexError
msg = "Invalid layer name in set [ {} ]".format(layer)
                self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = "Render layer [ {} ] is not in " "Render Setup".format(
expected_layer_name
)
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = "Render layer [ {} ] is not " "renderable".format(
expected_layer_name
)
self.log.warning(msg)
continue
# test if there are sets (subsets) to attach render to
sets = cmds.sets(layer, query=True) or []
attachTo = []
if sets:
for s in sets:
attachTo.append(
{
"version": None, # we need integrator for that
"subset": s,
"family": cmds.getAttr("{}.family".format(s)),
}
)
self.log.info(" -> attach render to: {}".format(s))
layer_name = "rs_{}".format(expected_layer_name)
# collect all frames we are expecting to be rendered
renderer = cmds.getAttr(
"defaultRenderGlobals.currentRenderer"
).lower()
# handle various renderman names
if renderer.startswith("renderman"):
renderer = "renderman"
# return all expected files for all cameras and aovs in given
# frame range
exf = ExpectedFiles()
exp_files = exf.get(renderer, layer_name)
self.log.info("multipart: {}".format(exf.multipart))
assert exp_files, "no file names were generated, this is bug"
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
# (considered to be subset on its own) to another subset
if attachTo:
assert len(exp_files[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported"
)
# append full path
full_exp_files = []
aov_dict = {}
# we either get AOVs or just list of files. List of files can
# mean two things - there are no AOVs enabled or multipass EXR
# is produced. In either case we treat those as `beauty`.
if isinstance(exp_files[0], dict):
for aov, files in exp_files[0].items():
full_paths = []
for ef in files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict[aov] = full_paths
else:
full_paths = []
for ef in exp_files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict["beauty"] = full_paths
frame_start_render = int(self.get_render_attribute(
"startFrame", layer=layer_name))
frame_end_render = int(self.get_render_attribute(
"endFrame", layer=layer_name))
if (int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
else:
handle_start = 0
handle_end = 0
frame_start = frame_start_render
frame_end = frame_end_render
frame_start_handle = frame_start_render
frame_end_handle = frame_end_render
full_exp_files.append(aov_dict)
self.log.info(full_exp_files)
self.log.info("collecting layer: {}".format(layer_name))
# Get layer specific settings, might be overrides
data = {
"subset": expected_layer_name,
"attachTo": attachTo,
"setMembers": layer_name,
"multipartExr": exf.multipart,
"publish": True,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_start_handle,
"frameEndHandle": frame_end_handle,
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
# instance subset
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"resolutionWidth": cmds.getAttr("defaultResolution.width"),
"resolutionHeight": cmds.getAttr("defaultResolution.height"),
"pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"),
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:
value = cmds.getAttr("{}.{}".format(layer, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# Include (optional) global settings
# Get global overrides and translate to Deadline values
overrides = self.parse_options(str(render_globals))
data.update(**overrides)
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(
int(data["frameStartHandle"]), int(data["frameEndHandle"])
)
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data.update(data)
self.log.debug("data: {}".format(json.dumps(data, indent=4)))
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.
Args:
render_globals (str): collection of render globals
Returns:
dict: only overrides with values
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options["renderGlobals"][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
if not attributes.get("useMayaBatch", True):
maya_render_plugin = "MayaCmd"
options["mayaRenderPlugin"] = maya_render_plugin
return options
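    # Illustrative shape of the dictionary returned above (values are
    # hypothetical): {"renderGlobals": {"Priority": 50, "Pool": "farm",
    # "ChunkSize": 1}, "publishJobState": "Active", "extendFrames": False,
    # "overrideExistingFrame": False, "mayaRenderPlugin": "MayaBatch"}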
def _discover_pools(self, attributes):
pool_a = None
pool_b = None
# Check for specific pools
pool_b = []
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
if "secondaryPool" in attributes:
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b
def _get_overrides(self, layer):
rset = self.maya_layers[layer].renderSettingsCollectionInstance()
return rset.getOverrides()
def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=layer
)
class ExpectedFiles:
multipart = False
def get(self, renderer, layer):
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
elif renderer.lower() == "vray":
return self._get_files(ExpectedFilesVray(layer))
elif renderer.lower() == "redshift":
return self._get_files(ExpectedFilesRedshift(layer))
elif renderer.lower() == "mentalray":
return self._get_files(ExpectedFilesMentalray(layer))
elif renderer.lower() == "renderman":
return self._get_files(ExpectedFilesRenderman(layer))
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer)
)
def _get_files(self, renderer):
files = renderer.get_files()
self.multipart = renderer.multipart
return files
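# ExpectedFiles.get() above dispatches to the renderer-specific subclasses of
# AExpectedFiles defined below; e.g. (illustrative)
#   ExpectedFiles().get("arnold", "rs_beauty")
# returns the file names Arnold is expected to render for that layer.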
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
renderer = None
layer = None
multipart = False
def __init__(self, layer):
self.layer = layer
@abstractmethod
def get_aovs(self):
pass
def get_renderer_prefix(self):
try:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
return file_prefix
def _get_layer_data(self):
# ______________________________________________
# ____________________/ ____________________________________________/
# 1 - get scene name /__________________/
# ____________________/
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
# ______________________________________________
# ____________________/ ____________________________________________/
# 2 - detect renderer /__________________/
# ____________________/
renderer = self.renderer
# ________________________________________________
# __________________/ ______________________________________________/
# 3 - image prefix /__________________/
# __________________/
file_prefix = self.get_renderer_prefix()
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr("defaultRenderGlobals.imfPluginKey")
# ________________________________________________
# __________________/ ______________________________________________/
# 4 - get renderable cameras_____________/
# __________________/
# if we have <camera> token in prefix path we'll expect output for
# every renderable camera in layer.
renderable_cameras = self.get_renderable_cameras()
# ________________________________________________
# __________________/ ______________________________________________/
# 5 - get AOVs /____________________/
# __________________/
enabled_aovs = self.get_aovs()
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute("startFrame"))
end_frame = int(self.get_render_attribute("endFrame"))
frame_step = int(self.get_render_attribute("byFrameStep"))
padding = int(self.get_render_attribute("extensionPadding"))
scene_data = {
"frameStart": start_frame,
"frameEnd": end_frame,
"frameStep": frame_step,
"padding": padding,
"cameras": renderable_cameras,
"sceneName": scene_name,
"layerName": layer_name,
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs,
}
return scene_data
def _generate_single_file_sequence(self, layer_data):
expected_files = []
file_prefix = layer_data["filePrefix"]
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
# this is required to remove unfilled aov token, for example
# in Redshift
(R_REMOVE_AOV_TOKEN, ""),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
expected_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
layer_data["defaultExt"],
)
)
return expected_files
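    # The sequence built above follows "<prefix>.<zero padded frame>.<ext>";
    # an illustrative item would be "scene_layer.0001.exr" once the prefix
    # tokens have been substituted.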
def _generate_aov_file_sequences(self, layer_data):
expected_files = []
aov_file_list = {}
file_prefix = layer_data["filePrefix"]
for aov in layer_data["enabledAOVs"]:
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, ""),
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"]),
):
aov_files.append(
"{}.{}.{}".format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1],
)
)
                # if we have more than one renderable camera, append
# camera name to AOV to allow per camera AOVs.
aov_name = aov[0]
if len(layer_data["cameras"]) > 1:
aov_name = "{}_{}".format(aov[0], cam)
aov_file_list[aov_name] = aov_files
file_prefix = layer_data["filePrefix"]
expected_files.append(aov_file_list)
return expected_files
def get_files(self):
"""
This method will return list of expected files.
It will translate render token strings ('<RenderPass>', etc.) to
their values. This task is tricky as every renderer deals with this
differently. It depends on `get_aovs()` abstract method implemented
for every supported renderer.
"""
layer_data = self._get_layer_data()
expected_files = []
if layer_data.get("enabledAOVs"):
expected_files = self._generate_aov_file_sequences(layer_data)
else:
expected_files = self._generate_single_file_sequence(layer_data)
return expected_files
def get_renderable_cameras(self):
cam_parents = [
cmds.listRelatives(x, ap=True)[-1] for x in cmds.ls(cameras=True)
]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
renderable = True
for override in self.get_layer_overrides(
"{}.renderable".format(cam), self.layer
):
renderable = self.maya_is_true(override)
if renderable:
renderable_cameras.append(cam)
return renderable_cameras
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which need to be properly
handled to evaluate properly.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
def get_layer_overrides(self, attr, layer):
connections = cmds.listConnections(attr, plugs=True)
if connections:
for connection in connections:
if connection:
node_name = connection.split(".")[0]
if cmds.nodeType(node_name) == "renderLayer":
attr_name = "%s.value" % ".".join(
connection.split(".")[:-1]
)
if node_name == layer:
yield cmds.getAttr(attr_name)
def get_render_attribute(self, attr):
return lib.get_attr_in_layer(
"defaultRenderGlobals.{}".format(attr), layer=self.layer
)
class ExpectedFilesArnold(AExpectedFiles):
# Arnold AOV driver extension mapping
# Is there a better way?
aiDriverExtension = {
"jpeg": "jpg",
"exr": "exr",
"deepexr": "exr",
"png": "png",
"tiff": "tif",
"mtoa_shaders": "ass", # TODO: research what those last two should be
"maya": "",
}
def __init__(self, layer):
super(ExpectedFilesArnold, self).__init__(layer)
self.renderer = "arnold"
def get_aovs(self):
enabled_aovs = []
try:
if not (
cmds.getAttr("defaultArnoldRenderOptions.aovMode")
and not cmds.getAttr("defaultArnoldDriver.mergeAOVs") # noqa: W503, E501
):
                # AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
            # this happens when the Render Settings window has not been
            # opened yet. In that case no Arnold options exist, so the query
            # for AOVs would fail. We stop here as there are no AOVs
            # specified then; this state will most probably fail later on
            # some validator anyway.
return enabled_aovs
# AOVs are set to be rendered separately. We should expect
# <RenderPass> token in path.
ai_aovs = [n for n in cmds.ls(type="aiAOV")]
for aov in ai_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
ai_driver = cmds.listConnections("{}.outputs".format(aov))[0]
ai_translator = cmds.getAttr("{}.aiTranslator".format(ai_driver))
try:
aov_ext = self.aiDriverExtension[ai_translator]
except KeyError:
msg = (
"Unrecognized arnold " "driver format for AOV - {}"
).format(cmds.getAttr("{}.name".format(aov)))
raise AOVError(msg)
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
# If aov RGBA is selected, arnold will translate it to `beauty`
aov_name = cmds.getAttr("%s.name" % aov)
if aov_name == "RGBA":
aov_name = "beauty"
enabled_aovs.append((aov_name, aov_ext))
        # Append 'beauty' as this is Arnold's default. If the <RenderPass>
        # token is specified and no AOVs are defined, this will be used.
enabled_aovs.append(
(u"beauty", cmds.getAttr("defaultRenderGlobals.imfPluginKey"))
)
return enabled_aovs
class ExpectedFilesVray(AExpectedFiles):
# V-ray file extension mapping
# 5 - exr
# 6 - multichannel exr
# 13 - deep exr
def __init__(self, layer):
super(ExpectedFilesVray, self).__init__(layer)
self.renderer = "vray"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesVray, self).get_files()
        # we need to add one sequence for the plain beauty pass if AOVs are
        # enabled, as V-Ray outputs beauty without 'beauty' in the filename.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
# really? do we set it in vray just by selecting multichannel exr?
if (
cmds.getAttr("vraySettings.imageFormatStr")
== "exr (multichannel)" # noqa: W503
):
                # AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
            # this happens when the Render Settings window has not been
            # opened yet. In that case the V-Ray settings node does not
            # exist, so the query would fail. We stop here as there are no
            # AOVs specified then; this state will most probably fail later
            # on some validator anyway.
return enabled_aovs
default_ext = cmds.getAttr("vraySettings.imageFormatStr")
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
vr_aovs = [
n
for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"]
)
]
# todo: find out how to detect multichannel exr for vray
for aov in vr_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), "rs_{}".format(self.layer)
):
enabled = self.maya_is_true(override)
if enabled:
                # todo: find out how V-Ray sets the format for AOVs
enabled_aovs.append(
(self._get_vray_aov_name(aov), default_ext))
return enabled_aovs
def _get_vray_aov_name(self, node):
# Get render element pass type
vray_node_attr = next(
attr
for attr in cmds.listAttr(node)
if attr.startswith("vray_name")
)
pass_type = vray_node_attr.rsplit("_", 1)[-1]
# Support V-Ray extratex explicit name (if set by user)
if pass_type == "extratex":
explicit_attr = "{}.vray_explicit_name_extratex".format(node)
explicit_name = cmds.getAttr(explicit_attr)
if explicit_name:
return explicit_name
        # The node type is in the attribute name, but we need to check the
        # value of the attribute as it can be changed.
return cmds.getAttr("{}.{}".format(node, vray_node_attr))
class ExpectedFilesRedshift(AExpectedFiles):
# mapping redshift extension dropdown values to strings
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
def __init__(self, layer):
super(ExpectedFilesRedshift, self).__init__(layer)
self.renderer = "redshift"
def get_renderer_prefix(self):
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesRedshift, self).get_files()
        # we need to add one sequence for the plain beauty pass if AOVs are
        # enabled, as Redshift outputs beauty without 'beauty' in the
        # filename.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(
layer_data
) # noqa: E501
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")
):
                # AOVs are merged into a multi-channel file
self.multipart = True
return enabled_aovs
except ValueError:
            # this happens when the Render Settings window has not been
            # opened yet. In that case no Redshift options exist, so the
            # query for AOVs would fail. We stop here as there are no AOVs
            # specified then; this state will most probably fail later on
            # some validator anyway.
return enabled_aovs
default_ext = self.ext_mapping[
cmds.getAttr("redshiftOptions.imageFormat")
]
rs_aovs = [n for n in cmds.ls(type="RedshiftAOV")]
# todo: find out how to detect multichannel exr for redshift
for aov in rs_aovs:
enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))
for override in self.get_layer_overrides(
"{}.enabled".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append(
(cmds.getAttr("%s.name" % aov), default_ext)
)
return enabled_aovs
class ExpectedFilesRenderman(AExpectedFiles):
def __init__(self, layer):
super(ExpectedFilesRenderman, self).__init__(layer)
self.renderer = "renderman"
def get_aovs(self):
enabled_aovs = []
default_ext = "exr"
displays = cmds.listConnections("rmanGlobals.displays")
for aov in displays:
aov_name = str(aov)
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
enabled = self.maya_is_true(cmds.getAttr("{}.enable".format(aov)))
for override in self.get_layer_overrides(
"{}.enable".format(aov), self.layer
):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append((aov_name, default_ext))
return enabled_aovs
def get_files(self):
"""
        In Renderman we hack it by prepending a path. This path would
        normally be translated from `rmanGlobals.imageOutputDir`. We skip
        that and hardcode the prepended path we expect. There is no place
        for the user to mess around with this setting anyway, and it is
        enforced in the render settings validator.
"""
layer_data = self._get_layer_data()
new_aovs = {}
expected_files = super(ExpectedFilesRenderman, self).get_files()
# we always get beauty
for aov, files in expected_files[0].items():
new_files = []
for file in files:
new_file = "{}/{}/{}".format(
layer_data["sceneName"], layer_data["layerName"], file
)
new_files.append(new_file)
new_aovs[aov] = new_files
return [new_aovs]
class ExpectedFilesMentalray(AExpectedFiles):
def __init__(self, layer):
raise UnimplementedRendererException("Mentalray not implemented")
def get_aovs(self):
return []
class AOVError(Exception):
pass
class UnsupportedRendererException(Exception):
pass
class UnimplementedRendererException(Exception):
pass
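# -----------------------------------------------------------------------------
# Minimal standalone sketch of the token substitution performed by
# _generate_single_file_sequence(), using plain `re`. The token patterns and
# the file prefix below are illustrative assumptions only -- the real
# R_SUBSTITUTE_* constants are defined earlier in this module and may differ.
# Runs outside Maya.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    import re

    _scene_token = re.compile(r"<scene>", re.IGNORECASE)    # assumed pattern
    _layer_token = re.compile(r"<layer>", re.IGNORECASE)    # assumed pattern
    _camera_token = re.compile(r"<camera>", re.IGNORECASE)  # assumed pattern

    file_prefix = "<Scene>/<Layer>/<Scene>_<Layer>_<Camera>"
    for token, value in ((_scene_token, "shotA"),
                         (_layer_token, "beautyLayer"),
                         (_camera_token, "renderCam")):
        file_prefix = token.sub(value, file_prefix)

    # prints: shotA/beautyLayer/shotA_beautyLayer_renderCam.0001.exr
    print("{}.{}.{}".format(file_prefix, str(1).rjust(4, "0"), "exr"))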
| 36.331946
| 101
| 0.574882
|
5c7acf00d4b56f7d990ed6160e824fbd92f1a51e
| 2,202
|
py
|
Python
|
src/camera/test_run.py
|
jphacks/TK_1804
|
b71e5ee95ea60476758979845f3ebfd5a4355d41
|
[
"MIT"
] | 1
|
2018-11-19T14:46:38.000Z
|
2018-11-19T14:46:38.000Z
|
src/camera/test_run.py
|
jphacks/TK_1804
|
b71e5ee95ea60476758979845f3ebfd5a4355d41
|
[
"MIT"
] | 11
|
2018-10-27T09:31:41.000Z
|
2018-11-13T07:05:11.000Z
|
src/camera/test_run.py
|
jphacks/TK_1804
|
b71e5ee95ea60476758979845f3ebfd5a4355d41
|
[
"MIT"
] | 1
|
2021-08-10T04:41:55.000Z
|
2021-08-10T04:41:55.000Z
|
import numpy as np
from head_vector import HeadVector
from select_speakers import SelectSpeakers
if __name__ == '__main__':
face_landmark_path = './src/camera/shape_predictor_68_face_landmarks.dat'
K = [6.523417721418979909e+02, 0.0, 3.240992613348381610e+02,
0.0, 6.314784883620466189e+02, 2.369864861289960629e+02,
0.0, 0.0, 1.0]
D = [-4.425469845416301617e-01,4.114960065684757362e-01,5.860505097580077059e-03,3.197849383691316570e-03,-3.379210829526543836e-01]
cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
[1.330353, 7.122144, 6.903745],
[-1.330353, 7.122144, 6.903745],
[-6.825897, 6.760612, 4.402142],
[5.311432, 5.485328, 3.987654],
[1.789930, 5.393625, 4.413414],
[-1.789930, 5.393625, 4.413414],
[-5.311432, 5.485328, 3.987654],
[2.005628, 1.409845, 6.165652],
[-2.005628, 1.409845, 6.165652],
[2.774015, -2.080775, 5.048531],
[-2.774015, -2.080775, 5.048531],
[0.000000, -3.116408, 6.097667],
[0.000000, -7.415691, 4.070434]])
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
[10.0, 10.0, -10.0],
[10.0, -10.0, -10.0],
[10.0, -10.0, 10.0],
[-10.0, 10.0, 10.0],
[-10.0, 10.0, -10.0],
[-10.0, -10.0, -10.0],
[-10.0, -10.0, 10.0]])
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
[4, 5], [5, 6], [6, 7], [7, 4],
[0, 4], [1, 5], [2, 6], [3, 7]]
select_speaker = SelectSpeakers(K, D, object_pts, reprojectsrc, line_pairs, face_landmark_path)
while(True):
print(select_speaker.estimate_head_orientation(1))
| 44.04
| 136
| 0.470027
|
8e01f22d15dc38990736170d05f1bb4cb40ac4fe
| 3,432
|
py
|
Python
|
venv/lib/python3.8/site-packages/cairosvg/shapes.py
|
sakthipriya-07/BuildingConstructionMaterialsSupply
|
e4b32d97eb6e574e78b955a03a0717bc7b5d13d4
|
[
"MIT"
] | 1
|
2021-06-22T18:52:15.000Z
|
2021-06-22T18:52:15.000Z
|
venv/lib/python3.8/site-packages/cairosvg/shapes.py
|
sakthipriya-07/BuildingConstructionMaterialsSupply
|
e4b32d97eb6e574e78b955a03a0717bc7b5d13d4
|
[
"MIT"
] | 7
|
2021-03-05T23:08:02.000Z
|
2022-03-12T00:47:19.000Z
|
venv/lib/python3.8/site-packages/cairosvg/shapes.py
|
sakthipriya-07/BuildingConstructionMaterialsSupply
|
e4b32d97eb6e574e78b955a03a0717bc7b5d13d4
|
[
"MIT"
] | null | null | null |
"""
Shapes drawers.
"""
from math import pi
from .helpers import normalize, point, point_angle, size
def circle(surface, node):
"""Draw a circle ``node`` on ``surface``."""
r = size(surface, node.get('r'))
if not r:
return
cx = size(surface, node.get('cx'), 'x')
cy = size(surface, node.get('cy'), 'y')
surface.context.new_sub_path()
surface.context.arc(cx, cy, r, 0, 2 * pi)
def ellipse(surface, node):
"""Draw an ellipse ``node`` on ``surface``."""
rx = size(surface, node.get('rx'), 'x')
ry = size(surface, node.get('ry'), 'y')
if not rx or not ry:
return
cx = size(surface, node.get('cx'), 'x')
cy = size(surface, node.get('cy'), 'y')
ratio = ry / rx
surface.context.new_sub_path()
surface.context.save()
surface.context.scale(1, ratio)
surface.context.arc(cx, cy / ratio, rx, 0, 2 * pi)
surface.context.restore()
def line(surface, node):
"""Draw a line ``node``."""
x1, y1, x2, y2 = tuple(
size(surface, node.get(position), position[0])
for position in ('x1', 'y1', 'x2', 'y2'))
surface.context.move_to(x1, y1)
surface.context.line_to(x2, y2)
angle = point_angle(x1, y1, x2, y2)
node.vertices = [(x1, y1), (pi - angle, angle), (x2, y2)]
def polygon(surface, node):
"""Draw a polygon ``node`` on ``surface``."""
polyline(surface, node)
surface.context.close_path()
def polyline(surface, node):
"""Draw a polyline ``node``."""
points = normalize(node.get('points', ''))
if points:
x, y, points = point(surface, points)
surface.context.move_to(x, y)
node.vertices = [(x, y)]
while points:
x_old, y_old = x, y
x, y, points = point(surface, points)
angle = point_angle(x_old, y_old, x, y)
node.vertices.append((pi - angle, angle))
surface.context.line_to(x, y)
node.vertices.append((x, y))
def rect(surface, node):
"""Draw a rect ``node`` on ``surface``."""
x, y = size(surface, node.get('x'), 'x'), size(surface, node.get('y'), 'y')
width = size(surface, node.get('width'), 'x')
height = size(surface, node.get('height'), 'y')
rx = node.get('rx')
ry = node.get('ry')
if rx and ry is None:
ry = rx
elif ry and rx is None:
rx = ry
rx = size(surface, rx, 'x')
ry = size(surface, ry, 'y')
if rx == 0 or ry == 0:
surface.context.rectangle(x, y, width, height)
else:
if rx > width / 2:
rx = width / 2
if ry > height / 2:
ry = height / 2
# Inspired by Cairo Cookbook
# http://cairographics.org/cookbook/roundedrectangles/
ARC_TO_BEZIER = 4 * (2 ** .5 - 1) / 3
c1 = ARC_TO_BEZIER * rx
c2 = ARC_TO_BEZIER * ry
surface.context.new_path()
surface.context.move_to(x + rx, y)
surface.context.rel_line_to(width - 2 * rx, 0)
surface.context.rel_curve_to(c1, 0, rx, c2, rx, ry)
surface.context.rel_line_to(0, height - 2 * ry)
surface.context.rel_curve_to(0, c2, c1 - rx, ry, -rx, ry)
surface.context.rel_line_to(-width + 2 * rx, 0)
surface.context.rel_curve_to(-c1, 0, -rx, -c2, -rx, -ry)
surface.context.rel_line_to(0, -height + 2 * ry)
surface.context.rel_curve_to(0, -c2, rx - c1, -ry, rx, -ry)
surface.context.close_path()
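if __name__ == "__main__":
    # Standalone numerical check of the Bézier circle approximation used for
    # the rounded corners in rect(), independent of cairosvg. The constant
    # 4 * (sqrt(2) - 1) / 3 is chosen so that the midpoint of the cubic
    # approximating a unit quarter-circle lies exactly on the circle.
    kappa = 4 * (2 ** .5 - 1) / 3
    # Quarter arc from (1, 0) to (0, 1) with control points (1, kappa) and
    # (kappa, 1); the cubic at t = 0.5 is (P0 + 3 * P1 + 3 * P2 + P3) / 8.
    mid_x = (1 + 3 * 1 + 3 * kappa + 0) / 8
    mid_y = (0 + 3 * kappa + 3 * 1 + 1) / 8
    print((mid_x ** 2 + mid_y ** 2) ** .5)  # ~1.0: the midpoint lies on the arc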
| 30.642857
| 79
| 0.56148
|
4bd75cd3193f2dbc5fe51cf26004e965daf3b368
| 10,893
|
py
|
Python
|
lib-python/2.7/ctypes/test/test_byteswap.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
BitmessageKit/Vendor/static-python/Lib/ctypes/test/test_byteswap.py
|
VoluntaryLabs/BitmessageKit
|
dd634977a629ab4dec184e12bb6324cc01149ba3
|
[
"MIT"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
BitmessageKit/Vendor/static-python/Lib/ctypes/test/test_byteswap.py
|
VoluntaryLabs/BitmessageKit
|
dd634977a629ab4dec184e12bb6324cc01149ba3
|
[
"MIT"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
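# A quick standalone illustration of this convention (the value 0x12345678 is
# arbitrary): the big-endian variant of c_int packs the same bytes as struct's
# ">i" format on any host, and likewise for the little-endian variant.
if __name__ == "__main__":
    assert bin(c_int.__ctype_be__(0x12345678)) == bin(struct.pack(">i", 0x12345678))
    assert bin(c_int.__ctype_le__(0x12345678)) == bin(struct.pack("<i", 0x12345678))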
class Test(unittest.TestCase):
def X_test(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == "little":
self.assertTrue(c_short.__ctype_le__ is c_short)
self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short)
else:
self.assertTrue(c_short.__ctype_be__ is c_short)
self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertTrue(c_int.__ctype_le__ is c_int)
self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int)
else:
self.assertTrue(c_int.__ctype_be__ is c_int)
self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertTrue(c_longlong.__ctype_le__ is c_longlong)
self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong)
else:
self.assertTrue(c_longlong.__ctype_be__ is c_longlong)
self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertTrue(c_float.__ctype_le__ is c_float)
self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float)
else:
self.assertTrue(c_float.__ctype_be__ is c_float)
self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertTrue(c_double.__ctype_le__ is c_double)
self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double)
else:
self.assertTrue(c_double.__ctype_be__ is c_double)
self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertTrue(c_byte.__ctype_le__ is c_byte)
self.assertTrue(c_byte.__ctype_be__ is c_byte)
self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte)
self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte)
self.assertTrue(c_char.__ctype_le__ is c_char)
self.assertTrue(c_char.__ctype_be__ is c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
base = LittleEndianStructure
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
| 36.925424
| 88
| 0.570366
|
0ed9dd4406bfd784de808ec47d03d5c38021601b
| 1,249
|
py
|
Python
|
experiment_scripts/2-2-mujoco/2-2b.py
|
alexlioralexli/pytorch-a2c-ppo-acktr-gail
|
99ec24575ccfc53cd2f1942cf798ca0322a83539
|
[
"MIT"
] | null | null | null |
experiment_scripts/2-2-mujoco/2-2b.py
|
alexlioralexli/pytorch-a2c-ppo-acktr-gail
|
99ec24575ccfc53cd2f1942cf798ca0322a83539
|
[
"MIT"
] | null | null | null |
experiment_scripts/2-2-mujoco/2-2b.py
|
alexlioralexli/pytorch-a2c-ppo-acktr-gail
|
99ec24575ccfc53cd2f1942cf798ca0322a83539
|
[
"MIT"
] | null | null | null |
total = 0
envs = ['Ant-v2', 'Humanoid-v2', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
for env in envs:
commands = []
base_command = f'python main.py --env-name {env} --algo ppo --use-gae --log-interval 1 --num-steps 2048 --num-processes 1 --lr 3e-4 --entropy-coef 0 --value-loss-coef 0.5 --ppo-epoch 10 --num-mini-batch 32 --gamma 0.99 --gae-lambda 0.95 --num-env-steps 1000000 --use-linear-lr-decay --use-proper-time-limits --hidden_dim 256'
for n_hidden in [1,2]:
commands.append(f'{base_command} --network_class MLP --n_hidden {n_hidden}')
for type in ['--train_B', '--concatenate_fourier --train_B']:
for n_hidden in [1]:
for fourier_dim in [256]:
for sigma in [0.03, 0.01, 0.003]:
commands.append(f'{base_command} --network_class FourierMLP --n_hidden {n_hidden} --sigma {sigma} --fourier_dim {fourier_dim} {type}')
count = 0
for command in commands:
# gpus = list(range(8,10))
gpus = list(range(10))
for seed in [10]:
if total % 8 == 0:
print(total)
total += 1
print(f'CUDA_VISIBLE_DEVICES={gpus[count]} {command} --seed {seed} &')
count = (count + 1) % len(gpus)
| 54.304348
| 329
| 0.588471
|
8c6db0af4734fde9b38ff154757b37beafa7543c
| 17,013
|
py
|
Python
|
asreview/data/base.py
|
asrodwin/asreview
|
28f2a4f93eed5b5ab1759a461650032c9dd471c9
|
[
"Apache-2.0"
] | null | null | null |
asreview/data/base.py
|
asrodwin/asreview
|
28f2a4f93eed5b5ab1759a461650032c9dd471c9
|
[
"Apache-2.0"
] | null | null | null |
asreview/data/base.py
|
asrodwin/asreview
|
28f2a4f93eed5b5ab1759a461650032c9dd471c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import pkg_resources
from pathlib import Path
from urllib.parse import urlparse
import numpy as np
import pandas as pd
from asreview.config import COLUMN_DEFINITIONS
from asreview.config import LABEL_NA
from asreview.exceptions import BadFileFormatError
from asreview.io.paper_record import PaperRecord
from asreview.io.ris_reader import write_ris
from asreview.io.utils import type_from_column
from asreview.io.utils import convert_keywords
from asreview.utils import is_iterable
from asreview.utils import is_url
class ASReviewData():
"""Data object to the dataset with texts, labels, DOIs etc.
Arguments
---------
df: pandas.DataFrame
Dataframe containing the data for the ASReview data object.
data_name: str
Give a name to the data object.
data_type: str
What kind of data the dataframe contains.
column_spec: dict
Specification for which column corresponds to which standard
        specification. Key is the standard specification, value is the
        column it is actually in.
"""
def __init__(self,
df=None,
data_name="empty",
data_type="standard",
column_spec=None):
self.df = df
self.data_name = data_name
self.prior_idx = np.array([], dtype=int)
if df is None:
self.column_spec = {}
return
self.max_idx = max(df.index.values) + 1
# Infer column specifications if it is not given.
if column_spec is None:
self.column_spec = {}
for col_name in list(df):
data_type = type_from_column(col_name, COLUMN_DEFINITIONS)
if data_type is not None:
self.column_spec[data_type] = col_name
else:
self.column_spec = column_spec
if "included" not in self.column_spec:
self.column_spec["included"] = "included"
if data_type == "included":
self.labels = np.ones(len(self), dtype=int)
if data_type == "excluded":
self.labels = np.zeros(len(self), dtype=int)
if data_type == "prior":
self.prior_idx = df.index.values
def __len__(self):
if self.df is None:
return 0
return len(self.df.index)
def hash(self):
"""Compute a hash from the dataset.
Returns
-------
str:
SHA1 hash, computed from the titles/abstracts of the dataframe.
"""
if ((len(self.df.index) < 1000 and self.bodies is not None) or
self.texts is None):
texts = " ".join(self.bodies)
else:
texts = " ".join(self.texts)
return hashlib.sha1(" ".join(texts).encode(
encoding='UTF-8', errors='ignore')).hexdigest()
def slice(self, idx, by_index=True):
"""Create a slice from itself.
Useful if some parts should be kept/thrown away.
Arguments
---------
idx: list, numpy.ndarray
Record ids that should be kept.
Returns
-------
ASReviewData
Slice of itself.
"""
if self.df is None:
raise ValueError("Cannot slice empty ASReviewData object.")
if by_index:
return ASReviewData(self.df.iloc[idx], data_name="sliced")
return ASReviewData(self.df.loc[idx, :], data_name="sliced")
def append(self, as_data):
"""Append another ASReviewData object.
It puts the training data at the end.
Arguments
---------
as_data: ASReviewData
Dataset to append.
"""
if as_data.df is None:
return
if len(self) == 0:
self.df = as_data.df
self.data_name = as_data.data_name
self.prior_idx = as_data.prior_idx
self.max_idx = as_data.max_idx
self.column_spec = as_data.column_spec
return
reindex_val = max(self.max_idx - min(as_data.df.index.values), 0)
new_index = np.append(self.df.index.values,
as_data.df.index.values + reindex_val)
new_priors = np.append(self.prior_idx, as_data.prior_idx + reindex_val)
new_df = self.df.append(as_data.df, sort=False)
new_df.index = new_index
new_labels = None
if self.labels is None and as_data.labels is not None:
new_labels = np.append(np.full(len(self), LABEL_NA, dtype=int),
as_data.labels)
elif self.labels is not None and as_data.labels is None:
new_labels = np.append(self.labels,
np.full(len(as_data), LABEL_NA, dtype=int))
self.max_idx = max(self.max_idx, as_data.max_idx, max(new_index))
self.df = new_df
if new_labels is not None:
self.labels = new_labels
self.prior_idx = new_priors
self.data_name += "_" + as_data.data_name
for data_type, col in as_data.column_spec.items():
if data_type in self.column_spec:
if self.column_spec[data_type] != col:
raise ValueError(
"Error merging dataframes: column specifications "
f"differ: {self.column_spec} vs {as_data.column_spec}")
else:
self.column_spec[data_type] = col
@classmethod
def from_file(cls, fp, read_fn=None, data_name=None, data_type=None):
"""Create instance from csv/ris/excel file.
        It works in two ways: either with manual control, where the
        conversion function is supplied, or automatically, where it searches
        the entry points for the right conversion function.
Arguments
---------
fp: str, pathlib.Path
Read the data from this file.
read_fn: callable
Function to read the file. It should return a standardized
dataframe.
data_name: str
Name of the data.
data_type: str
What kind of data it is. Special names: 'included', 'excluded',
'prior'.
"""
if is_url(fp):
path = urlparse(fp).path
new_data_name = Path(path.split("/")[-1]).stem
else:
path = str(Path(fp).resolve())
new_data_name = Path(fp).stem
if data_name is None:
data_name = new_data_name
if read_fn is not None:
return cls(read_fn(fp), data_name=data_name, data_type=data_type)
entry_points = {
entry.name: entry
for entry in pkg_resources.iter_entry_points('asreview.readers')
}
best_suffix = None
for suffix, entry in entry_points.items():
if path.endswith(suffix):
if best_suffix is None or len(suffix) > len(best_suffix):
best_suffix = suffix
if best_suffix is None:
raise ValueError(f"Error reading file {fp}, no capabilities for "
"reading such a file.")
read_fn = entry_points[best_suffix].load()
df, column_spec = read_fn(fp)
return cls(df,
column_spec=column_spec,
data_name=data_name,
data_type=data_type)
def record(self, i, by_index=True):
"""Create a record from an index.
Arguments
---------
i: int, iterable
Index of the record, or list of indices.
by_index: bool
If True, take the i-th value as used internally by the review.
If False, take the record with record_id==i.
Returns
-------
PaperRecord
The corresponding record if i was an integer, or a list of records
if i was an iterable.
"""
if not is_iterable(i):
index_list = [i]
else:
index_list = i
if not by_index:
records = [
PaperRecord(**self.df.loc[j, :],
record_id=j,
column_spec=self.column_spec) for j in index_list
]
else:
records = [
PaperRecord(**self.df.iloc[j],
column_spec=self.column_spec,
record_id=self.df.index.values[j])
for j in index_list
]
if is_iterable(i):
return records
return records[0]
@property
def record_ids(self):
return self.df.index.values
@property
def texts(self):
if self.headings is None:
return self.bodies
if self.bodies is None:
return self.headings
cur_texts = np.array([
self.headings[i] + " " + self.bodies[i] for i in range(len(self))
], dtype=object)
return cur_texts
@property
def headings(self):
return self.title
@property
def title(self):
try:
return self.df[self.column_spec["title"]].values
except KeyError:
return None
@property
def bodies(self):
return self.abstract
@property
def abstract(self):
try:
return self.df[self.column_spec["abstract"]].values
except KeyError:
return None
@property
def keywords(self):
try:
return self.df[self.column_spec["keywords"]].apply(
convert_keywords).values
except KeyError:
return None
@property
def authors(self):
try:
return self.df[self.column_spec["authors"]].values
except KeyError:
return None
def get(self, name):
"Get column with name."
try:
return self.df[self.column_spec[name]].values
except KeyError:
return self.df[name].values
@property
def prior_data_idx(self):
"Get prior_included, prior_excluded from dataset."
convert_array = np.full(self.max_idx, 999999999)
convert_array[self.df.index.values] = np.arange(len(self.df.index))
return convert_array[self.prior_idx]
@property
def included(self):
return self.labels
@included.setter
def included(self, labels):
self.labels = labels
@property # pending deprecation
def final_included(self):
return self.labels
@final_included.setter # pending deprecation
def final_included(self, labels):
self.labels = labels
@property
def labels(self):
try:
column = self.column_spec["included"]
return self.df[column].values
except KeyError:
return None
@labels.setter
def labels(self, labels):
try:
column = self.column_spec["included"]
self.df[column] = labels
except KeyError:
self.df["included"] = labels
@property
def abstract_included(self):
return self.get("abstract_included")
@abstract_included.setter
def abstract_included(self, abstract_included):
try:
column = self.column_spec["abstract_included"]
self.df[column] = abstract_included
except KeyError:
self.df["abstract_included"] = abstract_included
def prior_labels(self, state, by_index=True):
"""Get the labels that are marked as 'initial'.
state: BaseState
Open state that contains the label information.
by_index: bool
If True, return internal indexing.
If False, return record_ids for indexing.
Returns
-------
numpy.ndarray
Array of indices that have the 'initial' property.
"""
query_src = state.startup_vals()["query_src"]
if "initial" not in query_src:
return np.array([], dtype=int)
if by_index:
return np.array(query_src["initial"], dtype=int)
return self.df.index.values[query_src["initial"]]
def to_file(self, fp, labels=None, ranking=None):
"""Export data object to file.
RIS, CSV and Excel are supported file formats at the moment.
Arguments
---------
fp: str
Filepath to export to.
labels: list, numpy.ndarray
Labels to be inserted into the dataframe before export.
ranking: list, numpy.ndarray
Optionally, dataframe rows can be reordered.
"""
if Path(fp).suffix in [".csv", ".CSV"]:
self.to_csv(fp, labels=labels, ranking=ranking)
elif Path(fp).suffix in [".ris", ".RIS"]:
self.to_ris(fp, labels=labels, ranking=ranking)
elif Path(fp).suffix in [".xlsx", ".XLSX"]:
self.to_excel(fp, labels=labels, ranking=ranking)
else:
raise BadFileFormatError(
f"Unknown file extension: {Path(fp).suffix}.\n"
f"from file {fp}")
def to_dataframe(self, labels=None, ranking=None):
"""Create new dataframe with updated label (order).
Arguments
---------
labels: list, numpy.ndarray
Current labels will be overwritten by these labels
(including unlabelled). No effect if labels is None.
ranking: list
Reorder the dataframe according to these record_ids.
Default ordering if ranking is None.
Returns
-------
pandas.DataFrame
Dataframe of all available record data.
"""
result_df = pd.DataFrame.copy(self.df)
col_label = self.column_spec["included"]
# if there are labels, add them to the frame
if labels is not None:
# unnest the nested (record_id, label) tuples
labeled_record_ids = [x[0] for x in labels]
labeled_values = [x[1] for x in labels]
# remove the old results and write the values
result_df[col_label] = LABEL_NA
result_df.loc[labeled_record_ids, col_label] = labeled_values
# if there is a ranking, apply this ranking as order
if ranking is not None:
# sort the datasets based on the ranking
result_df = result_df.loc[ranking]
# append a column with 1 to n
result_df["asreview_ranking"] = np.arange(1, len(result_df) + 1)
# replace labeled NA values by np.nan
if col_label in list(result_df):
result_df[col_label] = result_df[col_label].astype(object)
result_df.loc[result_df[col_label] == LABEL_NA, col_label] = np.nan
return result_df
def to_csv(self, fp, labels=None, ranking=None):
"""Export to csv.
Arguments
---------
fp: str, NoneType
Filepath or None for buffer.
labels: list, numpy.ndarray
Current labels will be overwritten by these labels
(including unlabelled). No effect if labels is None.
ranking: list
Reorder the dataframe according to these (internal) indices.
Default ordering if ranking is None.
Returns
-------
pandas.DataFrame
Dataframe of all available record data.
"""
df = self.to_dataframe(labels=labels, ranking=ranking)
return df.to_csv(fp, index=True)
def to_excel(self, fp, labels=None, ranking=None):
"""Export to Excel xlsx file.
Arguments
---------
fp: str, NoneType
Filepath or None for buffer.
labels: list, numpy.ndarray
Current labels will be overwritten by these labels
(including unlabelled). No effect if labels is None.
ranking: list
Reorder the dataframe according to these (internal) indices.
Default ordering if ranking is None.
Returns
-------
pandas.DataFrame
Dataframe of all available record data.
"""
df = self.to_dataframe(labels=labels, ranking=ranking)
return df.to_excel(fp, index=True)
def to_ris(self, ris_fp, labels=None, ranking=None):
df = self.to_dataframe(labels=labels, ranking=ranking)
write_ris(df, ris_fp)
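if __name__ == "__main__":
    # Minimal usage sketch: build an ASReviewData object directly from a
    # pandas DataFrame. The two records below are made up for illustration;
    # the column names follow the standard title/abstract specification.
    _demo_df = pd.DataFrame(
        {
            "title": ["Record A", "Record B"],
            "abstract": ["First demo abstract.", "Second demo abstract."],
        }
    )
    demo = ASReviewData(_demo_df, data_name="demo")
    print(len(demo), demo.texts[0])  # -> 2 Record A First demo abstract.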
| 32.654511
| 79
| 0.582848
|
bb23e05aa56d7be2a1adc8b9eb12bf9c07bf8c32
| 1,955
|
py
|
Python
|
addons14/hr_timesheet_task_stage/tests/test_hr_timesheet_task_stage.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/hr_timesheet_task_stage/tests/test_hr_timesheet_task_stage.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/hr_timesheet_task_stage/tests/test_hr_timesheet_task_stage.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
# Copyright 2016-2018 Tecnativa - Pedro M. Baeza
# Copyright 2019 Brainbean Apps (https://brainbeanapps.com)
# Copyright 2020 Tecnativa - Manuel Calero
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
class TestHrTimesheetTaskStage(common.TransactionCase):
def setUp(self):
super().setUp()
self.project = self.env["project.project"].create({"name": "Test project"})
self.analytic_account = self.project.analytic_account_id
self.task = self.env["project.task"].create(
{"name": "Test task", "project_id": self.project.id}
)
task_type_obj = self.env["project.task.type"]
self.stage_open = task_type_obj.create(
{
"name": "New",
"is_closed": False,
"project_ids": [(6, 0, self.project.ids)],
}
)
self.stage_close = task_type_obj.create(
{
"name": "Done",
"is_closed": True,
"project_ids": [(6, 0, self.project.ids)],
}
)
self.line = self.env["account.analytic.line"].create(
{
"task_id": self.task.id,
"account_id": self.analytic_account.id,
"name": "Test line",
}
)
def test_open_close_task(self):
self.line.action_close_task()
self.assertEqual(self.line.task_id.stage_id, self.stage_close)
self.line.action_open_task()
self.assertEqual(self.line.task_id.stage_id, self.stage_open)
def test_toggle_task_stage(self):
self.line.action_toggle_task_stage()
self.assertTrue(self.line.task_id.stage_id.is_closed)
self.assertTrue(self.line.is_task_closed)
self.line.action_toggle_task_stage()
self.assertFalse(self.line.task_id.stage_id.is_closed)
self.assertFalse(self.line.is_task_closed)
| 36.203704
| 83
| 0.600512
|
7337d11312aef143f6f5d3c769898325e79c56cd
| 166
|
py
|
Python
|
app/urls.py
|
abdukhashimov/great-rest-api
|
a57455d22b7ba7d06945889aed5dd89550292dae
|
[
"MIT"
] | null | null | null |
app/urls.py
|
abdukhashimov/great-rest-api
|
a57455d22b7ba7d06945889aed5dd89550292dae
|
[
"MIT"
] | null | null | null |
app/urls.py
|
abdukhashimov/great-rest-api
|
a57455d22b7ba7d06945889aed5dd89550292dae
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('info.urls'))
]
| 20.75
| 38
| 0.686747
|
980a17b5c06197a22cdc362296da136e65394aa3
| 833
|
py
|
Python
|
ws2122-lspm/Lib/site-packages/pm4py/objects/conversion/process_tree/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/objects/conversion/process_tree/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/objects/conversion/process_tree/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.objects.conversion.process_tree.variants import to_petri_net, to_petri_net_transition_bordered, to_bpmn
| 46.277778
| 114
| 0.751501
|
d5929a22a0c7002168aeaa195878ebf4570e53be
| 663
|
py
|
Python
|
rstream/amqp.py
|
qweeze/rstream
|
5ae747af11275285fde1aa48e8823890fae4dd1b
|
[
"MIT"
] | 16
|
2021-08-16T10:42:53.000Z
|
2022-03-30T18:35:41.000Z
|
rstream/amqp.py
|
qweeze/rstream
|
5ae747af11275285fde1aa48e8823890fae4dd1b
|
[
"MIT"
] | 20
|
2021-09-22T08:12:52.000Z
|
2022-03-28T07:11:37.000Z
|
rstream/amqp.py
|
qweeze/rstream
|
5ae747af11275285fde1aa48e8823890fae4dd1b
|
[
"MIT"
] | 4
|
2021-08-16T07:34:05.000Z
|
2022-03-21T13:42:24.000Z
|
from __future__ import annotations
from typing import Any, Optional, Protocol, cast
import proton
class _MessageProtocol(Protocol):
publishing_id: Optional[int] = None
def __bytes__(self) -> bytes:
...
class AMQPMessage(proton.Message, _MessageProtocol): # type:ignore
def __init__(self, *args: Any, publishing_id: Optional[int] = None, **kwargs: Any):
self.publishing_id = publishing_id
super().__init__(*args, **kwargs)
def __bytes__(self) -> bytes:
return cast(bytes, self.encode())
def amqp_decoder(data: bytes) -> AMQPMessage:
message = AMQPMessage()
message.decode(data)
return message
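if __name__ == "__main__":
    # Round-trip sketch: encode an AMQPMessage to bytes and decode it back,
    # relying only on the proton calls already used above. The body text and
    # publishing id are illustrative.
    original = AMQPMessage(body="hello stream", publishing_id=1)
    decoded = amqp_decoder(bytes(original))
    print(decoded.body)  # -> hello stream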
| 23.678571
| 87
| 0.687783
|
1a75449b1d2ad8496090fb3edd69a0b238a235c3
| 6,503
|
py
|
Python
|
recon.py
|
wate123/face-recognition
|
22c2ac6074f459d9448d0c08ea59171cb5487325
|
[
"Apache-2.0"
] | null | null | null |
recon.py
|
wate123/face-recognition
|
22c2ac6074f459d9448d0c08ea59171cb5487325
|
[
"Apache-2.0"
] | null | null | null |
recon.py
|
wate123/face-recognition
|
22c2ac6074f459d9448d0c08ea59171cb5487325
|
[
"Apache-2.0"
] | null | null | null |
import os
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Layer
from model import create_model, TripletLossLayer
import numpy as np
import os.path
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
from train import train_model
from data import triplet_generator
from sklearn.metrics import f1_score, accuracy_score
from utils import load_metadata, load_image, download_landmarks
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
import warnings
dst_dir = 'models'
dst_file = os.path.join(dst_dir, 'landmarks.dat')
if not os.path.exists(dst_file):
os.makedirs(dst_dir)
download_landmarks(dst_file)
nn4_small2_train = create_model()
nn4_small2_train.load_weights('weights/nn4.small2.v1.h5')
# try:
# open('weights/nn4.small2.myTrain.h5', 'r')
# nn4_small2_train.load_weights('weights/nn4.small2.myTrain.h5')
# except FileNotFoundError:
#
# nn4_small2_train = train_model()
metadata = load_metadata('images')
# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')
# Load an image of Schwarzenegger
jc_orig = load_image(metadata[92].image_path())
# Detect face and return bounding box
bb = alignment.getLargestFaceBoundingBox(jc_orig)
# Transform image using specified face landmark indices and crop image to 96x96
jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# Show original image
plt.subplot(131)
plt.imshow(jc_orig)
# Show original image with bounding box
plt.subplot(132)
plt.imshow(jc_orig)
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
# Show aligned image
plt.subplot(133)
plt.imshow(jc_aligned)
plt.show()
def align_image(img):
return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
embedded = np.zeros((metadata.shape[0], 128))
for i, m in enumerate(metadata):
img = load_image(m.image_path())
img = align_image(img)
# scale RGB values to interval [0,1]
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
embedded[i] = nn4_small2_train.predict(np.expand_dims(img, axis=0))[0]
# Verify
def distance(emb1, emb2):
return np.sum(np.square(emb1 - emb2))
def show_pair(idx1, idx2):
plt.figure(figsize=(8,3))
plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
plt.subplot(121)
plt.imshow(load_image(metadata[idx1].image_path()))
plt.subplot(122)
plt.imshow(load_image(metadata[idx2].image_path()));
show_pair(94, 95)
show_pair(94, 89)
plt.show()
distances = [] # squared L2 distance between pairs
identical = [] # 1 if same identity, 0 otherwise
num = len(metadata)
for i in range(num - 1):
for j in range(1, num):
distances.append(distance(embedded[i], embedded[j]))
identical.append(1 if metadata[i].name == metadata[j].name else 0)
distances = np.array(distances)
identical = np.array(identical)
thresholds = np.arange(0.3, 1.0, 0.01)
f1_scores = [f1_score(identical, distances < t) for t in thresholds]
acc_scores = [accuracy_score(identical, distances < t) for t in thresholds]
opt_idx = np.argmax(f1_scores)
# Threshold at maximal F1 score
opt_tau = thresholds[opt_idx]
# Accuracy at maximal F1 score
opt_acc = accuracy_score(identical, distances < opt_tau)
# Plot F1 score and accuracy as function of distance threshold
plt.plot(thresholds, f1_scores, label='F1 score')
plt.plot(thresholds, acc_scores, label='Accuracy')
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}');
plt.xlabel('Distance threshold')
plt.legend()
dist_pos = distances[identical == 1]
dist_neg = distances[identical == 0]
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.hist(dist_pos)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (pos. pairs)')
plt.legend()
plt.subplot(122)
plt.hist(dist_neg)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (neg. pairs)')
plt.legend()
dist_pos = distances[identical == 1]
dist_neg = distances[identical == 0]
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.hist(dist_pos)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (pos. pairs)')
plt.legend()
plt.subplot(122)
plt.hist(dist_neg)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (neg. pairs)')
plt.legend()
plt.show()
targets = np.array([m.name for m in metadata])
encoder = LabelEncoder()
encoder.fit(targets)
# Numerical encoding of identities
y = encoder.transform(targets)
train_idx = np.arange(metadata.shape[0]) % 2 != 0
test_idx = np.arange(metadata.shape[0]) % 2 == 0
# 50 train examples of 10 identities (5 examples each)
X_train = embedded[train_idx]
# 50 test examples of 10 identities (5 examples each)
X_test = embedded[test_idx]
y_train = y[train_idx]
y_test = y[test_idx]
knn = KNeighborsClassifier(n_neighbors=1, metric='euclidean')
svc = LinearSVC()
knn.fit(X_train, y_train)
svc.fit(X_train, y_train)
acc_knn = accuracy_score(y_test, knn.predict(X_test))
acc_svc = accuracy_score(y_test, svc.predict(X_test))
print(f'KNN accuracy = {acc_knn}, SVM accuracy = {acc_svc}')
# Suppress LabelEncoder warning
warnings.filterwarnings('ignore')
example_idx = 23
# example_image = load_image('test/220px-Arnold_Schwarzenegger_September_2017.jpg')
example_image = load_image(metadata[test_idx][example_idx].image_path())
bb = alignment.getLargestFaceBoundingBox(example_image)
example_prediction = svc.predict([embedded[test_idx][example_idx]])
example_identity = encoder.inverse_transform(example_prediction)[0]
print(example_identity)
plt.imshow(example_image)
plt.title(f'Recognized as {example_identity} using SVM')
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
plt.show()
from sklearn.manifold import TSNE
X_embedded = TSNE(n_components=2).fit_transform(embedded)
for i, t in enumerate(set(targets)):
idx = targets == t
plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)
plt.legend(bbox_to_anchor=(1, 1))
plt.show()
| 28.52193
| 111
| 0.743657
|
00d5cd58f57e0cc22d7a4aa97cdf5d655386a6dc
| 629
|
py
|
Python
|
foody/__init__.py
|
AdrienW97/foody
|
1393f79ef06ee8c50470adb8aa2ab9fe49be4399
|
[
"WTFPL"
] | null | null | null |
foody/__init__.py
|
AdrienW97/foody
|
1393f79ef06ee8c50470adb8aa2ab9fe49be4399
|
[
"WTFPL"
] | null | null | null |
foody/__init__.py
|
AdrienW97/foody
|
1393f79ef06ee8c50470adb8aa2ab9fe49be4399
|
[
"WTFPL"
] | null | null | null |
import os
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, current_user
from flask_login import logout_user, login_required
app = Flask(__name__)
app.config['SECRET_KEY'] = "hard-to-guess-string"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
#setup database
db = SQLAlchemy(app)
#LoginManager + Bcrypt for hashing passwords
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.login_message_category = 'info' #add better style
from foody import routes
| 29.952381
| 73
| 0.807631
|
c9a41777bdabb747c89d1960c7a992f934c9a46c
| 69,507
|
py
|
Python
|
qiskit/dagcircuit/dagcircuit.py
|
TakahitoMotoki/qiskit-terra
|
531e62f3a3c218fee6db116f54ed41ce4e88d9a9
|
[
"Apache-2.0"
] | null | null | null |
qiskit/dagcircuit/dagcircuit.py
|
TakahitoMotoki/qiskit-terra
|
531e62f3a3c218fee6db116f54ed41ce4e88d9a9
|
[
"Apache-2.0"
] | null | null | null |
qiskit/dagcircuit/dagcircuit.py
|
TakahitoMotoki/qiskit-terra
|
531e62f3a3c218fee6db116f54ed41ce4e88d9a9
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Object to represent a quantum circuit as a directed acyclic graph (DAG).
The nodes in the graph are either input/output nodes or operation nodes.
The edges correspond to qubits or bits in the circuit. A directed edge
from node A to node B means that the (qu)bit passes from the output of A
to the input of B. The object's methods allow circuits to be constructed,
composed, and modified. Some natural properties like depth can be computed
directly from the graph.
"""
from collections import OrderedDict, defaultdict
import copy
import itertools
import math
import numpy as np
import retworkx as rx
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.quantumregister import QuantumRegister, Qubit
from qiskit.circuit.classicalregister import ClassicalRegister, Clbit
from qiskit.circuit.gate import Gate
from qiskit.circuit.parameterexpression import ParameterExpression
from qiskit.dagcircuit.exceptions import DAGCircuitError
from qiskit.dagcircuit.dagnode import DAGNode, DAGOpNode, DAGInNode, DAGOutNode
from qiskit.utils import optionals as _optionals
class DAGCircuit:
"""
Quantum circuit as a directed acyclic graph.
There are 3 types of nodes in the graph: inputs, outputs, and operations.
The nodes are connected by directed edges that correspond to qubits and
bits.
"""
# pylint: disable=invalid-name
def __init__(self):
"""Create an empty circuit."""
# Circuit name. Generally, this corresponds to the name
# of the QuantumCircuit from which the DAG was generated.
self.name = None
# Circuit metadata
self.metadata = None
# Set of wires (Register,idx) in the dag
self._wires = set()
# Map from wire (Register,idx) to input nodes of the graph
self.input_map = OrderedDict()
# Map from wire (Register,idx) to output nodes of the graph
self.output_map = OrderedDict()
# Directed multigraph whose nodes are inputs, outputs, or operations.
# Operation nodes have equal in- and out-degrees and carry
# additional data about the operation, including the argument order
# and parameter values.
# Input nodes have out-degree 1 and output nodes have in-degree 1.
# Edges carry wire labels (reg,idx) and each operation has
# corresponding in- and out-edges with the same wire labels.
self._multi_graph = rx.PyDAG()
# Map of qreg/creg name to Register object.
self.qregs = OrderedDict()
self.cregs = OrderedDict()
# List of Qubit/Clbit wires that the DAG acts on.
self.qubits = []
self.clbits = []
self._global_phase = 0
self._calibrations = defaultdict(dict)
self._op_names = {}
self.duration = None
self.unit = "dt"
@_optionals.HAS_NETWORKX.require_in_call
def to_networkx(self):
"""Returns a copy of the DAGCircuit in networkx format."""
import networkx as nx
G = nx.MultiDiGraph()
for node in self._multi_graph.nodes():
G.add_node(node)
for node_id in rx.topological_sort(self._multi_graph):
for source_id, dest_id, edge in self._multi_graph.in_edges(node_id):
G.add_edge(self._multi_graph[source_id], self._multi_graph[dest_id], wire=edge)
return G
@classmethod
@_optionals.HAS_NETWORKX.require_in_call
def from_networkx(cls, graph):
"""Take a networkx MultiDigraph and create a new DAGCircuit.
Args:
graph (networkx.MultiDiGraph): The graph to create a DAGCircuit
object from. The format of this MultiDiGraph format must be
in the same format as returned by to_networkx.
Returns:
DAGCircuit: The dagcircuit object created from the networkx
MultiDiGraph.
Raises:
MissingOptionalLibraryError: If networkx is not installed
DAGCircuitError: If input networkx graph is malformed
"""
import networkx as nx
dag = DAGCircuit()
for node in nx.topological_sort(graph):
if isinstance(node, DAGOutNode):
continue
if isinstance(node, DAGInNode):
if isinstance(node.wire, Qubit):
dag.add_qubits([node.wire])
elif isinstance(node.wire, Clbit):
dag.add_clbits([node.wire])
else:
raise DAGCircuitError(f"unknown node wire type: {node.wire}")
elif isinstance(node, DAGOpNode):
dag.apply_operation_back(node.op.copy(), node.qargs, node.cargs)
return dag
@property
def wires(self):
"""Return a list of the wires in order."""
return self.qubits + self.clbits
@property
def node_counter(self):
"""
Returns the number of nodes in the dag.
"""
return len(self._multi_graph)
@property
def global_phase(self):
"""Return the global phase of the circuit."""
return self._global_phase
@global_phase.setter
def global_phase(self, angle):
"""Set the global phase of the circuit.
Args:
angle (float, ParameterExpression)
"""
if isinstance(angle, ParameterExpression):
self._global_phase = angle
else:
# Set the phase to the [0, 2π) interval
angle = float(angle)
if not angle:
self._global_phase = 0
else:
self._global_phase = angle % (2 * math.pi)
@property
def calibrations(self):
"""Return calibration dictionary.
The custom pulse definition of a given gate is of the form
{'gate_name': {(qubits, params): schedule}}
"""
return dict(self._calibrations)
@calibrations.setter
def calibrations(self, calibrations):
"""Set the circuit calibration data from a dictionary of calibration definition.
Args:
calibrations (dict): A dictionary of input in the format
{'gate_name': {(qubits, gate_params): schedule}}
"""
self._calibrations = defaultdict(dict, calibrations)
def add_calibration(self, gate, qubits, schedule, params=None):
"""Register a low-level, custom pulse definition for the given gate.
Args:
gate (Union[Gate, str]): Gate information.
qubits (Union[int, Tuple[int]]): List of qubits to be measured.
schedule (Schedule): Schedule information.
params (Optional[List[Union[float, Parameter]]]): A list of parameters.
Raises:
Exception: if the gate is of type string and params is None.
"""
if isinstance(gate, Gate):
self._calibrations[gate.name][
(tuple(qubits), tuple(float(p) for p in gate.params))
] = schedule
else:
self._calibrations[gate][(tuple(qubits), tuple(params or []))] = schedule
def has_calibration_for(self, node):
"""Return True if the dag has a calibration defined for the node operation. In this
case, the operation does not need to be translated to the device basis.
"""
if not self.calibrations or node.op.name not in self.calibrations:
return False
qubits = tuple(self.qubits.index(qubit) for qubit in node.qargs)
params = []
for p in node.op.params:
if isinstance(p, ParameterExpression) and not p.parameters:
params.append(float(p))
else:
params.append(p)
params = tuple(params)
return (qubits, params) in self.calibrations[node.op.name]
def remove_all_ops_named(self, opname):
"""Remove all operation nodes with the given name."""
for n in self.named_nodes(opname):
self.remove_op_node(n)
def add_qubits(self, qubits):
"""Add individual qubit wires."""
if any(not isinstance(qubit, Qubit) for qubit in qubits):
raise DAGCircuitError("not a Qubit instance.")
duplicate_qubits = set(self.qubits).intersection(qubits)
if duplicate_qubits:
raise DAGCircuitError("duplicate qubits %s" % duplicate_qubits)
self.qubits.extend(qubits)
for qubit in qubits:
self._add_wire(qubit)
def add_clbits(self, clbits):
"""Add individual clbit wires."""
if any(not isinstance(clbit, Clbit) for clbit in clbits):
raise DAGCircuitError("not a Clbit instance.")
duplicate_clbits = set(self.clbits).intersection(clbits)
if duplicate_clbits:
raise DAGCircuitError("duplicate clbits %s" % duplicate_clbits)
self.clbits.extend(clbits)
for clbit in clbits:
self._add_wire(clbit)
def add_qreg(self, qreg):
"""Add all wires in a quantum register."""
if not isinstance(qreg, QuantumRegister):
raise DAGCircuitError("not a QuantumRegister instance.")
if qreg.name in self.qregs:
raise DAGCircuitError("duplicate register %s" % qreg.name)
self.qregs[qreg.name] = qreg
existing_qubits = set(self.qubits)
for j in range(qreg.size):
if qreg[j] not in existing_qubits:
self.qubits.append(qreg[j])
self._add_wire(qreg[j])
def add_creg(self, creg):
"""Add all wires in a classical register."""
if not isinstance(creg, ClassicalRegister):
raise DAGCircuitError("not a ClassicalRegister instance.")
if creg.name in self.cregs:
raise DAGCircuitError("duplicate register %s" % creg.name)
self.cregs[creg.name] = creg
existing_clbits = set(self.clbits)
for j in range(creg.size):
if creg[j] not in existing_clbits:
self.clbits.append(creg[j])
self._add_wire(creg[j])
def _add_wire(self, wire):
"""Add a qubit or bit to the circuit.
Args:
wire (Bit): the wire to be added
This adds a pair of in and out nodes connected by an edge.
Raises:
DAGCircuitError: if trying to add duplicate wire
"""
if wire not in self._wires:
self._wires.add(wire)
inp_node = DAGInNode(wire=wire)
outp_node = DAGOutNode(wire=wire)
input_map_id, output_map_id = self._multi_graph.add_nodes_from([inp_node, outp_node])
inp_node._node_id = input_map_id
outp_node._node_id = output_map_id
self.input_map[wire] = inp_node
self.output_map[wire] = outp_node
self._multi_graph.add_edge(inp_node._node_id, outp_node._node_id, wire)
else:
raise DAGCircuitError(f"duplicate wire {wire}")
def remove_clbits(self, *clbits):
"""
Remove classical bits from the circuit. All bits MUST be idle.
Any registers with references to at least one of the specified bits will
also be removed.
Args:
clbits (List[Clbit]): The bits to remove.
Raises:
DAGCircuitError: a clbit is not a :obj:`.Clbit`, is not in the circuit,
or is not idle.
"""
if any(not isinstance(clbit, Clbit) for clbit in clbits):
raise DAGCircuitError(
"clbits not of type Clbit: %s" % [b for b in clbits if not isinstance(b, Clbit)]
)
clbits = set(clbits)
unknown_clbits = clbits.difference(self.clbits)
if unknown_clbits:
raise DAGCircuitError("clbits not in circuit: %s" % unknown_clbits)
busy_clbits = {bit for bit in clbits if not self._is_wire_idle(bit)}
if busy_clbits:
raise DAGCircuitError("clbits not idle: %s" % busy_clbits)
# remove any references to bits
cregs_to_remove = {creg for creg in self.cregs.values() if not clbits.isdisjoint(creg)}
self.remove_cregs(*cregs_to_remove)
for clbit in clbits:
self._remove_idle_wire(clbit)
self.clbits.remove(clbit)
def remove_cregs(self, *cregs):
"""
Remove classical registers from the circuit, leaving underlying bits
in place.
Raises:
DAGCircuitError: a creg is not a ClassicalRegister, or is not in
the circuit.
"""
if any(not isinstance(creg, ClassicalRegister) for creg in cregs):
raise DAGCircuitError(
"cregs not of type ClassicalRegister: %s"
% [r for r in cregs if not isinstance(r, ClassicalRegister)]
)
unknown_cregs = set(cregs).difference(self.cregs.values())
if unknown_cregs:
raise DAGCircuitError("cregs not in circuit: %s" % unknown_cregs)
for creg in cregs:
del self.cregs[creg.name]
def _is_wire_idle(self, wire):
"""Check if a wire is idle.
Args:
wire (Bit): a wire in the circuit.
Returns:
bool: true if the wire is idle, false otherwise.
Raises:
DAGCircuitError: the wire is not in the circuit.
"""
if wire not in self._wires:
raise DAGCircuitError("wire %s not in circuit" % wire)
try:
child = next(self.successors(self.input_map[wire]))
except StopIteration as e:
raise DAGCircuitError(
"Invalid dagcircuit input node %s has no output" % self.input_map[wire]
) from e
return child is self.output_map[wire]
def _remove_idle_wire(self, wire):
"""Remove an idle qubit or bit from the circuit.
Args:
wire (Bit): the wire to be removed, which MUST be idle.
"""
inp_node = self.input_map[wire]
oup_node = self.output_map[wire]
self._multi_graph.remove_node(inp_node._node_id)
self._multi_graph.remove_node(oup_node._node_id)
self._wires.remove(wire)
del self.input_map[wire]
del self.output_map[wire]
def _check_condition(self, name, condition):
"""Verify that the condition is valid.
Args:
name (string): used for error reporting
condition (tuple or None): a condition tuple (ClassicalRegister, int) or (Clbit, bool)
Raises:
DAGCircuitError: if conditioning on an invalid register
"""
if (
condition is not None
and condition[0] not in self.clbits
and condition[0].name not in self.cregs
):
raise DAGCircuitError("invalid creg in condition for %s" % name)
def _check_bits(self, args, amap):
"""Check the values of a list of (qu)bit arguments.
For each element of args, check that amap contains it.
Args:
args (list[Bit]): the elements to be checked
amap (dict): a dictionary keyed on Qubits/Clbits
Raises:
DAGCircuitError: if a qubit is not contained in amap
"""
# Check for each wire
for wire in args:
if wire not in amap:
raise DAGCircuitError(f"(qu)bit {wire} not found in {amap}")
def _bits_in_condition(self, cond):
"""Return a list of bits in the given condition.
Args:
cond (tuple or None): optional condition (ClassicalRegister, int) or (Clbit, bool)
Returns:
list[Clbit]: list of classical bits
Raises:
CircuitError: if cond[0] is not ClassicalRegister or Clbit
"""
if cond is None:
return []
elif isinstance(cond[0], ClassicalRegister):
# Returns a list of all the cbits in the given creg cond[0].
return cond[0][:]
elif isinstance(cond[0], Clbit):
# Returns a singleton list of the conditional cbit.
return [cond[0]]
else:
raise CircuitError("Condition must be used with ClassicalRegister or Clbit.")
def _increment_op(self, op):
if op.name in self._op_names:
self._op_names[op.name] += 1
else:
self._op_names[op.name] = 1
def _decrement_op(self, op):
if self._op_names[op.name] == 1:
del self._op_names[op.name]
else:
self._op_names[op.name] -= 1
def _add_op_node(self, op, qargs, cargs):
"""Add a new operation node to the graph and assign properties.
Args:
op (qiskit.circuit.Instruction): the operation associated with the DAG node
qargs (list[Qubit]): list of quantum wires to attach to.
cargs (list[Clbit]): list of classical wires to attach to.
Returns:
int: The integer node index for the new op node on the DAG
"""
# Add a new operation node to the graph
new_node = DAGOpNode(op=op, qargs=qargs, cargs=cargs)
node_index = self._multi_graph.add_node(new_node)
new_node._node_id = node_index
self._increment_op(op)
return node_index
def _copy_circuit_metadata(self):
"""Return a copy of source_dag with metadata but empty."""
target_dag = DAGCircuit()
target_dag.name = self.name
target_dag._global_phase = self._global_phase
target_dag.duration = self.duration
target_dag.unit = self.unit
target_dag.metadata = self.metadata
target_dag.add_qubits(self.qubits)
target_dag.add_clbits(self.clbits)
for qreg in self.qregs.values():
target_dag.add_qreg(qreg)
for creg in self.cregs.values():
target_dag.add_creg(creg)
return target_dag
def apply_operation_back(self, op, qargs=None, cargs=None):
"""Apply an operation to the output of the circuit.
Args:
op (qiskit.circuit.Instruction): the operation associated with the DAG node
qargs (list[Qubit]): qubits that op will be applied to
cargs (list[Clbit]): cbits that op will be applied to
Returns:
DAGOpNode: the node for the op that was added to the dag
Raises:
DAGCircuitError: if a leaf node is connected to multiple outputs
"""
qargs = qargs or []
cargs = cargs or []
all_cbits = self._bits_in_condition(op.condition)
all_cbits = set(all_cbits).union(cargs)
self._check_condition(op.name, op.condition)
self._check_bits(qargs, self.output_map)
self._check_bits(all_cbits, self.output_map)
node_index = self._add_op_node(op, qargs, cargs)
# Add new in-edges from predecessors of the output nodes to the
# operation node while deleting the old in-edges of the output nodes
# and adding new edges from the operation node to each output node
al = [qargs, all_cbits]
self._multi_graph.insert_node_on_in_edges_multiple(
node_index, [self.output_map[q]._node_id for q in itertools.chain(*al)]
)
return self._multi_graph[node_index]
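A minimal usage sketch (not part of the original source), assuming qiskit-terra is installed: building a small DAG by hand with apply_operation_back. The register and gate choices are arbitrary.
from qiskit.circuit import QuantumRegister, ClassicalRegister, Measure
from qiskit.circuit.library import HGate, CXGate
from qiskit.dagcircuit import DAGCircuit
dag = DAGCircuit()
qr = QuantumRegister(2, "q")
cr = ClassicalRegister(2, "c")
dag.add_qreg(qr)
dag.add_creg(cr)
dag.apply_operation_back(HGate(), qargs=[qr[0]], cargs=[])
dag.apply_operation_back(CXGate(), qargs=[qr[0], qr[1]], cargs=[])
dag.apply_operation_back(Measure(), qargs=[qr[1]], cargs=[cr[1]])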
def apply_operation_front(self, op, qargs, cargs):
"""Apply an operation to the input of the circuit.
Args:
op (qiskit.circuit.Instruction): the operation associated with the DAG node
qargs (list[Qubit]): qubits that op will be applied to
cargs (list[Clbit]): cbits that op will be applied to
Returns:
DAGOpNode: the node for the op that was added to the dag
Raises:
DAGCircuitError: if initial nodes connected to multiple out edges
"""
all_cbits = self._bits_in_condition(op.condition)
all_cbits.extend(cargs)
self._check_condition(op.name, op.condition)
self._check_bits(qargs, self.input_map)
self._check_bits(all_cbits, self.input_map)
node_index = self._add_op_node(op, qargs, cargs)
# Add new out-edges to successors of the input nodes from the
# operation node while deleting the old out-edges of the input nodes
# and adding new edges to the operation node from each input node
al = [qargs, all_cbits]
self._multi_graph.insert_node_on_out_edges_multiple(
node_index, [self.input_map[q]._node_id for q in itertools.chain(*al)]
)
return self._multi_graph[node_index]
def _check_edgemap_registers(self, inbound_wires, inbound_regs):
"""Check that wiremap neither fragments nor leaves duplicate registers.
1. There are no fragmented registers. A register in inbound_regs
is fragmented if only some of its (qu)bits appear in inbound_wires.
2. There are no duplicate registers. A register is a duplicate if
it appears in both self and inbound_regs but none of its bits are mapped.
Args:
inbound_wires (list): a list of wires being mapped from the inbound dag
inbound_regs (list): a list from registers from the inbound dag
Returns:
set(Register): the set of regs to add to self
Raises:
DAGCircuitError: if the wiremap fragments, or duplicates exist
"""
add_regs = set()
reg_frag_chk = {}
for inbound_reg in inbound_regs:
reg_frag_chk[inbound_reg] = {reg_bit: False for reg_bit in inbound_reg}
for inbound_bit in inbound_wires:
for inbound_reg in inbound_regs:
if inbound_bit in inbound_reg:
reg_frag_chk[inbound_reg][inbound_bit] = True
break
for inbound_reg, v in reg_frag_chk.items():
s = set(v.values())
if len(s) == 2:
raise DAGCircuitError("inbound_wires fragments reg %s" % inbound_reg)
if s == {False}:
if inbound_reg.name in self.qregs or inbound_reg.name in self.cregs:
raise DAGCircuitError("unmapped duplicate reg %s" % inbound_reg)
# Add registers that appear only in inbound_regs
add_regs.add(inbound_reg)
return add_regs
def _check_wiremap_validity(self, wire_map, keymap, valmap):
"""Check that the wiremap is consistent.
Check that the wiremap refers to valid wires and that
those wires have consistent types.
Args:
wire_map (dict): map from Bit in keymap to Bit in valmap
keymap (list): a list of wire_map keys
valmap (dict): a map whose keys are wire_map values
Raises:
DAGCircuitError: if wire_map not valid
"""
for k, v in wire_map.items():
if k not in keymap:
raise DAGCircuitError("invalid wire mapping key %s" % k)
if v not in valmap:
raise DAGCircuitError("invalid wire mapping value %s" % v)
# TODO Support mapping from AncillaQubit to Qubit, since AncillaQubits are mapped to
# Qubits upon being converted to an Instruction. Until this translation is fixed
# and Instructions have a concept of ancilla qubits, this fix is required.
if not (isinstance(k, type(v)) or isinstance(v, type(k))):
raise DAGCircuitError(f"inconsistent wire_map at ({k},{v})")
@staticmethod
def _map_condition(wire_map, condition, target_cregs):
"""Use the wire_map dict to change the condition tuple's creg name.
Args:
wire_map (dict): a map from source wires to destination wires
condition (tuple or None): (ClassicalRegister,int)
target_cregs (list[ClassicalRegister]): List of all cregs in the
target circuit onto which the condition might possibly be mapped.
Returns:
tuple(ClassicalRegister,int): new condition
Raises:
DAGCircuitError: if condition register not in wire_map, or if
wire_map maps condition onto more than one creg, or if the
specified condition is not present in a classical register.
"""
if condition is None:
new_condition = None
else:
# if there is a condition, map the condition bits to the
# composed cregs based on the wire_map
is_reg = False
if isinstance(condition[0], Clbit):
cond_creg = [condition[0]]
else:
cond_creg = condition[0]
is_reg = True
cond_val = condition[1]
new_cond_val = 0
new_creg = None
bits_in_condcreg = [bit for bit in wire_map if bit in cond_creg]
for bit in bits_in_condcreg:
if is_reg:
try:
candidate_creg = next(
creg for creg in target_cregs if wire_map[bit] in creg
)
except StopIteration as ex:
raise DAGCircuitError(
"Did not find creg containing mapped clbit in conditional."
) from ex
else:
# If cond is on a single Clbit then the candidate_creg is
# the target Clbit to which 'bit' is mapped to.
candidate_creg = wire_map[bit]
if new_creg is None:
new_creg = candidate_creg
elif new_creg != candidate_creg:
# Raise if wire_map maps condition creg on to more than one
# creg in target DAG.
raise DAGCircuitError(
"wire_map maps conditional register onto more than one creg."
)
if not is_reg:
# If the cond is on a single Clbit then the new_cond_val is the
# same as the cond_val since the new_creg is also a single Clbit.
new_cond_val = cond_val
elif 2 ** (cond_creg[:].index(bit)) & cond_val:
# If the conditional values of the Clbit 'bit' is 1 then the new_cond_val
# is updated such that the conditional value of the Clbit to which 'bit'
# is mapped to in new_creg is 1.
new_cond_val += 2 ** (new_creg[:].index(wire_map[bit]))
if new_creg is None:
raise DAGCircuitError("Condition registers not found in wire_map.")
new_condition = (new_creg, new_cond_val)
return new_condition
def compose(self, other, qubits=None, clbits=None, front=False, inplace=True):
"""Compose the ``other`` circuit onto the output of this circuit.
A subset of input wires of ``other`` are mapped
to a subset of output wires of this circuit.
``other`` can be narrower or of equal width to ``self``.
Args:
other (DAGCircuit): circuit to compose with self
qubits (list[Qubit|int]): qubits of self to compose onto.
clbits (list[Clbit|int]): clbits of self to compose onto.
front (bool): If True, front composition will be performed (not implemented yet)
inplace (bool): If True, modify the object. Otherwise return composed circuit.
Returns:
DAGCircuit: the composed dag (returns None if inplace==True).
Raises:
DAGCircuitError: if ``other`` is wider or there are duplicate edge mappings.
"""
if front:
raise DAGCircuitError("Front composition not supported yet.")
if len(other.qubits) > len(self.qubits) or len(other.clbits) > len(self.clbits):
raise DAGCircuitError(
"Trying to compose with another DAGCircuit which has more 'in' edges."
)
# number of qubits and clbits must match number in circuit or None
identity_qubit_map = dict(zip(other.qubits, self.qubits))
identity_clbit_map = dict(zip(other.clbits, self.clbits))
if qubits is None:
qubit_map = identity_qubit_map
elif len(qubits) != len(other.qubits):
raise DAGCircuitError(
"Number of items in qubits parameter does not"
" match number of qubits in the circuit."
)
else:
qubit_map = {
other.qubits[i]: (self.qubits[q] if isinstance(q, int) else q)
for i, q in enumerate(qubits)
}
if clbits is None:
clbit_map = identity_clbit_map
elif len(clbits) != len(other.clbits):
raise DAGCircuitError(
"Number of items in clbits parameter does not"
" match number of clbits in the circuit."
)
else:
clbit_map = {
other.clbits[i]: (self.clbits[c] if isinstance(c, int) else c)
for i, c in enumerate(clbits)
}
edge_map = {**qubit_map, **clbit_map} or None
# if no edge_map, try to do a 1-1 mapping in order
if edge_map is None:
edge_map = {**identity_qubit_map, **identity_clbit_map}
# Check the edge_map for duplicate values
if len(set(edge_map.values())) != len(edge_map):
raise DAGCircuitError("duplicates in wire_map")
# Compose
if inplace:
dag = self
else:
dag = copy.deepcopy(self)
dag.global_phase += other.global_phase
for gate, cals in other.calibrations.items():
dag._calibrations[gate].update(cals)
for nd in other.topological_nodes():
if isinstance(nd, DAGInNode):
# if in edge_map, get new name, else use existing name
m_wire = edge_map.get(nd.wire, nd.wire)
# the mapped wire should already exist
if m_wire not in dag.output_map:
raise DAGCircuitError(
"wire %s[%d] not in self" % (m_wire.register.name, m_wire.index)
)
if nd.wire not in other._wires:
raise DAGCircuitError(
"inconsistent wire type for %s[%d] in other"
% (nd.wire.register.name, nd.wire.index)
)
elif isinstance(nd, DAGOutNode):
# ignore output nodes
pass
elif isinstance(nd, DAGOpNode):
condition = dag._map_condition(edge_map, nd.op.condition, dag.cregs.values())
dag._check_condition(nd.op.name, condition)
m_qargs = list(map(lambda x: edge_map.get(x, x), nd.qargs))
m_cargs = list(map(lambda x: edge_map.get(x, x), nd.cargs))
op = nd.op.copy()
op.condition = condition
dag.apply_operation_back(op, m_qargs, m_cargs)
else:
raise DAGCircuitError("bad node type %s" % type(nd))
if not inplace:
return dag
else:
return None
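A hedged sketch of composing a narrower DAG onto specific qubits of a wider one; the circuit contents are illustrative only.
from qiskit import QuantumCircuit
from qiskit.converters import circuit_to_dag
base = circuit_to_dag(QuantumCircuit(3, 1))
sub = QuantumCircuit(2)
sub.cx(0, 1)
# map sub's two qubits onto qubits 1 and 2 of base
base.compose(circuit_to_dag(sub), qubits=[1, 2], inplace=True)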
def reverse_ops(self):
"""Reverse the operations in the ``self`` circuit.
Returns:
DAGCircuit: the reversed dag.
"""
# TODO: speed up
# pylint: disable=cyclic-import
from qiskit.converters import dag_to_circuit, circuit_to_dag
qc = dag_to_circuit(self)
reversed_qc = qc.reverse_ops()
reversed_dag = circuit_to_dag(reversed_qc)
return reversed_dag
def idle_wires(self, ignore=None):
"""Return idle wires.
Args:
ignore (list(str)): List of node names to ignore. Default: []
Yields:
Bit: Bit in idle wire.
Raises:
DAGCircuitError: If the DAG is invalid
"""
if ignore is None:
ignore = set()
ignore_set = set(ignore)
for wire in self._wires:
if not ignore:
if self._is_wire_idle(wire):
yield wire
else:
for node in self.nodes_on_wire(wire, only_ops=True):
if node.op.name not in ignore_set:
# If we found an op node outside of ignore we can stop iterating over the wire
break
else:
yield wire
def size(self):
"""Return the number of operations."""
return len(self._multi_graph) - 2 * len(self._wires)
def depth(self):
"""Return the circuit depth.
Returns:
int: the circuit depth
Raises:
DAGCircuitError: if not a directed acyclic graph
"""
try:
depth = rx.dag_longest_path_length(self._multi_graph) - 1
except rx.DAGHasCycle as ex:
raise DAGCircuitError("not a DAG") from ex
return depth if depth >= 0 else 0
def width(self):
"""Return the total number of qubits + clbits used by the circuit.
This function formerly returned the number of qubits by the calculation
return len(self._wires) - self.num_clbits()
but was changed by issue #2564 to return number of qubits + clbits
with the new function DAGCircuit.num_qubits replacing the former
semantic of DAGCircuit.width().
"""
return len(self._wires)
def num_qubits(self):
"""Return the total number of qubits used by the circuit.
num_qubits() replaces former use of width().
DAGCircuit.width() now returns qubits + clbits for
consistency with Circuit.width() [qiskit-terra #2564].
"""
return len(self.qubits)
def num_clbits(self):
"""Return the total number of classical bits used by the circuit."""
return len(self.clbits)
def num_tensor_factors(self):
"""Compute how many components the circuit can decompose into."""
return rx.number_weakly_connected_components(self._multi_graph)
def _check_wires_list(self, wires, node):
"""Check that a list of wires is compatible with a node to be replaced.
- no duplicate names
- correct length for operation
Raise an exception otherwise.
Args:
wires (list[Bit]): gives an order for (qu)bits
in the input circuit that is replacing the node.
node (DAGOpNode): a node in the dag
Raises:
DAGCircuitError: if check doesn't pass.
"""
if len(set(wires)) != len(wires):
raise DAGCircuitError("duplicate wires")
wire_tot = len(node.qargs) + len(node.cargs)
if node.op.condition is not None:
wire_tot += node.op.condition[0].size
if len(wires) != wire_tot:
raise DAGCircuitError("expected %d wires, got %d" % (wire_tot, len(wires)))
def __eq__(self, other):
# Try to convert to float, but in case of unbound ParameterExpressions
# a TypeError will be raised; fall back to normal equality in those
# cases
try:
self_phase = float(self.global_phase)
other_phase = float(other.global_phase)
if (
abs((self_phase - other_phase + np.pi) % (2 * np.pi) - np.pi) > 1.0e-10
): # TODO: atol?
return False
except TypeError:
if self.global_phase != other.global_phase:
return False
if self.calibrations != other.calibrations:
return False
self_bit_indices = {bit: idx for idx, bit in enumerate(self.qubits + self.clbits)}
other_bit_indices = {bit: idx for idx, bit in enumerate(other.qubits + other.clbits)}
self_qreg_indices = [
(regname, [self_bit_indices[bit] for bit in reg]) for regname, reg in self.qregs.items()
]
self_creg_indices = [
(regname, [self_bit_indices[bit] for bit in reg]) for regname, reg in self.cregs.items()
]
other_qreg_indices = [
(regname, [other_bit_indices[bit] for bit in reg])
for regname, reg in other.qregs.items()
]
other_creg_indices = [
(regname, [other_bit_indices[bit] for bit in reg])
for regname, reg in other.cregs.items()
]
if self_qreg_indices != other_qreg_indices or self_creg_indices != other_creg_indices:
return False
def node_eq(node_self, node_other):
return DAGNode.semantic_eq(node_self, node_other, self_bit_indices, other_bit_indices)
return rx.is_isomorphic_node_match(self._multi_graph, other._multi_graph, node_eq)
def topological_nodes(self, key=None):
"""
Yield nodes in topological order.
Args:
key (Callable): A callable which will take a DAGNode object and
return a string sort key. If not specified the
:attr:`~qiskit.dagcircuit.DAGNode.sort_key` attribute will be
used as the sort key for each node.
Returns:
generator(DAGOpNode, DAGInNode, or DAGOutNode): node in topological order
"""
def _key(x):
return x.sort_key
if key is None:
key = _key
return iter(rx.lexicographical_topological_sort(self._multi_graph, key=key))
def topological_op_nodes(self, key=None):
"""
Yield op nodes in topological order.
Allowed to pass in specific key to break ties in top order
Args:
key (Callable): A callable which will take a DAGNode object and
return a string sort key. If not specified the
:attr:`~qiskit.dagcircuit.DAGNode.sort_key` attribute will be
used as the sort key for each node.
Returns:
generator(DAGOpNode): op node in topological order
"""
return (nd for nd in self.topological_nodes(key) if isinstance(nd, DAGOpNode))
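For illustration only, given any `dag` (for instance the one built in the earlier apply_operation_back sketch): iterating op nodes in topological order is the usual way transpiler passes walk a circuit.
for node in dag.topological_op_nodes():
    print(node.op.name, [dag.qubits.index(q) for q in node.qargs])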
def replace_block_with_op(self, node_block, op, wire_pos_map, cycle_check=True):
"""Replace a block of nodes with a single.
This is used to consolidate a block of DAGOpNodes into a single
operation. A typical example is a block of gates being consolidated
into a single ``UnitaryGate`` representing the unitary matrix of the
block.
Args:
node_block (List[DAGNode]): A list of dag nodes that represents the
node block to be replaced
op (qiskit.circuit.Instruction): The instruction to replace the
block with
wire_pos_map (Dict[Qubit, int]): The dictionary mapping the qarg to
the position. This is necessary to reconstruct the qarg order
over multiple gates in the combined single op node.
cycle_check (bool): When set to True this method will check that
replacing the provided ``node_block`` with a single node
would introduce a cycle (which would invalidate the
``DAGCircuit``) and will raise a ``DAGCircuitError`` if a cycle
would be introduced. This checking comes with a run time
penalty, if you can guarantee that your input ``node_block`` is
a contiguous block and won't introduce a cycle when it's
contracted to a single node, this can be set to ``False`` to
improve the runtime performance of this method.
Raises:
DAGCircuitError: if ``cycle_check`` is set to ``True`` and replacing
the specified block introduces a cycle or if ``node_block`` is
empty.
"""
# TODO: Replace this with a function in retworkx to do this operation in
# the graph
block_preds = defaultdict(set)
block_succs = defaultdict(set)
block_qargs = set()
block_cargs = set()
block_ids = {x._node_id for x in node_block}
# If node block is empty return early
if not node_block:
raise DAGCircuitError("Can't replace an empty node_block")
for nd in node_block:
for parent_id, _, edge in self._multi_graph.in_edges(nd._node_id):
if parent_id not in block_ids:
block_preds[parent_id].add(edge)
for _, child_id, edge in self._multi_graph.out_edges(nd._node_id):
if child_id not in block_ids:
block_succs[child_id].add(edge)
block_qargs |= set(nd.qargs)
if isinstance(nd, DAGOpNode) and nd.op.condition:
block_cargs |= set(nd.cargs)
if cycle_check:
# If we're cycle checking copy the graph to ensure we don't create
# invalid DAG when we encounter a cycle
backup_graph = self._multi_graph.copy()
# Add node and wire it into graph
new_index = self._add_op_node(
op,
sorted(block_qargs, key=lambda x: wire_pos_map[x]),
sorted(block_cargs, key=lambda x: wire_pos_map[x]),
)
for node_id, edges in block_preds.items():
for edge in edges:
self._multi_graph.add_edge(node_id, new_index, edge)
for node_id, edges in block_succs.items():
for edge in edges:
self._multi_graph.add_edge(new_index, node_id, edge)
for nd in node_block:
self._multi_graph.remove_node(nd._node_id)
# If enabled ensure block won't introduce a cycle when node_block is
# contracted
if cycle_check:
# If a cycle was introduced remove new op node and raise error
if not rx.is_directed_acyclic_graph(self._multi_graph):
# If a cycle was encountered restore the graph to the
# original valid state
self._multi_graph = backup_graph
self._decrement_op(op)
raise DAGCircuitError("Replacing the specified node block would introduce a cycle")
for nd in node_block:
self._decrement_op(nd.op)
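A hedged sketch in the spirit of a consolidation pass (e.g. ConsolidateBlocks): collapse a contiguous block into one UnitaryGate carrying the block's matrix. The two-gate circuit is a placeholder.
from qiskit import QuantumCircuit
from qiskit.converters import circuit_to_dag
from qiskit.extensions import UnitaryGate
from qiskit.quantum_info import Operator
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
dag = circuit_to_dag(qc)
block = list(dag.topological_op_nodes())                  # the h and cx nodes form one block
wire_pos_map = {q: i for i, q in enumerate(dag.qubits)}
dag.replace_block_with_op(block, UnitaryGate(Operator(qc)), wire_pos_map, cycle_check=False)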
def substitute_node_with_dag(self, node, input_dag, wires=None):
"""Replace one node with dag.
Args:
node (DAGOpNode): node to substitute
input_dag (DAGCircuit): circuit that will substitute the node
wires (list[Bit]): gives an order for (qu)bits
in the input circuit. This order gets matched to the node wires
by qargs first, then cargs, then conditions.
Returns:
dict: maps node IDs from `input_dag` to their new node incarnations in `self`.
Raises:
DAGCircuitError: if met with unexpected predecessor/successors
"""
in_dag = input_dag
# the dag must be amended if used in a
# conditional context. delete the op nodes and replay
# them with the condition.
if node.op.condition:
in_dag = copy.deepcopy(input_dag)
in_dag.add_creg(node.op.condition[0])
to_replay = []
for sorted_node in in_dag.topological_nodes():
if isinstance(sorted_node, DAGOpNode):
sorted_node.op.condition = node.op.condition
to_replay.append(sorted_node)
for input_node in in_dag.op_nodes():
in_dag.remove_op_node(input_node)
for replay_node in to_replay:
in_dag.apply_operation_back(replay_node.op, replay_node.qargs, replay_node.cargs)
if in_dag.global_phase:
self.global_phase += in_dag.global_phase
if wires is None:
wires = in_dag.wires
wire_set = set(wires)
self._check_wires_list(wires, node)
# Create a proxy wire_map to identify fragments and duplicates
# and determine what registers need to be added to self
add_qregs = self._check_edgemap_registers(wires, in_dag.qregs.values())
for qreg in add_qregs:
self.add_qreg(qreg)
add_cregs = self._check_edgemap_registers(wires, in_dag.cregs.values())
for creg in add_cregs:
self.add_creg(creg)
# Replace the node by iterating through the input_circuit.
# Constructing and checking the validity of the wire_map.
# If a gate is conditioned, we expect the replacement subcircuit
# to depend on those condition bits as well.
if not isinstance(node, DAGOpNode):
raise DAGCircuitError("expected node DAGOpNode, got %s" % type(node))
condition_bit_list = self._bits_in_condition(node.op.condition)
new_wires = list(node.qargs) + list(node.cargs) + list(condition_bit_list)
wire_map = {}
reverse_wire_map = {}
for wire, new_wire in zip(wires, new_wires):
wire_map[wire] = new_wire
reverse_wire_map[new_wire] = wire
self._check_wiremap_validity(wire_map, wires, self.input_map)
if condition_bit_list:
# If we are replacing a conditional node, map input dag through
# wire_map to verify that it will not modify any of the conditioning
# bits.
condition_bits = set(condition_bit_list)
for op_node in in_dag.op_nodes():
mapped_cargs = {wire_map[carg] for carg in op_node.cargs}
if condition_bits & mapped_cargs:
raise DAGCircuitError(
"Mapped DAG would alter clbits on which it would be conditioned."
)
# Add wire from pred to succ if no ops on mapped wire on ``in_dag``
# retworkx's substitute_node_with_subgraph lacks the DAGCircuit
# context to know what to do in this case (the method won't even see
# these nodes because they're filtered) so we manually retain the
# edges prior to calling substitute_node_with_subgraph and set the
# edge_map_fn callback kwarg to skip these edges when they're
# encountered.
for wire in wires:
input_node = in_dag.input_map[wire]
output_node = in_dag.output_map[wire]
if in_dag._multi_graph.has_edge(input_node._node_id, output_node._node_id):
self_wire = wire_map[wire]
pred = self._multi_graph.find_predecessors_by_edge(
node._node_id, lambda edge, wire=self_wire: edge == wire
)[0]
succ = self._multi_graph.find_successors_by_edge(
node._node_id, lambda edge, wire=self_wire: edge == wire
)[0]
self._multi_graph.add_edge(pred._node_id, succ._node_id, self_wire)
# Exclude any nodes from in_dag that are not a DAGOpNode or are on
# bits outside the set specified by the wires kwarg
def filter_fn(node):
if not isinstance(node, DAGOpNode):
return False
for qarg in node.qargs:
if qarg not in wire_set:
return False
return True
# Map edges into and out of node to the appropriate node from in_dag
def edge_map_fn(source, _target, self_wire):
wire = reverse_wire_map[self_wire]
# successor edge
if source == node._node_id:
wire_output_id = in_dag.output_map[wire]._node_id
out_index = in_dag._multi_graph.predecessor_indices(wire_output_id)[0]
# Edges directly from input nodes to output nodes in in_dag are
# already handled prior to calling retworkx. Don't map these edges
# in retworkx.
if not isinstance(in_dag._multi_graph[out_index], DAGOpNode):
return None
# predecessor edge
else:
wire_input_id = in_dag.input_map[wire]._node_id
out_index = in_dag._multi_graph.successor_indices(wire_input_id)[0]
# Edges directly from input nodes to output nodes in in_dag are
# already handled prior to calling retworkx. Don't map these edges
# in retworkx.
if not isinstance(in_dag._multi_graph[out_index], DAGOpNode):
return None
return out_index
# Adjust edge weights from in_dag
def edge_weight_map(wire):
return wire_map[wire]
node_map = self._multi_graph.substitute_node_with_subgraph(
node._node_id, in_dag._multi_graph, edge_map_fn, filter_fn, edge_weight_map
)
self._decrement_op(node.op)
# Iterate over nodes of input_circuit and update wires in node objects migrated
# from in_dag
for old_node_index, new_node_index in node_map.items():
# update node attributes
old_node = in_dag._multi_graph[old_node_index]
condition = self._map_condition(wire_map, old_node.op.condition, self.cregs.values())
m_qargs = [wire_map.get(x, x) for x in old_node.qargs]
m_cargs = [wire_map.get(x, x) for x in old_node.cargs]
new_node = DAGOpNode(old_node.op, qargs=m_qargs, cargs=m_cargs)
new_node._node_id = new_node_index
new_node.op.condition = condition
self._multi_graph[new_node_index] = new_node
self._increment_op(new_node.op)
return {k: self._multi_graph[v] for k, v in node_map.items()}
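A hedged sketch: replacing an H node with an equivalent RZ-SX-RZ mini-DAG (the global phase of pi/4 makes the decomposition exact), which is essentially what basis-translation passes do with this method.
import math
from qiskit import QuantumCircuit
from qiskit.converters import circuit_to_dag
qc = QuantumCircuit(1)
qc.h(0)
dag = circuit_to_dag(qc)
h_node = dag.named_nodes("h")[0]
mini = QuantumCircuit(1, global_phase=math.pi / 4)
mini.rz(math.pi / 2, 0)
mini.sx(0)
mini.rz(math.pi / 2, 0)
dag.substitute_node_with_dag(h_node, circuit_to_dag(mini))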
def substitute_node(self, node, op, inplace=False):
"""Replace an DAGOpNode with a single instruction. qargs, cargs and
conditions for the new instruction will be inferred from the node to be
replaced. The new instruction will be checked to match the shape of the
replaced instruction.
Args:
node (DAGOpNode): Node to be replaced
op (qiskit.circuit.Instruction): The :class:`qiskit.circuit.Instruction`
instance to be added to the DAG
inplace (bool): Optional, default False. If True, existing DAG node
will be modified to include op. Otherwise, a new DAG node will
be used.
Returns:
DAGOpNode: the new node containing the added instruction.
Raises:
DAGCircuitError: If replacement instruction was incompatible with
location of target node.
"""
if not isinstance(node, DAGOpNode):
raise DAGCircuitError("Only DAGOpNodes can be replaced.")
if node.op.num_qubits != op.num_qubits or node.op.num_clbits != op.num_clbits:
raise DAGCircuitError(
"Cannot replace node of width ({} qubits, {} clbits) with "
"instruction of mismatched width ({} qubits, {} clbits).".format(
node.op.num_qubits, node.op.num_clbits, op.num_qubits, op.num_clbits
)
)
if inplace:
if op.name != node.op.name:
self._increment_op(op)
self._decrement_op(node.op)
save_condition = node.op.condition
node.op = op
node.op.condition = save_condition
return node
new_node = copy.copy(node)
save_condition = new_node.op.condition
new_node.op = op
new_node.op.condition = save_condition
self._multi_graph[node._node_id] = new_node
if op.name != node.op.name:
self._increment_op(op)
self._decrement_op(node.op)
return new_node
def node(self, node_id):
"""Get the node in the dag.
Args:
node_id(int): Node identifier.
Returns:
node: the node.
"""
return self._multi_graph[node_id]
def nodes(self):
"""Iterator for node values.
Yield:
node: the node.
"""
yield from self._multi_graph.nodes()
def edges(self, nodes=None):
"""Iterator for edge values and source and dest node
This works by returning the output edges from the specified nodes. If
no nodes are specified all edges from the graph are returned.
Args:
nodes (DAGOpNode, DAGInNode, DAGOutNode, or a list of these):
Either a list of nodes or a single input node. If none is specified,
all edges are returned from the graph.
Yield:
edge: the edge in the same format as out_edges the tuple
(source node, destination node, edge data)
"""
if nodes is None:
nodes = self._multi_graph.nodes()
elif isinstance(nodes, (DAGOpNode, DAGInNode, DAGOutNode)):
nodes = [nodes]
for node in nodes:
raw_nodes = self._multi_graph.out_edges(node._node_id)
for source, dest, edge in raw_nodes:
yield (self._multi_graph[source], self._multi_graph[dest], edge)
def op_nodes(self, op=None, include_directives=True):
"""Get the list of "op" nodes in the dag.
Args:
op (Type): :class:`qiskit.circuit.Instruction` subclass op nodes to
return. If None, return all op nodes.
include_directives (bool): include `barrier`, `snapshot` etc.
Returns:
list[DAGOpNode]: the list of node ids containing the given op.
"""
nodes = []
for node in self._multi_graph.nodes():
if isinstance(node, DAGOpNode):
if not include_directives and node.op._directive:
continue
if op is None or isinstance(node.op, op):
nodes.append(node)
return nodes
def gate_nodes(self):
"""Get the list of gate nodes in the dag.
Returns:
list[DAGOpNode]: the list of DAGOpNodes that represent gates.
"""
nodes = []
for node in self.op_nodes():
if isinstance(node.op, Gate):
nodes.append(node)
return nodes
def named_nodes(self, *names):
"""Get the set of "op" nodes with the given name."""
named_nodes = []
for node in self._multi_graph.nodes():
if isinstance(node, DAGOpNode) and node.op.name in names:
named_nodes.append(node)
return named_nodes
def two_qubit_ops(self):
"""Get list of 2 qubit operations. Ignore directives like snapshot and barrier."""
ops = []
for node in self.op_nodes(include_directives=False):
if len(node.qargs) == 2:
ops.append(node)
return ops
def multi_qubit_ops(self):
"""Get list of 3+ qubit operations. Ignore directives like snapshot and barrier."""
ops = []
for node in self.op_nodes(include_directives=False):
if len(node.qargs) >= 3:
ops.append(node)
return ops
def longest_path(self):
"""Returns the longest path in the dag as a list of DAGOpNodes, DAGInNodes, and DAGOutNodes."""
return [self._multi_graph[x] for x in rx.dag_longest_path(self._multi_graph)]
def successors(self, node):
"""Returns iterator of the successors of a node as DAGOpNodes and DAGOutNodes."""
return iter(self._multi_graph.successors(node._node_id))
def predecessors(self, node):
"""Returns iterator of the predecessors of a node as DAGOpNodes and DAGInNodes."""
return iter(self._multi_graph.predecessors(node._node_id))
def is_successor(self, node, node_succ):
"""Checks if a second node is in the successors of node."""
return self._multi_graph.has_edge(node._node_id, node_succ._node_id)
def is_predecessor(self, node, node_pred):
"""Checks if a second node is in the predecessors of node."""
return self._multi_graph.has_edge(node_pred._node_id, node._node_id)
def quantum_predecessors(self, node):
"""Returns iterator of the predecessors of a node that are
connected by a quantum edge as DAGOpNodes and DAGInNodes."""
return iter(
self._multi_graph.find_predecessors_by_edge(
node._node_id, lambda edge_data: isinstance(edge_data, Qubit)
)
)
def ancestors(self, node):
"""Returns set of the ancestors of a node as DAGOpNodes and DAGInNodes."""
return {self._multi_graph[x] for x in rx.ancestors(self._multi_graph, node._node_id)}
def descendants(self, node):
"""Returns set of the descendants of a node as DAGOpNodes and DAGOutNodes."""
return {self._multi_graph[x] for x in rx.descendants(self._multi_graph, node._node_id)}
def bfs_successors(self, node):
"""
Returns an iterator of tuples of (DAGNode, [DAGNodes]) where the DAGNode is the current node
and [DAGNodes] are its successors in BFS order.
"""
return iter(rx.bfs_successors(self._multi_graph, node._node_id))
def quantum_successors(self, node):
"""Returns iterator of the successors of a node that are
connected by a quantum edge as Opnodes and DAGOutNodes."""
return iter(
self._multi_graph.find_successors_by_edge(
node._node_id, lambda edge_data: isinstance(edge_data, Qubit)
)
)
def remove_op_node(self, node):
"""Remove an operation node n.
Add edges from predecessors to successors.
"""
if not isinstance(node, DAGOpNode):
raise DAGCircuitError(
'The method remove_op_node only works on DAGOpNodes. A "%s" '
"node type was wrongly provided." % type(node)
)
self._multi_graph.remove_node_retain_edges(
node._node_id, use_outgoing=False, condition=lambda edge1, edge2: edge1 == edge2
)
self._decrement_op(node.op)
def remove_ancestors_of(self, node):
"""Remove all of the ancestor operation nodes of node."""
anc = rx.ancestors(self._multi_graph, node)
# TODO: probably better to do all at once using
# multi_graph.remove_nodes_from; same for related functions ...
for anc_node in anc:
if isinstance(anc_node, DAGOpNode):
self.remove_op_node(anc_node)
def remove_descendants_of(self, node):
"""Remove all of the descendant operation nodes of node."""
desc = rx.descendants(self._multi_graph, node)
for desc_node in desc:
if isinstance(desc_node, DAGOpNode):
self.remove_op_node(desc_node)
def remove_nonancestors_of(self, node):
"""Remove all of the non-ancestors operation nodes of node."""
anc = rx.ancestors(self._multi_graph, node)
comp = list(set(self._multi_graph.nodes()) - set(anc))
for n in comp:
if isinstance(n, DAGOpNode):
self.remove_op_node(n)
def remove_nondescendants_of(self, node):
"""Remove all of the non-descendants operation nodes of node."""
dec = rx.descendants(self._multi_graph, node)
comp = list(set(self._multi_graph.nodes()) - set(dec))
for n in comp:
if isinstance(n, DAGOpNode):
self.remove_op_node(n)
def front_layer(self):
"""Return a list of op nodes in the first layer of this dag."""
graph_layers = self.multigraph_layers()
try:
next(graph_layers) # Remove input nodes
except StopIteration:
return []
op_nodes = [node for node in next(graph_layers) if isinstance(node, DAGOpNode)]
return op_nodes
def layers(self):
"""Yield a shallow view on a layer of this DAGCircuit for all d layers of this circuit.
A layer is a circuit whose gates act on disjoint qubits, i.e.,
a layer has depth 1. The total number of layers equals the
circuit depth d. The layers are indexed from 0 to d-1 with the
earliest layer at index 0. The layers are constructed using a
greedy algorithm. Each returned layer is a dict containing
{"graph": circuit graph, "partition": list of qubit lists}.
The returned layer contains new (but semantically equivalent) DAGOpNodes, DAGInNodes,
and DAGOutNodes. These are not the same as nodes of the original dag, but are equivalent
via DAGNode.semantic_eq(node1, node2).
TODO: Gates that use the same cbits will end up in different
layers as this is currently implemented. This may not be
the desired behavior.
"""
graph_layers = self.multigraph_layers()
try:
next(graph_layers) # Remove input nodes
except StopIteration:
return
for graph_layer in graph_layers:
# Get the op nodes from the layer, removing any input and output nodes.
op_nodes = [node for node in graph_layer if isinstance(node, DAGOpNode)]
# Sort to make sure they are in the order they were added to the original DAG
# It has to be done by node_id as graph_layer is just a list of nodes
# with no implied topology
# Drawing tools rely on _node_id to infer order of node creation
# so we need this to be preserved by layers()
op_nodes.sort(key=lambda nd: nd._node_id)
# Stop yielding once there are no more op_nodes in a layer.
if not op_nodes:
return
# Construct a shallow copy of self
new_layer = self._copy_circuit_metadata()
for node in op_nodes:
# this creates new DAGOpNodes in the new_layer
new_layer.apply_operation_back(node.op, node.qargs, node.cargs)
# The quantum registers that have an operation in this layer.
support_list = [
op_node.qargs for op_node in new_layer.op_nodes() if not op_node.op._directive
]
yield {"graph": new_layer, "partition": support_list}
def serial_layers(self):
"""Yield a layer for all gates of this circuit.
A serial layer is a circuit with one gate. The layers have the
same structure as in layers().
"""
for next_node in self.topological_op_nodes():
new_layer = self._copy_circuit_metadata()
# Save the support of the operation we add to the layer
support_list = []
# Operation data
op = copy.copy(next_node.op)
qargs = copy.copy(next_node.qargs)
cargs = copy.copy(next_node.cargs)
condition = copy.copy(next_node.op.condition)
_ = self._bits_in_condition(condition)
# Add node to new_layer
new_layer.apply_operation_back(op, qargs, cargs)
# Add operation to partition
if not next_node.op._directive:
support_list.append(list(qargs))
l_dict = {"graph": new_layer, "partition": support_list}
yield l_dict
def multigraph_layers(self):
"""Yield layers of the multigraph."""
first_layer = [x._node_id for x in self.input_map.values()]
return iter(rx.layers(self._multi_graph, first_layer))
def collect_runs(self, namelist):
"""Return a set of non-conditional runs of "op" nodes with the given names.
For example, "... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; .."
would produce the tuple of cx nodes as an element of the set returned
from a call to collect_runs(["cx"]). If instead the cx nodes were
"cx q[0],q[1]; cx q[1],q[0];", the method would still return the
pair in a tuple. The namelist can contain names that are not
in the circuit's basis.
Nodes must have only one successor to continue the run.
"""
def filter_fn(node):
return (
isinstance(node, DAGOpNode)
and node.op.name in namelist
and node.op.condition is None
)
group_list = rx.collect_runs(self._multi_graph, filter_fn)
return {tuple(x) for x in group_list}
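For illustration, given a dag like the one from the layers sketch: optimization passes such as CXCancellation inspect these runs to find adjacent gates that may cancel.
for run in dag.collect_runs(["cx"]):
    print("found a run of", len(run), "cx nodes")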
def collect_1q_runs(self):
"""Return a set of non-conditional runs of 1q "op" nodes."""
def filter_fn(node):
return (
isinstance(node, DAGOpNode)
and len(node.qargs) == 1
and len(node.cargs) == 0
and node.op.condition is None
and not node.op.is_parameterized()
and isinstance(node.op, Gate)
and hasattr(node.op, "__array__")
)
return rx.collect_runs(self._multi_graph, filter_fn)
def collect_2q_runs(self):
"""Return a set of non-conditional runs of 2q "op" nodes."""
to_qid = {}
for i, qubit in enumerate(self.qubits):
to_qid[qubit] = i
def filter_fn(node):
if isinstance(node, DAGOpNode):
return (
isinstance(node.op, Gate)
and len(node.qargs) <= 2
and not node.op.condition
and not node.op.is_parameterized()
)
else:
return None
def color_fn(edge):
if isinstance(edge, Qubit):
return to_qid[edge]
else:
return None
return rx.collect_bicolor_runs(self._multi_graph, filter_fn, color_fn)
def nodes_on_wire(self, wire, only_ops=False):
"""
Iterator for nodes that affect a given wire.
Args:
wire (Bit): the wire to be looked at.
only_ops (bool): True if only the ops nodes are wanted;
otherwise, all nodes are returned.
Yield:
Iterator: the successive nodes on the given wire
Raises:
DAGCircuitError: if the given wire doesn't exist in the DAG
"""
current_node = self.input_map.get(wire, None)
if not current_node:
raise DAGCircuitError("The given wire %s is not present in the circuit" % str(wire))
more_nodes = True
while more_nodes:
more_nodes = False
# allow user to just get ops on the wire - not the input/output nodes
if isinstance(current_node, DAGOpNode) or not only_ops:
yield current_node
try:
current_node = self._multi_graph.find_adjacent_node_by_edge(
current_node._node_id, lambda x: wire == x
)
more_nodes = True
except rx.NoSuitableNeighbors:
pass
def count_ops(self):
"""Count the occurrences of operation names.
Returns a dictionary of counts keyed on the operation name.
"""
return self._op_names.copy()
def count_ops_longest_path(self):
"""Count the occurrences of operation names on the longest path.
Returns a dictionary of counts keyed on the operation name.
"""
op_dict = {}
path = self.longest_path()
path = path[1:-1] # remove qubits at beginning and end of path
for node in path:
name = node.op.name
if name not in op_dict:
op_dict[name] = 1
else:
op_dict[name] += 1
return op_dict
def properties(self):
"""Return a dictionary of circuit properties."""
summary = {
"size": self.size(),
"depth": self.depth(),
"width": self.width(),
"qubits": self.num_qubits(),
"bits": self.num_clbits(),
"factors": self.num_tensor_factors(),
"operations": self.count_ops(),
}
return summary
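For illustration, with the three-qubit dag from the layers sketch the summary would look roughly like this.
print(dag.properties())
# {'size': 3, 'depth': 3, 'width': 3, 'qubits': 3, 'bits': 0,
#  'factors': 1, 'operations': {'h': 1, 'cx': 2}}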
def draw(self, scale=0.7, filename=None, style="color"):
"""
Draws the dag circuit.
This function needs `pydot <https://github.com/erocarrera/pydot>`_, which in turn needs
`Graphviz <https://www.graphviz.org/>`_ to be installed.
Args:
scale (float): scaling factor
filename (str): file path to save image to (format inferred from name)
style (str):
'plain': B&W graph;
'color' (default): color input/output/op nodes
Returns:
Ipython.display.Image: if in Jupyter notebook and not saving to file,
otherwise None.
"""
from qiskit.visualization.dag_visualization import dag_drawer
return dag_drawer(dag=self, scale=scale, filename=filename, style=style)
| 39.225169
| 103
| 0.603148
|
b89481cb119288cb905b90d466236208f60eb5c4
| 1,421
|
py
|
Python
|
pipoh/concrete_factories/bayesian_folder/bayesian_strategies/EW.py
|
faprieto96/pyInvestment
|
a5c1bdb7823df0df215c3ac55dc8427fd18b2e15
|
[
"MIT"
] | null | null | null |
pipoh/concrete_factories/bayesian_folder/bayesian_strategies/EW.py
|
faprieto96/pyInvestment
|
a5c1bdb7823df0df215c3ac55dc8427fd18b2e15
|
[
"MIT"
] | 1
|
2022-02-19T20:06:17.000Z
|
2022-02-19T20:06:25.000Z
|
pipoh/concrete_factories/bayesian_folder/bayesian_strategies/EW.py
|
faprieto96/pyInvestment
|
a5c1bdb7823df0df215c3ac55dc8427fd18b2e15
|
[
"MIT"
] | null | null | null |
from numpy import array, dot
from qpsolvers import solve_qp
import bayes_opt
from bayes_opt import BayesianOptimization
#from Strategy import errorLoss, rollingWindowsValidation
#Funcion para Trasposición conjugada compleja en Python
from numpy import ndarray
class myarray(ndarray):
@property
def H(self):
return self.conj().T
from Strategy import getWeights, rollingWindowsValidation
import numpy as np
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets import make_gaussian_quantiles
class EW:
def __init__(self, data):
self.data = data
# The Equally Weighted (1/N) strategy.
# Unlike the Markowitz-style strategies in this package, it requires no
# optimization: every asset simply receives the same weight 1/N.
class obj:
name = 'Equally Weighted Strategy'
pass
# Description: This function runs the corresponding strategy, fitting the model weights.
def solveOptimizationProblem(obj, data, vars):
# Type: It returns the optimized weights
# Compute numbers of data points and assets
(numElements, N) = data.shape
# Equal weighting: assign 1/N to each of the N assets
W = np.ones((N, 1)) * (1 / N)
return W
def config(obj,data,vars, varsCV):
return obj
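A hedged usage sketch, assuming the class above is importable and that the input is a (days x assets) return matrix: the equally weighted rule ignores the history and assigns 1/N to every asset.
import numpy as np
returns = np.random.default_rng(0).normal(size=(250, 5))   # 250 days, 5 assets
ew = EW(returns)
weights = ew.solveOptimizationProblem(returns, vars=None)
print(weights.ravel())                                     # [0.2 0.2 0.2 0.2 0.2]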
| 25.375
| 92
| 0.711471
|
a287956988381b496c7b4b58d316c8bda96772ee
| 2,405
|
py
|
Python
|
qiskit_ibm_runtime/runtime_options.py
|
rathishcholarajan/qiskit-ibm-runtime
|
315a088a844dc8aa4452bde6136b53694dfb3220
|
[
"Apache-2.0"
] | 20
|
2021-11-24T07:38:45.000Z
|
2022-03-27T06:54:30.000Z
|
qiskit_ibm_runtime/runtime_options.py
|
rathishcholarajan/qiskit-ibm-runtime
|
315a088a844dc8aa4452bde6136b53694dfb3220
|
[
"Apache-2.0"
] | 188
|
2021-11-18T18:59:38.000Z
|
2022-03-31T23:35:29.000Z
|
qiskit_ibm_runtime/runtime_options.py
|
rathishcholarajan/qiskit-ibm-runtime
|
315a088a844dc8aa4452bde6136b53694dfb3220
|
[
"Apache-2.0"
] | 20
|
2021-11-18T21:28:59.000Z
|
2022-03-24T13:46:06.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Runtime options that control the execution environment."""
import re
import logging
from dataclasses import dataclass
from typing import Optional
from .exceptions import IBMInputValueError
@dataclass
class RuntimeOptions:
"""Class for representing runtime execution options.
Args:
backend_name: target backend to run on. This is required for ``ibm_quantum`` runtime.
image: the runtime image used to execute the program, specified in
the form of ``image_name:tag``. Not all accounts are
authorized to select a different image.
log_level: logging level to set in the execution environment. The valid
log levels are: ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, and ``CRITICAL``.
The default level is ``WARNING``.
"""
backend_name: Optional[str] = None
image: Optional[str] = None
log_level: Optional[str] = None
def validate(self, channel: str) -> None:
"""Validate options.
Args:
channel: channel type.
Raises:
IBMInputValueError: If one or more option is invalid.
"""
if self.image and not re.match(
"[a-zA-Z0-9]+([/.\\-_][a-zA-Z0-9]+)*:[a-zA-Z0-9]+([.\\-_][a-zA-Z0-9]+)*$",
self.image,
):
raise IBMInputValueError('"image" needs to be in form of image_name:tag')
if channel == "ibm_quantum" and not self.backend_name:
raise IBMInputValueError(
'"backend_name" is required field in "options" for ``ibm_quantum`` runtime.'
)
if self.log_level and not isinstance(
logging.getLevelName(self.log_level.upper()), int
):
raise IBMInputValueError(
f"{self.log_level} is not a valid log level. The valid log levels are: `DEBUG`, "
f"`INFO`, `WARNING`, `ERROR`, and `CRITICAL`."
)
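A hedged sketch of how these options are typically constructed and validated; the backend name is a placeholder.
options = RuntimeOptions(backend_name="ibmq_qasm_simulator", log_level="INFO")
options.validate(channel="ibm_quantum")              # passes
bad = RuntimeOptions(image="not a valid image name")
try:
    bad.validate(channel="ibm_cloud")
except IBMInputValueError as err:
    print(err)                                       # image must be image_name:tag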
| 35.367647
| 97
| 0.63368
|
e21a5c78b56de3fbd70750eea0156339c01e2e2f
| 2,524
|
py
|
Python
|
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py
|
banhr/neutron
|
4b3e73648327ce9f4d3437986a8663372f577f1b
|
[
"Apache-2.0"
] | 4
|
2018-08-05T00:43:03.000Z
|
2021-10-13T00:45:45.000Z
|
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | 8
|
2018-06-14T14:50:16.000Z
|
2018-11-13T16:30:42.000Z
|
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | 7
|
2018-06-12T18:57:04.000Z
|
2019-05-09T15:42:30.000Z
|
# Copyright (C) 2015 VA Linux Systems Japan K.K.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from oslo_utils import excutils
import ryu.app.ofctl.api # noqa
from ryu.base import app_manager
from ryu.lib import hub
from ryu.ofproto import ofproto_v1_3
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import br_int
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import br_phys
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import br_tun
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_neutron_agent as ovs_agent
LOG = logging.getLogger(__name__)
def agent_main_wrapper(bridge_classes):
try:
ovs_agent.main(bridge_classes)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Agent main thread died of an exception")
finally:
# The following call terminates Ryu's AppManager.run_apps(),
# which is needed for clean shutdown of an agent process.
# The close() call must be called in another thread, otherwise
# it suicides and ends prematurely.
hub.spawn(app_manager.AppManager.get_instance().close)
class OVSNeutronAgentRyuApp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def start(self):
# Start Ryu event loop thread
super(OVSNeutronAgentRyuApp, self).start()
def _make_br_cls(br_cls):
return functools.partial(br_cls, ryu_app=self)
# Start agent main loop thread
bridge_classes = {
'br_int': _make_br_cls(br_int.OVSIntegrationBridge),
'br_phys': _make_br_cls(br_phys.OVSPhysicalBridge),
'br_tun': _make_br_cls(br_tun.OVSTunnelBridge),
}
return hub.spawn(agent_main_wrapper, bridge_classes, raise_error=True)
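
The start() method above relies on functools.partial to pre-bind each bridge class to the running Ryu app. A tiny self-contained illustration of that pattern (FakeBridge and the string values are made-up stand-ins):

import functools

class FakeBridge:
    def __init__(self, br_name, ryu_app=None):
        self.br_name = br_name
        self.ryu_app = ryu_app

# Bind the keyword argument once; callers only supply the bridge name later.
make_br = functools.partial(FakeBridge, ryu_app="running-ryu-app")
bridges = {name: make_br(name) for name in ("br-int", "br-phys", "br-tun")}
assert all(b.ryu_app == "running-ryu-app" for b in bridges.values())
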
avg_line_length: 36.57971 | max_line_length: 78 | alphanum_fraction: 0.723851

hexsha: 4239fc575824d838c1f9e2e35b23420292b23ae8 | size: 11,017 | ext: py | lang: Python
max_stars_repo_path: pyro/infer/util.py | max_stars_repo_name: jrmcornish/pyro | max_stars_repo_head_hexsha: 38914d5eb596dc140e226031534ff4ea7903dc35 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-04-11T04:30:55.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-29T18:45:08.000Z
max_issues_repo_path: pyro/infer/util.py | max_issues_repo_name: jrmcornish/pyro | max_issues_repo_head_hexsha: 38914d5eb596dc140e226031534ff4ea7903dc35 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyro/infer/util.py | max_forks_repo_name: jrmcornish/pyro | max_forks_repo_head_hexsha: 38914d5eb596dc140e226031534ff4ea7903dc35 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import math
import numbers
from collections import Counter, defaultdict
import torch
from opt_einsum import shared_intermediates
from opt_einsum.sharing import count_cached_ops
from pyro.distributions.util import is_identically_zero
from pyro.ops import packed
from pyro.ops.einsum.adjoint import require_backward
from pyro.ops.rings import MarginalRing
from pyro.poutine.util import site_is_subsample
_VALIDATION_ENABLED = False
LAST_CACHE_SIZE = [Counter()] # for profiling
def enable_validation(is_validate):
global _VALIDATION_ENABLED
_VALIDATION_ENABLED = is_validate
def is_validation_enabled():
return _VALIDATION_ENABLED
def torch_item(x):
"""
Like ``x.item()`` for a :class:`~torch.Tensor`, but also works with numbers.
"""
return x if isinstance(x, numbers.Number) else x.item()
def torch_backward(x, retain_graph=None):
"""
Like ``x.backward()`` for a :class:`~torch.Tensor`, but also accepts
numbers and tensors without grad_fn (resulting in a no-op)
"""
if torch.is_tensor(x) and x.grad_fn:
x.backward(retain_graph=retain_graph)
def torch_exp(x):
"""
Like ``x.exp()`` for a :class:`~torch.Tensor`, but also accepts
numbers.
"""
if torch.is_tensor(x):
return torch.exp(x)
else:
return math.exp(x)
def detach_iterable(iterable):
if torch.is_tensor(iterable):
return iterable.detach()
else:
return [var.detach() for var in iterable]
def zero_grads(tensors):
"""
Sets gradients of list of Tensors to zero in place
"""
for p in tensors:
if p.grad is not None:
p.grad = torch.zeros_like(p.grad)
def get_plate_stacks(trace):
"""
This builds a dict mapping site name to a set of plate stacks. Each
plate stack is a list of :class:`CondIndepStackFrame`s corresponding to
an :class:`plate`. This information is used by :class:`Trace_ELBO` and
:class:`TraceGraph_ELBO`.
"""
return {name: [f for f in node["cond_indep_stack"] if f.vectorized]
for name, node in trace.nodes.items()
if node["type"] == "sample" and not site_is_subsample(node)}
class MultiFrameTensor(dict):
"""
A container for sums of Tensors among different :class:`plate` contexts.
Used in :class:`~pyro.infer.tracegraph_elbo.TraceGraph_ELBO` to simplify
downstream cost computation logic.
Example::
downstream_cost = MultiFrameTensor()
for site in downstream_nodes:
downstream_cost.add((site["cond_indep_stack"], site["log_prob"]))
downstream_cost.add(*other_costs.items()) # add in bulk
summed = downstream_cost.sum_to(target_site["cond_indep_stack"])
"""
def __init__(self, *items):
super(MultiFrameTensor, self).__init__()
self.add(*items)
def add(self, *items):
"""
Add a collection of (cond_indep_stack, tensor) pairs. Keys are
``cond_indep_stack``s, i.e. tuples of :class:`CondIndepStackFrame`s.
Values are :class:`torch.Tensor`s.
"""
for cond_indep_stack, value in items:
frames = frozenset(f for f in cond_indep_stack if f.vectorized)
assert all(f.dim < 0 and -value.dim() <= f.dim for f in frames)
if frames in self:
self[frames] = self[frames] + value
else:
self[frames] = value
def sum_to(self, target_frames):
total = None
for frames, value in self.items():
for f in frames:
if f not in target_frames and value.shape[f.dim] != 1:
value = value.sum(f.dim, True)
while value.shape and value.shape[0] == 1:
value = value.squeeze(0)
total = value if total is None else total + value
return total
def __repr__(self):
return '%s(%s)' % (type(self).__name__, ",\n\t".join([
'({}, ...)'.format(frames) for frames in self]))
class Dice(object):
"""
An implementation of the DiCE operator compatible with Pyro features.
This implementation correctly handles:
- scaled log-probability due to subsampling
- independence in different ordinals due to plate
- weights due to parallel and sequential enumeration
- weights due to local multiple sampling
This assumes restricted dependency structure on the model and guide:
variables outside of an :class:`~pyro.plate` can never depend on
variables inside that :class:`~pyro.plate`.
References:
[1] Jakob Foerster, Greg Farquhar, Maruan Al-Shedivat, Tim Rocktaeschel,
Eric P. Xing, Shimon Whiteson (2018)
"DiCE: The Infinitely Differentiable Monte-Carlo Estimator"
https://arxiv.org/abs/1802.05098
[2] Laurence Aitchison (2018)
"Tensor Monte Carlo: particle methods for the GPU era"
https://arxiv.org/abs/1806.08593
:param pyro.poutine.trace.Trace guide_trace: A guide trace.
:param ordering: A dictionary mapping model site names to ordinal values.
Ordinal values may be any type that is (1) ``<=`` comparable and (2)
hashable; the canonical ordinal is a ``frozenset`` of site names.
"""
def __init__(self, guide_trace, ordering):
log_denom = defaultdict(float) # avoids double-counting when sequentially enumerating
        log_probs = defaultdict(list)  # accounts for upstream probabilities
for name, site in guide_trace.nodes.items():
if site["type"] != "sample":
continue
log_prob = site["packed"]["score_parts"].score_function # not scaled by subsampling
dims = getattr(log_prob, "_pyro_dims", "")
ordinal = ordering[name]
if site["infer"].get("enumerate"):
num_samples = site["infer"].get("num_samples")
if num_samples is not None: # site was multiply sampled
if not is_identically_zero(log_prob):
log_prob = log_prob - log_prob.detach()
log_prob = log_prob - math.log(num_samples)
if not isinstance(log_prob, torch.Tensor):
log_prob = torch.tensor(float(log_prob), device=site["value"].device)
log_prob._pyro_dims = dims
# I don't know why the following broadcast is needed, but it makes tests pass:
log_prob, _ = packed.broadcast_all(log_prob, site["packed"]["log_prob"])
elif site["infer"]["enumerate"] == "sequential":
log_denom[ordinal] += math.log(site["infer"]["_enum_total"])
else: # site was monte carlo sampled
if is_identically_zero(log_prob):
continue
log_prob = log_prob - log_prob.detach()
log_prob._pyro_dims = dims
log_probs[ordinal].append(log_prob)
self.log_denom = log_denom
self.log_probs = log_probs
def _get_log_factors(self, target_ordinal):
"""
Returns a list of DiCE factors at a given ordinal.
"""
log_denom = 0
for ordinal, term in self.log_denom.items():
if not ordinal <= target_ordinal: # not downstream
log_denom += term # term = log(# times this ordinal is counted)
log_factors = [] if is_identically_zero(log_denom) else [-log_denom]
for ordinal, terms in self.log_probs.items():
if ordinal <= target_ordinal: # upstream
log_factors.extend(terms) # terms = [log(dice weight of this ordinal)]
return log_factors
def compute_expectation(self, costs):
"""
Returns a differentiable expected cost, summing over costs at given ordinals.
:param dict costs: A dict mapping ordinals to lists of cost tensors
:returns: a scalar expected cost
:rtype: torch.Tensor or float
"""
# Share computation across all cost terms.
with shared_intermediates() as cache:
ring = MarginalRing(cache=cache)
expected_cost = 0.
for ordinal, cost_terms in costs.items():
log_factors = self._get_log_factors(ordinal)
scale = math.exp(sum(x for x in log_factors if not isinstance(x, torch.Tensor)))
log_factors = [x for x in log_factors if isinstance(x, torch.Tensor)]
# Collect log_prob terms to query for marginal probability.
queries = {frozenset(cost._pyro_dims): None for cost in cost_terms}
for log_factor in log_factors:
key = frozenset(log_factor._pyro_dims)
if queries.get(key, False) is None:
queries[key] = log_factor
# Ensure a query exists for each cost term.
for cost in cost_terms:
key = frozenset(cost._pyro_dims)
if queries[key] is None:
query = torch.zeros_like(cost)
query._pyro_dims = cost._pyro_dims
log_factors.append(query)
queries[key] = query
# Perform sum-product contraction. Note that plates never need to be
# product-contracted due to our plate-based dependency ordering.
sum_dims = set().union(*(x._pyro_dims for x in log_factors)) - ordinal
for query in queries.values():
require_backward(query)
root = ring.sumproduct(log_factors, sum_dims)
root._pyro_backward()
probs = {key: query._pyro_backward_result.exp() for key, query in queries.items()}
# Aggregate prob * cost terms.
for cost in cost_terms:
key = frozenset(cost._pyro_dims)
prob = probs[key]
prob._pyro_dims = queries[key]._pyro_dims
mask = prob > 0
if torch._C._get_tracing_state() or not mask.all():
mask._pyro_dims = prob._pyro_dims
cost, prob, mask = packed.broadcast_all(cost, prob, mask)
prob = prob[mask]
cost = cost[mask]
else:
cost, prob = packed.broadcast_all(cost, prob)
expected_cost = expected_cost + scale * torch.tensordot(prob, cost, prob.dim())
LAST_CACHE_SIZE[0] = count_cached_ops(cache)
return expected_cost
def check_fully_reparametrized(guide_site):
log_prob, score_function_term, entropy_term = guide_site["score_parts"]
fully_rep = (guide_site["fn"].has_rsample and not is_identically_zero(entropy_term) and
is_identically_zero(score_function_term))
if not fully_rep:
raise NotImplementedError("All distributions in the guide must be fully reparameterized.")
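
The Dice class above repeatedly uses the term log_prob - log_prob.detach(). A small sketch of why that works: the expression evaluates to zero, so the resulting weight exp(0) equals 1 in the forward pass, yet it still carries the score-function gradient:

import torch

lp = torch.tensor(-1.3, requires_grad=True)   # a log-probability
weight = (lp - lp.detach()).exp()             # DiCE-style weight
print(float(weight))                          # 1.0 in the forward pass
weight.backward()
print(float(lp.grad))                         # 1.0: d(weight)/d(lp) = weight = 1 here
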
avg_line_length: 39.916667 | max_line_length: 99 | alphanum_fraction: 0.613869

hexsha: 059a89e17dd615424b3e6e68756a004de575873a | size: 1,845 | ext: py | lang: Python
max_stars_repo_path: pipeline/utf8_utils.py | max_stars_repo_name: riyachanduka/termite | max_stars_repo_head_hexsha: 62cfb58d29354915db2c3f9c726e2ef2a976d4c0 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 73 | max_stars_repo_stars_event_min_datetime: 2015-01-31T22:03:20.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-15T11:39:00.000Z
max_issues_repo_path: pipeline/utf8_utils.py | max_issues_repo_name: riyachanduka/termite | max_issues_repo_head_hexsha: 62cfb58d29354915db2c3f9c726e2ef2a976d4c0 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2016-04-24T20:01:14.000Z | max_issues_repo_issues_event_max_datetime: 2017-05-18T15:55:28.000Z
max_forks_repo_path: pipeline/utf8_utils.py | max_forks_repo_name: riyachanduka/termite | max_forks_repo_head_hexsha: 62cfb58d29354915db2c3f9c726e2ef2a976d4c0 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 27 | max_forks_repo_forks_event_min_datetime: 2015-01-13T06:31:04.000Z | max_forks_repo_forks_event_max_datetime: 2020-05-14T09:27:00.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Modified from 'The Python Standard Library'
13.1. csv — CSV File Reading and Writing
http://docs.python.org/2/library/csv.html
"""
import csv, codecs, cStringIO
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", delimiter="\t", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, delimiter=delimiter, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", delimiter="\t", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, delimiter=delimiter, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8", "ignore")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
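
The recoder/queue machinery above targets Python 2's csv module (cStringIO, unicode, next()). Under Python 3, which is an assumption about the runtime rather than part of the original file, the same UTF-8 round trip needs no re-encoding at all; the file name below is purely illustrative:

import csv

with open("terms.tsv", "w", encoding="utf-8", newline="") as f:
    # csv handles text natively in Python 3; only the file encoding matters.
    csv.writer(f, dialect=csv.excel, delimiter="\t").writerow(["término", "häufigkeit", "42"])

with open("terms.tsv", encoding="utf-8", newline="") as f:
    rows = list(csv.reader(f, delimiter="\t"))
assert rows == [["término", "häufigkeit", "42"]]
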
avg_line_length: 26.73913 | max_line_length: 84 | alphanum_fraction: 0.690515

hexsha: b0c7176c5633a42f5f35b7a81ead126d6a27df61 | size: 12,537 | ext: py | lang: Python
max_stars_repo_path: tensorflow_probability/python/distributions/kumaraswamy_test.py | max_stars_repo_name: nagachika/probability | max_stars_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-07-12T22:40:42.000Z | max_stars_repo_stars_event_max_datetime: 2020-07-12T22:40:42.000Z
max_issues_repo_path: tensorflow_probability/python/distributions/kumaraswamy_test.py | max_issues_repo_name: nagachika/probability | max_issues_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2019-08-01T18:31:41.000Z | max_issues_repo_issues_event_max_datetime: 2019-08-01T19:42:15.000Z
max_forks_repo_path: tensorflow_probability/python/distributions/kumaraswamy_test.py | max_forks_repo_name: nagachika/probability | max_forks_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-04-17T18:01:47.000Z | max_forks_repo_forks_event_max_datetime: 2020-04-17T18:01:47.000Z
content:
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf.compat.v1.logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
tfd = tfp.distributions
def _kumaraswamy_mode(a, b):
a = np.asarray(a)
b = np.asarray(b)
return ((a - 1) / (a * b - 1))**(1 / a)
def _kumaraswamy_moment(a, b, n):
a = np.asarray(a)
b = np.asarray(b)
return b * special.beta(1.0 + n / a, b)
def _harmonic_number(b):
b = np.asarray(b)
return special.psi(b + 1) - special.psi(1)
def _kumaraswamy_cdf(a, b, x):
a = np.asarray(a)
b = np.asarray(b)
x = np.asarray(x)
return 1 - (1 - x**a)**b
def _kumaraswamy_pdf(a, b, x):
a = np.asarray(a)
b = np.asarray(b)
x = np.asarray(x)
return a * b * x ** (a - 1) * (1 - x ** a) ** (b - 1)
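
A quick numeric cross-check, independent of the TensorFlow tests below and using NumPy/SciPy only, that the closed forms in these helpers are mutually consistent: integrating x times the pdf above should reproduce the first moment b * B(1 + 1/a, b). The parameter values are arbitrary.

import numpy as np
from scipy import special

a, b = 2.0, 3.0
x = np.linspace(1e-6, 1.0 - 1e-6, 200001)
pdf = a * b * x**(a - 1) * (1 - x**a)**(b - 1)
mean_numeric = np.trapz(x * pdf, x)                # E[X] by quadrature
mean_closed = b * special.beta(1.0 + 1.0 / a, b)   # _kumaraswamy_moment(a, b, 1)
assert abs(mean_numeric - mean_closed) < 1e-4
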
@test_util.run_all_in_graph_and_eager_modes
class KumaraswamyTest(tf.test.TestCase):
def testSimpleShapes(self):
a = np.random.rand(3)
b = np.random.rand(3)
dist = tfd.Kumaraswamy(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3]), dist.batch_shape)
def testComplexShapes(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(3, 2, 2)
dist = tfd.Kumaraswamy(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)
def testComplexShapesBroadcast(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(2, 2)
dist = tfd.Kumaraswamy(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)
def testAProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Kumaraswamy(a, b)
self.assertEqual([1, 3], dist.concentration1.shape)
self.assertAllClose(a, self.evaluate(dist.concentration1))
def testBProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Kumaraswamy(a, b)
self.assertEqual([1, 3], dist.concentration0.shape)
self.assertAllClose(b, self.evaluate(dist.concentration0))
def testPdfXProper(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Kumaraswamy(a, b, validate_args=True)
self.evaluate(dist.prob([.1, .3, .6]))
self.evaluate(dist.prob([.2, .3, .5]))
# Either condition can trigger.
with self.assertRaisesOpError("sample must be non-negative"):
self.evaluate(dist.prob([-1., 0.1, 0.5]))
with self.assertRaisesOpError("sample must be no larger than `1`"):
self.evaluate(dist.prob([.1, .2, 1.2]))
def testPdfTwoBatches(self):
a = [1., 2]
b = [1., 2]
x = [.5, .5]
dist = tfd.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2,), pdf.shape)
def testPdfTwoBatchesNontrivialX(self):
a = [1., 2]
b = [1., 2]
x = [.3, .7]
dist = tfd.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2,), pdf.shape)
def testPdfUniformZeroBatch(self):
# This is equivalent to a uniform distribution
a = 1.
b = 1.
x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
dist = tfd.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((5,), pdf.shape)
def testPdfAStretchedInBroadcastWhenSameRank(self):
a = [[1., 2]]
b = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = tfd.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfAStretchedInBroadcastWhenLowerRank(self):
a = [1., 2]
b = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = tfd.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfXStretchedInBroadcastWhenSameRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = tfd.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfXStretchedInBroadcastWhenLowerRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = tfd.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testKumaraswamyMean(self):
with tf.compat.v1.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = tfd.Kumaraswamy(a, b)
self.assertEqual(dist.mean().shape, (3,))
if not stats:
return
expected_mean = _kumaraswamy_moment(a, b, 1)
self.assertAllClose(expected_mean, self.evaluate(dist.mean()))
def testKumaraswamyVariance(self):
with tf.compat.v1.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = tfd.Kumaraswamy(a, b)
self.assertEqual(dist.variance().shape, (3,))
if not stats:
return
expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
a, b, 1)**2
self.assertAllClose(expected_variance, self.evaluate(dist.variance()))
def testKumaraswamyMode(self):
with tf.compat.v1.Session():
a = np.array([1.1, 2, 3])
b = np.array([2., 4, 1.2])
expected_mode = _kumaraswamy_mode(a, b)
dist = tfd.Kumaraswamy(a, b)
self.assertEqual(dist.mode().shape, (3,))
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testKumaraswamyModeInvalid(self):
with tf.compat.v1.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tfd.Kumaraswamy(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Mode undefined for concentration1 <= 1."):
self.evaluate(dist.mode())
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tfd.Kumaraswamy(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Mode undefined for concentration0 <= 1."):
self.evaluate(dist.mode())
def testKumaraswamyModeEnableAllowNanStats(self):
with tf.compat.v1.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tfd.Kumaraswamy(a, b, allow_nan_stats=True)
expected_mode = _kumaraswamy_mode(a, b)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().shape)
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tfd.Kumaraswamy(a, b, allow_nan_stats=True)
expected_mode = _kumaraswamy_mode(a, b)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().shape)
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testKumaraswamyEntropy(self):
with tf.compat.v1.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tfd.Kumaraswamy(a, b)
self.assertEqual(dist.entropy().shape, (3,))
if not stats:
return
expected_entropy = (1 - 1. / b) + (
1 - 1. / a) * _harmonic_number(b) - np.log(a * b)
self.assertAllClose(expected_entropy, self.evaluate(dist.entropy()))
def testKumaraswamySample(self):
a = 1.
b = 2.
kumaraswamy = tfd.Kumaraswamy(a, b)
n = tf.constant(100000)
samples = kumaraswamy.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000,))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertLess(
stats.kstest(
# Kumaraswamy is a univariate distribution.
sample_values,
lambda x: _kumaraswamy_cdf(1., 2., x))[0],
0.01)
# The standard error of the sample mean is 1 / (sqrt(18 * n))
expected_mean = _kumaraswamy_moment(a, b, 1)
self.assertAllClose(sample_values.mean(axis=0), expected_mean, atol=1e-2)
expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
a, b, 1)**2
self.assertAllClose(
np.cov(sample_values, rowvar=0), expected_variance, atol=1e-1)
# Test that sampling with the same seed twice gives the same results.
def testKumaraswamySampleMultipleTimes(self):
a_val = 1.
b_val = 2.
n_val = 100
seed = tfp_test_util.test_seed()
tf.compat.v1.set_random_seed(seed)
kumaraswamy1 = tfd.Kumaraswamy(
concentration1=a_val, concentration0=b_val, name="kumaraswamy1")
samples1 = self.evaluate(kumaraswamy1.sample(n_val, seed=seed))
tf.compat.v1.set_random_seed(seed)
kumaraswamy2 = tfd.Kumaraswamy(
concentration1=a_val, concentration0=b_val, name="kumaraswamy2")
samples2 = self.evaluate(kumaraswamy2.sample(n_val, seed=seed))
self.assertAllClose(samples1, samples2)
def testKumaraswamySampleMultidimensional(self):
a = np.random.rand(3, 2, 2).astype(np.float32)
b = np.random.rand(3, 2, 2).astype(np.float32)
kumaraswamy = tfd.Kumaraswamy(a, b)
n = tf.constant(100000)
samples = kumaraswamy.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertAllClose(
sample_values[:, 1, :].mean(axis=0),
_kumaraswamy_moment(a, b, 1)[1, :],
atol=1e-1)
def testKumaraswamyCdf(self):
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = self.evaluate(tfd.Kumaraswamy(a, b).cdf(x))
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(_kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0)
def testKumaraswamyLogCdf(self):
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = self.evaluate(tf.exp(tfd.Kumaraswamy(a, b).log_cdf(x)))
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(_kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0)
if __name__ == "__main__":
tf.test.main()
avg_line_length: 34.067935 | max_line_length: 115 | alphanum_fraction: 0.640504

hexsha: 270abc42818e6a01798930069bec359aed4785f5 | size: 1,345 | ext: py | lang: Python
max_stars_repo_path: tviserrys/urls.py | max_stars_repo_name: DeWaster/Tviserrys | max_stars_repo_head_hexsha: c177387ad145b649fed3365d139bf6274ce89db1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tviserrys/urls.py | max_issues_repo_name: DeWaster/Tviserrys | max_issues_repo_head_hexsha: c177387ad145b649fed3365d139bf6274ce89db1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tviserrys/urls.py | max_forks_repo_name: DeWaster/Tviserrys | max_forks_repo_head_hexsha: c177387ad145b649fed3365d139bf6274ce89db1 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from django.contrib.auth import views as auth_views
from django.conf.urls import patterns, include, url
from django.conf.urls import url
from django.contrib import admin
from . import views
from tviserrys.settings import MEDIA_ROOT
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^register/', views.RegisterView.as_view(), name='register'),
url(r'^tviit/', include('tviit.urls', namespace='tviit')),
url(r'^admin/', admin.site.urls),
url(r'^login/$', auth_views.login),
url(r'^logout/$', auth_views.logout),
url(r'^password_change/$', auth_views.password_change),
url(r'^password_change/done/$', auth_views.password_change_done),
url(r'^password_reset/$', auth_views.password_reset),
url(r'^password_reset/done/$', auth_views.password_reset_done),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.password_reset_confirm),
url(r'^reset/done/$', auth_views.password_reset_complete),
url(r'^profile/', include('user_profile.urls', namespace='profile')),
url(r'^search/', include('haystack.urls')),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': MEDIA_ROOT, 'show_indexes': False}),
]
urlpatterns += patterns('',
url(r'^i18n/', include('django.conf.urls.i18n')),
)
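
The trailing urlpatterns += patterns('', ...) call uses the old patterns() helper, which Django deprecated in 1.8 and removed in 1.10. A sketch of the equivalent plain-list form, assuming a modern Django where the rest of this module keeps its url() and include() imports:

from django.conf.urls import include, url

# patterns() is gone; urlpatterns is simply a list of url() entries.
urlpatterns += [
    url(r'^i18n/', include('django.conf.urls.i18n')),
]
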
avg_line_length: 44.833333 | max_line_length: 131 | alphanum_fraction: 0.683271