| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, 1 item) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
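Each row below pairs repository metadata with the full text of one source file in the `content` column. As a minimal sketch of how rows with this schema might be inspected offline, assuming the table has been exported to a local Parquet file (the file name `code_rows.parquet` is a hypothetical placeholder) and that pandas with a Parquet engine such as pyarrow is available:

import pandas as pd

# Load the exported table; column names follow the schema above.
df = pd.read_parquet("code_rows.parquet")

# Peek at a few metadata columns.
print(df[["repo_name", "path", "license_type", "length_bytes"]].head())

# The `content` column holds the raw source text of each file.
row = df.iloc[0]
print("%s:%s (%d bytes)" % (row["repo_name"], row["path"], row["length_bytes"]))
print(row["content"][:200])  # preview the first 200 characters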
160e6f9e2f4622e1af2cb9e5ff9252cf7e29d002
|
efc90aad821f8994d2c05082fab5ba67f355f699
|
/volkscv/analyzer/statistics/processor/box_processor.py
|
6e1cb1f7fddc5cb00b2f24484e68903cb8d39e17
|
[
"Apache-2.0"
] |
permissive
|
ChaseMonsterAway/volkscv
|
d1cdcbaaf1ca17076cb991862b29459c1adb2813
|
aa7e898cc29e3e5f26363e56bf56f4c56574bbd8
|
refs/heads/master
| 2023-03-10T02:54:50.269361
| 2021-02-24T02:34:53
| 2021-02-24T02:34:53
| 285,753,693
| 10
| 0
|
Apache-2.0
| 2021-02-24T02:34:54
| 2020-08-07T06:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,904
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from .base import BaseProcessor
from ..plotter import OneDimPlotter, TwoDimPlotter, SubPlotter, Compose, cdf_pdf
class BoxProcessor(BaseProcessor):
""" Process the information related to box, get several statistical distribution.
Args:
data (dict): Data to be processed.
Example:
>>> import numpy as np
>>> data = dict(
>>> bboxes=np.array([np.array([[0, 0, 10, 10], [15, 30, 40, 60]]),
>>> np.array([[10, 15, 20, 20]]),]),
>>> labels = np.array([np.array([0, 1]), np.array([1])]),
>>> )
>>> self = BoxProcessor(data)
>>> self.default_plot()
>>> # export
>>> self.export('./result', save_mode='folder')
"""
def __init__(self, data):
super(BoxProcessor, self).__init__(data)
self.processor = ['hw', 'scale', 'ratio', 'ratio_log2', 'hw_per_class']
self._sections = [[0, 1e8]]
self._box_h, self._box_w = self._extract_box()
self._box_per_class = self._box_of_each_class()
self.box_per_class = None
self.box_h, self.box_w = None, None
self._text = 'all'
if not self._box_h:
self.processor = []
def _extract_box(self):
""" Extract the box height and width in input data."""
box_h = []
box_w = []
if self.data.get('bboxes', None) is not None:
for boxs in self.data['bboxes']:
for box in boxs:
h, w = box[2:] - box[:2]
box_h.append(h)
box_w.append(w)
else:
print("Keys in data doesn't contain 'labels'.")
return box_h, box_w
def _box_of_each_class(self):
""" Divide the height and width of box into different groups based on
their class.
Returns:
_box_per_class (dict): dict(category: [[h1, h2...], [w1, w2...]])
"""
if self.data.get('labels', None) is None:
print("Keys in data doesn't contain 'labels'.")
return None
if not self._box_h:
return None
label = self.data['labels']
tmp_label = []
for l in label:
tmp_label += list(l)
if 'categories' in self.data and self.data['categories'] is not None:
categories = list(range(len(self.data['categories'])))
else:
categories = list(set(tmp_label))
self._class = categories
box_per_class = {categories[tl]: [[], []] for tl in set(tmp_label)}
for cl, ch, cw in zip(tmp_label, self._box_h, self._box_w):
box_per_class[categories[cl]][0].append(ch)
box_per_class[categories[cl]][1].append(cw)
return box_per_class
@property
def specified_class(self):
return self._class
@specified_class.setter
def specified_class(self, v):
if not isinstance(v, (list, tuple)):
v = [v]
for v_ in v:
assert isinstance(v_, int), "Use int value to specify class."
self._class = v
h, w = [], []
for sc in self.specified_class:
h += self._box_per_class[sc][0]
w += self._box_per_class[sc][1]
self.box_per_class = {sc: self._box_per_class[sc] for sc in v}
self.box_h, self.box_w = h, w
self._text = str(v)
@property
def sections(self):
""" The section of box scale (sqrt(box_w*box_h))."""
return self._sections
@sections.setter
def sections(self, v):
assert isinstance(v, (list, tuple))
assert isinstance(v[0], (int, float))
v = [0] + list(v) + [1e8]
self._sections = [[v[idx], v[idx + 1]] for idx in range(len(v) - 1)]
@property
def hw_per_class(self):
"""Height and width distribution of each class."""
if self._box_per_class is None:
return None
if self.box_per_class is not None:
unique_class = self.box_per_class
else:
unique_class = self._box_per_class
cols = int(np.ceil(np.sqrt(len(unique_class))))
return SubPlotter(unique_class,
'box hw distribution of class %s' % self._text,
'two',
plt.scatter,
cols, cols,
axis_label=['height', 'width'],
marker='.',
alpha=0.1)
@property
def hw(self):
""" Height and width distribution of box. """
h, w = self._box_h, self._box_w
if self.box_h:
h, w = self.box_h, self.box_w
return TwoDimPlotter([h, w],
"distribution of box's hw (class %s)" % self._text,
plt.scatter,
axis_label=['height', 'width'],
marker='.', alpha=0.1)
@property
def scale(self):
""" Scale (sqrt(w*h)) distribution."""
h, w = self._box_h, self._box_w
if self.box_h:
h, w = self.box_h, self.box_w
sqrt_scale = np.sqrt(np.array(w) * np.array(h))
return OneDimPlotter(list(sqrt_scale), 'sqrt(wh) of box (class %s)' % self._text,
cdf_pdf, axis_label=['scale:sqrt(wh)', 'normalized numbers'],
bins=20)
def section_scale(self, srange=(0, 32, 96, 640)):
""" Scale (sqrt(w*H)) distribution in different sections."""
# TODO
sections = [[srange[idx], srange[idx + 1]] for idx in range(len(srange) - 1)]
print('The sections are %s' % sections)
sqrt_scale = np.sqrt(np.array(self._box_w) * np.array(self._box_h))
return OneDimPlotter(list(sqrt_scale), 'box nums in different sections %s' % sections,
cdf_pdf, axis_label=['scale:sqrt(wh)', 'normalized numbers'],
bins=srange)
@property
def ratio(self):
""" Ratio (height/width) distribution."""
assert min(self._box_w) > 0
h, w = self._box_h, self._box_w
if self.box_h:
h, w = self.box_h, self.box_w
section_hw = {i: [[], []] for i in range(len(self.sections))}
for h_, w_ in zip(h, w):
for idx, section in enumerate(self.sections):
if section[0] <= np.sqrt(h_ * w_) < section[1]:
section_hw[idx][0].append(h_)
section_hw[idx][1].append(w_)
legends = []
plotters = []
for key, value in section_hw.items():
hw_ratios = np.array(value[0]) / np.array(value[1])
legends.append(self.sections[key])
plotters.append(OneDimPlotter(list(hw_ratios),
'h w ratio of box (class %s) in section %s' %
(self.sections[key], self._text),
cdf_pdf,
axis_label=['h/w ratio', 'normalized numbers'],
bins=20))
return Compose(plotters, text='Box ratio of class %s' % self._text, legend=legends)
@property
def ratio_log2(self):
""" Ratio (log2(height/width)) distribution."""
assert min(self._box_w) > 0
h, w = self._box_h, self._box_w
if self.box_h:
h, w = self.box_h, self.box_w
h_w_ratio = np.array(h) / np.array(w)
log2_ratio = np.log2(h_w_ratio)
return OneDimPlotter(list(log2_ratio),
'h/w ratio(log2) of box (class %s)' % self._text,
cdf_pdf,
axis_label=['log2(h/w)', 'normalized numbers'],
bins=20)
|
[
"jun.sun@media-smart.cn"
] |
jun.sun@media-smart.cn
|
dc6bc1e49da835ba067f04ecc87cc1a6ea910278
|
d5b4ac45bb59287fd86bbaa454a97982f159f90a
|
/DB7.py
|
864ebbd67afd21b252e08a0bc059056efff34cd5
|
[] |
no_license
|
21368236/1
|
b7d43a4d6b48033e483d61e9bc7dee6e5eecdc45
|
6f72422d6565bfbd503e36a5ffc4e0590115acc8
|
refs/heads/master
| 2022-08-24T22:43:05.169200
| 2022-08-14T21:32:59
| 2022-08-14T21:32:59
| 152,224,029
| 0
| 0
| null | 2018-10-18T15:11:26
| 2018-10-09T09:26:07
|
Python
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
s= ("7")
int (s)
print (s/7)
|
[
"noreply@github.com"
] |
21368236.noreply@github.com
|
c0750ef184ed5e43af364a74a3f17262aca88d07
|
96cfaaa771c2d83fc0729d8c65c4d4707235531a
|
/RecoHcal/HcalProm/test/runHcalTemplate.py
|
56f7ce258be40e5eeb4134a4662b388ce4da6b3f
|
[] |
no_license
|
khotilov/cmssw
|
a22a160023c7ce0e4d59d15ef1f1532d7227a586
|
7636f72278ee0796d0203ac113b492b39da33528
|
refs/heads/master
| 2021-01-15T18:51:30.061124
| 2013-04-20T17:18:07
| 2013-04-20T17:18:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("HCALTemplate")
process.load("MagneticField.Engine.uniformMagneticField_cfi")
# process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("L1Trigger.Configuration.L1Config_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1Menu_CRUZET200805_gr7_muon_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
dropMetaData = cms.untracked.bool(True),
fileNames = cms.untracked.vstring(
'/store/data/Commissioning08/BarrelMuon/RECO/CRUZET4_v1/000/058/600/1A31FC6D-4A71-DD11-80AF-000423D60FF6.root'
)
)
process.l1 = cms.EDFilter("L1GTFilter",
trigger = cms.string('L1_SingleMu3'),
dumpTriggerTable = cms.untracked.bool (True)
)
process.hcalTemplate = cms.EDAnalyzer ("HcalTemplate")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('histHcalTemplate.root')
)
process.p1 = cms.Path(process.hcalTemplate)
process.UniformMagneticFieldESProducer.ZFieldInTesla = 0.001
|
[
"sha1-cf070837f75a2e740f971de86d9d2e3a75a81342@cern.ch"
] |
sha1-cf070837f75a2e740f971de86d9d2e3a75a81342@cern.ch
|
1ab90d2920fccca9c25b7d34341889016c5a1931
|
9224be8b75cd55325dfa69772a2734e6c81e6b5b
|
/audio/sig/fbanks.py
|
29c840455a9960ac00e9b3f28039a66170ccb26a
|
[] |
no_license
|
RaphaelOlivier/audio-smoothing
|
28bc4a7a36364ee8ca0f32680c48c3449a44c907
|
092ab54ae181cbf86787ad64de276f19be9dbf70
|
refs/heads/master
| 2022-11-29T14:20:32.726104
| 2020-06-30T17:30:09
| 2020-06-30T17:30:09
| 275,976,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,886
|
py
|
"""FilterBANKS for audio analysis and synthesis."""
import math
import numpy as np
from numpy.fft import rfft, fft
from scipy.fftpack import dct
from .auditory import hz2mel, mel2hz
from .auditory import erb_space, erb_filters, erb_fbank, erb_freqz
from .window import hamming
from .temporal import convdn, conv
class Filterbank(object):
"""An abstract class of filterbanks.
All types of filterbanks should subclass this class and implement:
* __len__(): number of filterbanks
* __getitem__(i): i-th filter object
* freqz(i): frequency response of i-th filter
* filter(sig, i): filter signal `sig` by i-th filter
"""
def __len__(self):
"""Return the number of frequency channels."""
raise NotImplementedError
def __getitem__(self, k):
"""Obtain k-th filter from the filterbank."""
raise NotImplementedError
def freqz(self, k):
"""Frequency response of the k-th filter."""
raise NotImplementedError
def filter(self, sig, k):
"""Filter a signal through k-th filter."""
raise NotImplementedError
class LinFreq(Filterbank):
"""The linear-frequency filterbank.
This class is implemented using the STFT bandpass filter view.
"""
def __init__(self, wind, nchan=None):
"""Create a bank of bandpass filters using prototype lowpass window.
Parameters
----------
wind : array_like
A window function.
"""
self.nchan = (nchan if nchan is not None else len(wind))
self.wind = wind
self.nsym = (len(wind)-1) / 2. # window point of symmetry
self.filts = np.zeros((self.nchan, len(wind)), dtype=np.complex_)
for k in range(self.nchan): # make bandpass filters
wk = 2*np.pi*k / self.nchan
self.filts[k] = wind * np.exp(1j*wk*np.arange(len(wind)))
def __len__(self):
"""Return number of filters."""
return self.nchan
def __getitem__(self, k):
"""Return k-th channel FIR filter coefficients."""
return self.filts[k]
def freqz(self, k, nfft=None):
"""Return frequency response of k-th channel filter."""
if nfft is None:
nfft = max(1024, int(2**np.ceil(np.log2(len(self.wind))))
) # at least show 1024 frequency points
ww = 2*np.pi * np.arange(nfft)/nfft
hh = fft(self.filts[k], n=nfft)
return ww, hh
def filter(self, sig, k):
"""Filter signal by k-th filter."""
demod = np.exp(-1j*(2*np.pi*k/self.nchan)*np.arange(len(sig)))
return np.convolve(sig, self.filts[k])[:len(sig)] * demod
class MelFreq(Filterbank):
"""The Mel-frequency filterbank."""
def __init__(self, sr, nfft, nchan, flower=0., fupper=.5, unity=False):
"""Construct a Mel filterbank.
Parameters
----------
sr : int or float
Sampling rate
nfft : int
DFT size
nchan : int
Number of filters in filterbank
flower : int or float <0.>
Lowest center-frequency in filterbank. Could either be in terms of
Hz or 2*pi. Default to 0. (DC).
fupper : int or float <.5>
Highest center-frequency in filterbank. Could either be in terms of
Hz or 2*pi. Default to .5 (Nyquist).
"""
self.nfft = nfft
self.nchan = nchan
# Find frequency (Hz) endpoints
if flower > 1: # assume in Hz
hzl = flower
else: # assume in normalized frequency
hzl = flower * sr
if fupper > 1:
hzh = fupper
else:
hzh = fupper * sr
# Calculate mel-frequency endpoints
mfl = hz2mel(hzl)
mfh = hz2mel(hzh)
# Calculate mel frequency range `mfrng`
# Calculate mel frequency increment between adjacent channels `mfinc`
mfrng = mfh - mfl
mfinc = mfrng * 1. / (nchan+1)
mfc = mfl + mfinc * np.arange(1, nchan+1) # mel center frequencies
# Calculate the DFT bins for [fl[0], fc[0], fc[P-1], fh[P-1]]
# p+1 markers for p channels
dflim = mel2hz(
mfl + mfinc*np.array([0, 1, nchan, nchan+1])) / sr * nfft
dfl = int(dflim[0])+1 # lowest DFT bin required
dfh = min(nfft//2, int(dflim[-1])-1) # highest DFT bin required
# Map all useful DFT bins to mel-frequency centers
mfc = (hz2mel(sr * np.arange(dfl, dfh+1) * 1. / nfft)-mfl) / mfinc
if mfc[0] < 0:
mfc = mfc[1:]
dfl += 1
if mfc[-1] >= nchan+1:
mfc = mfc[:-1]
dfh -= 1
mfc_fl = np.floor(mfc)
mfc_ml = mfc - mfc_fl # multiplier for upper filter
df2 = np.argmax(mfc_fl > 0)
df3 = len(mfc_fl) - np.argmax(mfc_fl[::-1] < nchan)
df4 = len(mfc_fl)
row = np.concatenate((mfc_fl[:df3], mfc_fl[df2:df4]-1))
col = np.concatenate((range(df3), range(df2, df4)))
val = np.concatenate((mfc_ml[:df3], 1-mfc_ml[df2:df4]))
# Finally, cache values for each filter
self.filts = []
self.wgts = np.zeros((nfft//2+1, nchan))
for ii in range(self.nchan):
idx = row == ii
if unity:
dftbin, dftwgt = col[idx]+dfl, val[idx]/sum(val[idx])
else:
dftbin, dftwgt = col[idx]+dfl, val[idx]
self.filts.append((dftbin, dftwgt))
self.wgts[dftbin, ii] = dftwgt
def __len__(self):
"""Return the number of frequency channels."""
return self.nchan
def __getitem__(self, k):
"""Obtain k-th filter from the filterbank."""
return self.filts[k]
def freqz(self, k):
"""Frequency response of the k-th filter."""
ww = np.arange(self.nfft//2+1)/self.nfft*2
hh = np.zeros(self.nfft//2+1)
dfb, val = self.filts[k]
hh[dfb] = val
return ww, hh
def filter(self, sig, k):
"""Filter a signal through k-th filter."""
dfb, val = self.filts[k]
dft_sig = rfft(sig, self.nfft)
return val.dot(dft_sig[dfb])
def melspec(self, powerspec):
"""Return the mel spectrum of a signal."""
return powerspec @ self.wgts
def mfcc(self, powerspec):
"""Return mel-frequency cepstral coefficients (MFCC)."""
return dct(np.log(self.melspec(powerspec)), norm='ortho')
class Gammatone(Filterbank):
"""The Gammatone filterbank."""
def __init__(self, sr, num_chan, center_frequencies=None):
"""Instantiate a Gammatone filterbank.
Parameters
----------
sr: int
Sampling rate.
num_chan: int
Number of frequency channels.
center_frequencies: iterable, optional
Center frequencies of each filter. There are 3 options:
1. (Default) None. This sets f_lower to 100Hz, f_upper to Nyquist
frequency, and assume equal spacing on linear frequency scale for
other frequencies.
2. Tuple of (`freqlower`, `frequpper`). This takes user-defined
lower and upper bounds, and assume equal spacing on linear scale
for other frequencies.
3. Iterable of center frequencies. This allows every center
frequency to be defined by user.
"""
super(Gammatone, self).__init__()
self.sr = sr
self.num_chan = num_chan
if center_frequencies is None:
self.cf = erb_space(num_chan, 100., sr/2)
elif len(center_frequencies) == num_chan:
self.cf = center_frequencies
else:
assert len(center_frequencies) == 2,\
"Fail to interpret center frequencies!"
self.cf = erb_space(num_chan, *center_frequencies)
self.filters = []
for ii, cf in enumerate(self.cf): # construct filter coefficients
A0, A11, A12, A13, A14, A2, B0, B1, B2, gain = erb_filters(sr, cf)
self.filters.append((A0, A11, A12, A13, A14, A2, B0, B1, B2, gain))
def __len__(self):
"""Return number of channels."""
return self.num_chan
def __getitem__(self, k):
"""Get filter coefficients of k-th channel."""
return self.filters[k]
def freqz(self, k, nfft=1024, powernorm=False):
"""Compute k-th channel's frequency reponse.
Parameters
----------
k: int
ERB frequency channel.
nfft: int, None
Number of linear frequency points.
powernorm: bool, False
Normalize power to unity if True.
"""
ww, hh = erb_freqz(*self.filters[k], nfft)
if powernorm:
hh /= sum(hh.real**2 + hh.imag**2)
return ww, hh
def filter(self, sig, k, cascade=False):
"""Filter signal with k-th channel."""
return erb_fbank(sig, *self.filters[k], cascade=cascade)
def gammawgt(self, nfft, powernorm=False, squared=True):
"""Return the Gammatone weighting function for STFT.
Parameters
----------
nfft: int
Number of DFT points.
powernorm: bool, False
Normalize power of Gammatone weighting function to unity.
squared: bool, True
Apply squared Gammatone weighting.
"""
wts = np.empty((nfft//2+1, self.num_chan))
for k in range(self.num_chan):
wts[:, k] = np.abs(self.freqz(k, nfft, powernorm)[1][:nfft//2+1])
if squared:
wts = wts**2
return wts
class ConstantQ(Filterbank):
"""Direct implementation of Judith Brown's Constant Q transform (CQT)."""
def __init__(self, sr, fmin, bins_per_octave=12, fmax=None, nchan=None,
zphase=True):
"""Instantiate a constant Q transform class.
Parameters
----------
sr: int or float
Sampling rate.
fmin: int or float
Lowest center frequency of the filterbank.
Note that all other center frequencies are derived from this.
bins_per_octave: int
Number of bins per octave (double frequency).
Default to 12, which corresponds to one semitone.
fmax: int or float
Highest center frequency of the filterbank.
Default to None, which assumes Nyquist. If `nchan` is set, `fmax`
will be ignored.
nchan: int
Total number of frequency bins.
Default to None, which is determined from other parameters. If set,
`fmax` will be adjusted accordingly.
zphase: bool
Center each window at time 0 rather than (Nk-1)//2. This is helpful
for mitigating the effect of group delay at low frequencies.
Default to yes.
"""
assert fmin >= 100, "Small center frequencies are not supported."
if nchan: # re-calculate fmax
self.nchan = nchan
fmax = fmin * 2**(nchan / bins_per_octave)
assert fmax <= sr/2,\
"fmax exceeds Nyquist! Consider reducing nchan or fmin."
assert nchan == math.ceil(bins_per_octave*np.log2(fmax/fmin))
else:
fmax = fmax if fmax else sr/2
self.nchan = math.ceil(bins_per_octave * np.log2(fmax/fmin))
self.sr = sr
self.qfactor = 1 / (2**(1/bins_per_octave) - 1)
self.cfs = fmin * 2**(np.arange(self.nchan)/bins_per_octave) # fcs
self.zphase = zphase
self.filts = []
for ii, k in enumerate(range(self.nchan)): # make bandpass filters
cf = self.cfs[ii]
wk = 2*np.pi*cf / sr
wsize = math.ceil(self.qfactor*sr/cf)
if zphase and (wsize % 2 == 0): # force odd-size window for 0phase
wsize += 1
if zphase:
mod = np.exp(1j*wk*np.arange(-(wsize-1)//2, (wsize-1)//2 + 1))
else:
mod = np.exp(1j*wk*np.arange(wsize))
wind = hamming(wsize)
self.filts.append(wind/wind.sum() * mod)
def __len__(self):
"""Return number of filters."""
return self.nchan
def __getitem__(self, k):
"""Return k-th channel FIR filter coefficients."""
return self.filts[k]
def freqz(self, k, nfft=None):
"""Return frequency response of k-th channel filter."""
if nfft is None:
nfft = max(1024, int(2**np.ceil(np.log2(len(self.filts[k]))))
) # at least show 1024 frequency points
ww = 2*np.pi * np.arange(nfft)/nfft
hh = fft(self.filts[k], n=nfft)
return ww, hh
def filter(self, sig, k, fr=None, zphase=True):
"""Filter signal by k-th filter."""
wk = 2*np.pi*self.cfs[k] / self.sr
decimate = int(self.sr/fr) if fr else None
if decimate:
demod = np.exp(-1j*wk*np.arange(0, len(sig), decimate))
return convdn(sig, self.filts[k], decimate,
zphase=zphase)[:len(demod)] * demod
else:
demod = np.exp(-1j*wk*np.arange(len(sig)))
return conv(sig, self.filts[k], zphase=zphase)[:len(sig)] * demod
def cqt(self, sig, fr):
"""Return the constant Q transform of the signal.
Parameters
----------
sig: array_like
Signal to be processed.
fr: int
Frame rate (or SR / hopsize in seconds) in Hz.
"""
decimate = int(self.sr/fr) # consistent with filter definition
out = np.empty((self.nchan, math.ceil(len(sig)/decimate)),
dtype='complex_')
for kk in range(self.nchan):
out[kk] = self.filter(sig, kk, fr=fr)
return out.T
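# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Because this module uses relative imports, the demo below is meant to be called from code
# that imports the package (e.g. `from audio.sig.fbanks import MelFreq`). The sampling rate,
# DFT size and channel count are arbitrary illustration values.
def _melfreq_demo(sr=16000, nfft=512, nchan=26):
    """Compute mel band energies and MFCCs for one random frame (demo only)."""
    rng = np.random.default_rng(0)
    frame = rng.standard_normal(nfft)            # stand-in for one windowed frame
    powerspec = np.abs(rfft(frame, nfft))**2     # power spectrum, length nfft//2 + 1
    melbank = MelFreq(sr, nfft, nchan)
    melspec = melbank.melspec(powerspec)         # (nchan,) mel band energies
    return melspec, melbank.mfcc(powerspec)      # MFCCs = DCT of log mel energies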
|
[
"raphael.franck.olivier@gmail.com"
] |
raphael.franck.olivier@gmail.com
|
b50026269bbbba85f9812f1668b0b260550a19d7
|
b9072925512750284c663f1b22059f94e5ccf289
|
/3-deploy_web_static.py
|
3c52cf865b22f5732832b6e23c8f12144faeea26
|
[] |
no_license
|
mmanumos/AirBnB_clone_v2
|
dad72f8a3bdf672c509dff96b57eb20954d20cbf
|
473730369dd6cfee1fecfea95f8885cfc62973d1
|
refs/heads/master
| 2022-04-25T13:47:26.538922
| 2020-04-27T06:50:40
| 2020-04-27T06:50:40
| 255,198,068
| 0
| 0
| null | 2020-04-13T00:45:52
| 2020-04-13T00:45:52
| null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
#!/usr/bin/python3
# Fabric script to deploy web_static to server
from fabric.api import *
import os
from time import strftime as ti
env.user = 'ubuntu'
env.hosts = ['34.73.242.80', '54.209.158.161']
def do_pack():
"""Fabric script to compress files in web_static"""
local("mkdir -p versions")
ver = ti("%Y%m%d%H%M%S")
arc = local("tar -cvzf versions/web_static_{}.tgz web_static".format(ver))
if arc.failed:
return False
else:
return ("versions/web_static_{}.tgz".format(ver))
def do_deploy(archive_path):
"""Fabric script to deploy web_static to servers"""
if os.path.exists(archive_path):
new_path = archive_path[9:]
de_path = '/data/web_static/releases/{}/'.format(new_path)[0:-4]
put(archive_path, '/tmp/')
run('mkdir -p {}'.format(de_path))
run('tar -xzf /tmp/{} -C {}'.format(new_path, de_path))
run('rm /tmp/{}'.format(new_path))
run('mv {}/web_static/* {}'.format(de_path, de_path))
run('rm -rf {}/web_static'.format(de_path))
run('rm -rf /data/web_static/current')
run('ln -s {} /data/web_static/current'.format(de_path))
print('New version deployed successfully!')
return True
return False
def deploy():
"""Created and distributes an archinve to two web servers"""
archive_path = do_pack()
if archive_path is False:
return False
return do_deploy(archive_path)
if __name__ == "__main__":
deploy()
|
[
"mosqueramanuel5@gmail.com"
] |
mosqueramanuel5@gmail.com
|
3f800cd02f80ce00ee6facfa70dab1e68fbb75ec
|
ea35f4925caeaa398df4f6f5245765e994ee62f7
|
/BruteForceModel/main.py
|
f05c74e397f51b174f178b803670126388384c65
|
[] |
no_license
|
jhubar/PI
|
5c19368dfaf5d095d08e5bdb7c7a6cfca9376b5e
|
2b2a0d75520e1af5408d42ea3ee10a22b3989045
|
refs/heads/master
| 2023-03-25T18:13:03.353705
| 2021-03-23T10:40:35
| 2021-03-23T10:40:35
| 297,372,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from SEIR import SEIR
import tools
if __name__ == "__main__":
# Create a model
model = SEIR()
# Import the dataset
model.import_dataset()
# Find an optimal value for initial state
#init_I = tools.initial_infected_estimator(model.dataset)
#model.fit()
predictions = model.predict(duration=model.dataset.shape[0])
print(model.get_parameters())
uncumul = []
uncumul.append(predictions[0][7])
for j in range(1, predictions.shape[0]):
uncumul.append(predictions[j][7] - predictions[j - 1][7])
# Plot:
time = model.dataset[:, 0]
# Adapt test + with sensit and testing rate
for j in range(0, len(time)):
uncumul[j] = uncumul[j] * model.s * model.t
# Plot cumul positive
plt.scatter(time, model.dataset[:, 1], c='blue', label='test+ data')
plt.plot(time, uncumul, c='blue', label='test+ pred')
# Plot hospit
plt.scatter(time, model.dataset[:, 3], c='red', label='hospit data')
plt.plot(time, predictions[:, 4], c='red', label='hospit pred')
plt.legend()
plt.show()
# Plot critical
plt.scatter(time, model.dataset[:, 5], c='green', label='critical data')
plt.plot(time, predictions[:, 5], c='green', label='critical pred')
plt.scatter(time, model.dataset[:, 6], c='black', label='fatalities data')
plt.plot(time, predictions[:, 6], c='black', label='fatalities pred')
plt.legend()
plt.show()
# Smoothing test:
unsmooth_data = model.dataset
# Import a smoothed dataset:
model.smoothing = True
model.import_dataset()
smooth_data = model.dataset
# plot the data
plt.scatter(smooth_data[:, 0], smooth_data[:, 1], color='blue', label='smoothed testing data')
plt.scatter(smooth_data[:, 0], unsmooth_data[:, 1], color='green', label='unsmoothed testing data')
plt.legend()
plt.show()
# Print initial data:
for i in range(0, 15):
print('Time: {} - smoothed: {} - original: {}'.format(i, smooth_data[i][1], unsmooth_data[i][1]))
# Check best initial number of infected:
tools.initial_infected_estimator(smooth_data)
|
[
"francoislievens@outlook.com"
] |
francoislievens@outlook.com
|
768579ce9651f223ea9ba98f96756513ebf95d7b
|
606d2d0ceb97eaa0cd442d125613ebd220621acd
|
/src/pythonDemo/dev_debug.py
|
af1fce439c4f921e2ce3e661f80c71d70db14dd4
|
[] |
no_license
|
cisin-python/Django-Demo
|
fba10a57a2ea6073e4308961312e2f94d780b492
|
fc995cfdad85da502b7f19444dc931c983193cd5
|
refs/heads/master
| 2021-01-22T11:55:02.299136
| 2015-01-16T14:46:53
| 2015-01-16T14:46:53
| 24,186,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
# # Settings for development with Debug Toolbar enabled
# #
# # To use this settings run the manage.py commands as the following:
# #
# # python manage.py COMMAND --settings=medtrics.settings.dev_debug
# #
# INSTALLED_APPS += (
# 'debug_toolbar',
# )
# MIDDLEWARE_CLASSES += (
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
# )
# DEBUG_TOOLBAR_PANELS = (
# 'debug_toolbar.panels.versions.VersionsPanel',
# 'debug_toolbar.panels.timer.TimerPanel',
# 'debug_toolbar.panels.settings.SettingsPanel',
# 'debug_toolbar.panels.headers.HeadersPanel',
# 'debug_toolbar.panels.request.RequestPanel',
# 'debug_toolbar.panels.sql.SQLPanel',
# 'debug_toolbar.panels.staticfiles.StaticFilesPanel',
# 'debug_toolbar.panels.templates.TemplatesPanel',
# 'debug_toolbar.panels.cache.CachePanel',
# 'debug_toolbar.panels.signals.SignalsPanel',
# 'debug_toolbar.panels.logging.LoggingPanel',
# 'debug_toolbar.panels.redirects.RedirectsPanel',
# )
# DEBUG_TOOLBAR_PATCH_SETTINGS = False
|
[
"cis@machin101"
] |
cis@machin101
|
397ff88a6fe7cdbd3ea47529ac39003f0d88056e
|
2b76a1307bd35d3e7de180cca4977dce382d7ddc
|
/python/Recrusive/S93.py
|
fbd070deb2166a2807469cf11f3e772daa0dddaf
|
[] |
no_license
|
liqiushui/leetcode
|
c2966135a5bb98f1d651db536144c4ed77ec63fe
|
be47d32fb11b9dcb8ab7368a4294461b5819c7b1
|
refs/heads/master
| 2020-04-09T07:10:59.202835
| 2018-12-27T11:27:33
| 2018-12-27T11:27:33
| 160,144,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
if not s:
return []
ret = []
self.helper(s, 0, [], ret)
return ret
def helper(self, s, pos, temp, result):
limit = 4
if len(temp) == limit and pos == len(s):
result.append(".".join(list(temp)))
return
if len(temp) >= limit:
return
i = pos
x = 0
while i + x < (len(s) + 1) and x <= 3:
t = s[i:i + x]
if self.isValid(t) and len(temp) < 4:
temp.append(t)
self.helper(s, pos + x, temp, result)
temp.pop()
x += 1
def isValid(self, s):
if len(s) >= 1 and len(s) <= 3:
if len(s) > 1 and s[0] == '0':
return False
n = int(s)
return n >= 0 and n <= 255
return False
if __name__ == "__main__":
s = Solution()
print(s.restoreIpAddresses("010010"))
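# Note added for clarity (not in the original file): for the input "010010" the only valid
# restorations are "0.10.0.10" and "0.100.1.0"; segments such as "01" or "010" are rejected
# by isValid() because of the leading zero.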
|
[
"xidianli@qq.com"
] |
xidianli@qq.com
|
2472db8ca01f6ac1c6f038ef9d9fc2e3edd5edf4
|
2374d6d17259f481fe0412ec4176b0811eba075f
|
/scripts_indentation/add_InterpolatedImage.py
|
0bd658adb92048fe560af26b10c4f5f846640611
|
[
"MIT"
] |
permissive
|
chakra34/Optimizer
|
29cecca3898ac42e47c435442870d1905fe0a47a
|
6302e0f4357cab08e75c53db3b5527660607a7e1
|
refs/heads/master
| 2020-04-21T13:39:24.209230
| 2019-09-27T23:47:58
| 2019-09-27T23:47:58
| 169,606,357
| 2
| 2
|
MIT
| 2019-09-27T23:47:59
| 2019-02-07T16:50:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,064
|
py
|
#!/usr/bin/env python
import numpy as np
import scipy
import damask
import os,sys,string
from subprocess import call
from optparse import OptionParser
from scipy.interpolate import griddata
scriptID = string.replace('$Id: add_InterpolatedImage.py 247 2016-03-22 21:45:34Z chakra34 $','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Converts point cloud data to a regular grid and gives the resulting image.
If pix_size is 1 and size = 3.0 X 3.0, then the dimension is 4 X 4.
""", version = scriptID)
parser.add_option('-c','--coords',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'column label of point coordinate vector')
parser.add_option('-d','--displacement',
dest = 'disp',
type = 'string', metavar = 'string',
help = 'column label of displacement vector')
parser.add_option('--grid',
dest = 'grid',
type = 'int', nargs = 2, metavar = 'int int',
help = 'interpolation grid')
parser.add_option('--size',
dest = 'size',
type = 'float', nargs = 2, metavar = 'float float',
help = 'interpolation size')
parser.add_option('--center',
dest = 'center',
type = 'float', nargs = 2, metavar = 'float float',
help = 'coordinates of interpolation patch center')
parser.add_option('-p','--pixelsize',
dest = 'pix_size',
type = 'string', metavar = 'string',
help = 'pixel size [20.0e-6/255]')
(options,filenames) = parser.parse_args()
#---------------------------------------- sanity checks ------------------------------------------------
if options.pix_size:
options.pix_size = float(eval(options.pix_size))
if options.grid:
options.size = tuple(options.pix_size * (x - 1) for x in options.grid)
elif options.size:
options.grid = tuple(round(x/options.pix_size + 1) for x in options.size)
options.size = tuple(options.pix_size * (x - 1) for x in options.grid)
else:
parser.error("Either dimension or size has to be specified if pixel size is given.")
else:
if options.size and options.grid:
options.pix_size = options.size/options.grid
else:
parser.error("Both dimension and size has to be specified if pixel size is not given.")
# --------------------------------------- loop over input files -------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
out_file = "out_"+os.path.basename(name)
try:
table = damask.ASCIItable(name = name,
outname = out_file,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header and data ------------------------------------------
table.head_read()
table.data_readArray([options.coords,options.disp])
table.data = 1e-6*table.data
if len(table.data[0]) != 6:
continue
#-------------------------------------------- process and store output ---------------------------------------
table.data[:,:3] += table.data[:,3:6] # add displacement to coordinates
if not options.center:
options.center = 0.5*(table.data[:,:2].max(axis=0)+table.data[:,:2].min(axis=0))
# l = np.array((table.data[:,positions[0]],table.data[:,positions[1]])).T
# hull = scipy.spatial.Delaunay(l).convex_hull # finding the convex hull to find the center of the point cloud data
# ps = set()
# for x,y in hull:
# ps.add(x)
# ps.add(y)
# ps = np.array(list(ps))
# if options.center == None :
# options.center = points[ps].mean(axis=0)
grid_x, grid_y = np.meshgrid(np.linspace(options.center[0] - 0.5 * options.size[0],
options.center[0] + 0.5 * options.size[0], num=options.grid[0]),
np.linspace(options.center[1] - 0.5 * options.size[1],
options.center[1] + 0.5 * options.size[1], num=options.grid[1]))
grid = np.vstack((grid_x.flatten(),grid_y.flatten())).T
interpolation = griddata(table.data[:,:2], table.data[:,2], grid , fill_value = 0.0,method='linear')
table.data = np.vstack((grid_x.flatten().T,
grid_y.flatten().T,
interpolation.T)).T
#--------------------------------------------------- output header info --------------------------------------
table.labels_clear()
table.labels_append(['{}_gridInterpolation'.format(1+i) for i in xrange(3)])
table.info_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
table.data_writeArray()
table.close()
|
[
"chakra34@egr.msu.edu"
] |
chakra34@egr.msu.edu
|
c4b26ee0fa67ac238d809d3ad4f4c64c3dbd38fd
|
836bbf6ad60fc6bb53b4306be0148b24dac9afe7
|
/hw2/train_pg_f18.py
|
a27cd6f876ae1bad4afe0c66aa59467882e0becf
|
[
"MIT"
] |
permissive
|
jperl/rl-homework
|
4517349aa58861dbadf8f31c6dd5fe37f5947537
|
6a5be151994cb8043686c563b777719f9338f314
|
refs/heads/master
| 2020-03-27T13:06:30.507241
| 2018-10-22T20:48:47
| 2018-10-22T20:49:24
| 146,590,585
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,645
|
py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
x = input_placeholder
with tf.variable_scope(scope):
for _ in range(n_layers):
x = tf.layers.dense(x, units=size, activation=activation)
output = tf.layers.dense(x, units=output_size, activation=output_activation)
return output
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # JON CHANGED TO FIX ISSUE
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch batch observations / actions / advantages in policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "discrete_policy", self.n_layers, self.size)
return sy_logits_na
else:
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "continuous_policy_mean", self.n_layers, self.size)
sy_logstd = tf.Variable(np.zeros(self.ac_dim), dtype=tf.float32, name="continuous_policy_std")
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.multinomial(sy_logits_na, 1)
sy_sampled_ac = sy_sampled_ac[:,0] # (batch_size, 1) -> (batch_size,)
else:
sy_mean, sy_logstd = policy_parameters
batch_size = tf.shape(sy_mean)[0]
z = tf.random_normal((batch_size, self.ac_dim))
sy_std = tf.exp(sy_logstd)
sy_sampled_ac = sy_mean + sy_std * z
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
# SEE https://youtu.be/XGmd3wcyDg8?list=PLkFD6_40KJIxJMR-j5A1mkxK26gh_qg37&t=4137
if self.discrete:
# use cross entropy loss to maximize the log probability for a categorical distribution
sy_logits_na = policy_parameters
labels = tf.one_hot(sy_ac_na, self.ac_dim)
sy_logprob_n = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=sy_logits_na)
else:
# use mean squared error to maximize the log probability for a gaussian
sy_mean, sy_logstd = policy_parameters
# calculate the z-score of the sampled actions under the policy
sy_z = (sy_ac_na - sy_mean) / tf.exp(sy_logstd)
# express the loss as a negative log-likelihood, so when we minimize it
# it will maximize the likelihood by pushing z towards 0, the mean of the distribution
# ex. z=10, loss=50 --> z=1, loss=0.5 --> z=0, loss=0
sy_logprob_n = 0.5 * tf.reduce_mean(tf.square(sy_z), axis=1)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
weighted_negative_likelihoods = tf.multiply(self.sy_logprob_n, self.sy_adv_n)
# the negative likelihoods are the correct sign because
# as we do gradient descent we will increase their likelihood proportional to the advantage
loss = tf.reduce_mean(weighted_negative_likelihoods)
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="target", dtype=tf.float32)
baseline_loss = tf.losses.mean_squared_error(labels=self.sy_target_n, predictions=self.baseline_prediction)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, { self.sy_ob_no: np.expand_dims(ob, axis=0) })
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
q_n = []
if self.reward_to_go:
for re_path in re_n:
# per path calculate the estimated rewards for the trajectory
path_est = []
# per time step in the path calculate the reward to go
for i, re in enumerate(re_path):
# ex. len(5) - 0 = 5
reward_to_go_len = len(re_path) - i
gamma = np.power(self.gamma, np.arange(reward_to_go_len))
re_to_go = np.sum(gamma * re_path[i:])
path_est.append(re_to_go)
# append the path's array of estimated returns
q_n.append(np.array(path_est))
else:
for re_path in re_n:
tprime_minus_one = np.arange(len(re_path))
gamma = np.power(self.gamma, tprime_minus_one)
re_discount = re_path * gamma
# all rewards are the same, so duplicate the sum per timestep
path_est = np.sum(re_discount) * np.ones_like(re_path)
# append the path's array of estimated returns
q_n.append(path_est)
q_n = np.concatenate(q_n)
return q_n
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
b_n = self.sess.run(self.baseline_prediction, { self.sy_ob_no: ob_no })
# the target network is predicting normalized targets
# so we can rescale them to match q-values mean & std
b_n = b_n * q_n.std() + q_n.mean()
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = (adv_n - np.mean(adv_n)) / np.std(adv_n)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
target_n = (q_n - q_n.mean()) / q_n.std()
self.sess.run(self.baseline_update_op, { self.sy_ob_no: ob_no, self.sy_target_n: target_n })
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
self.sess.run(self.update_op, { self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n })
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
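# A hedged invocation sketch (the script file name is assumed; the flags correspond to the
# argparse options defined in main() above):
#
#   python train_pg.py CartPole-v0 -n 100 -b 1000 -rtg --exp_name cartpole_rtg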
authors: ["perl.jonathan@gmail.com"] | author_id: perl.jonathan@gmail.com

blob_id: cb1255443d464075c4a0a21807a42108d40cac93 | directory_id: bc9b637285f1302386f9812eb41e71759148a442 | content_id: 5dde412e14bef9bd5bca3e761dc37b5bb6ba5bf1
path: /AnalysisScripts/py/cv67.py
detected_licenses: [] | license_type: no_license
repo_name: regkwee/LHC-Collimation | snapshot_id: 744aa431c60345aedfc0bc001bbe106a3104d3e2 | revision_id: 22e87527c0acd724ee7e99f12d681af60c5ebfaa | branch_name: refs/heads/master
visit_date: 2021-10-02T20:15:14.767031 | revision_date: 2018-11-30T12:37:02 | committer_date: 2018-11-30T12:37:02
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,470 | extension: py
content:
#!/usr/bin/python
#
# reweights by pressure profile
# Sept 16
#
# R Kwee, 2016
#
# ---------------------------------------------------------------------------------
import ROOT, sys, glob, os, math, helpers
from ROOT import *
# get function to read the data if 14 columns are present
from cv32 import getdata14c
from helpers import makeTGraph, mylabel, wwwpath
# --------------------------------------------------------------------------------
def cv67():
# --------------------------------------------------------------------------------
# density profile is given in the following format:
# densities per molecule as function of s-coordinate
# x,y,z, cx, cy, cz as function of (different s-coordinate)
# merge densities with coordinates
# note, that the source routine needs fluka units, ie *cm*!
# --------------------------------------------------------------------------------
energy = "4 TeV"
bgfile = '/afs/cern.ch/work/r/rkwee/HL-LHC/beam-gas-sixtrack/pressure_profiles_2012/LSS1_B1_Fill2736_Final.csv'
bgfile = "/Users/rkwee/Documents/RHUL/work/data/4TeV/LSS1_B1_Fill2736_Final.csv"
beamintensity = 2e14
energy = " 3.5 TeV "
bgfile = "/Users/rkwee/Documents/RHUL/work/HL-LHC/runs/TCT/LSS1_B1_fill_2028-sync_rad_and_ecloud.csv"
beamintensity = 1.66e14 # https://acc-stats.web.cern.ch/acc-stats/#lhc/fill-details 2028
debug = 0
data = getdata14c(bgfile)
print 'data keys are',data.keys()
nb_s = len(data['s'])
print 'number of s values', nb_s
# atomic densities
rho_C, rho_H, rho_O = [0 for i in range(nb_s)],[0 for i in range(nb_s)],[0 for i in range(nb_s)]
s = [-9999 for i in range(nb_s)]
cf = 1.
#for i in [1, 100, 300,500]:
for i in range(1,nb_s):
# get the data, convert to cm3
try:
if debug:
print 'i = ', i
print "data['rho_H2'][i]", data['rho_H2'][i]
print "data['rho_CH4'][i]", data['rho_CH4'][i]
print "data['rho_CO'][i]", data['rho_CO'][i]
print "data['rho_CO2'][i]", data['rho_CO2'][i]
rho_H2 = cf * float(data['rho_H2'][i])
rho_CH4 = cf * float(data['rho_CH4'][i])
rho_CO = cf * float(data['rho_CO'][i])
rho_CO2 = cf * float(data['rho_CO2'][i])
# compute atomic rhos and translate nitrogen equivalent density
rho_H[i] = 2.0*rho_H2
rho_H[i] += 4.0*rho_CH4
rho_C[i] = 1.0*rho_CH4
rho_C[i] += 1.0*rho_CO
rho_C[i] += 1.0*rho_CO2
rho_O[i] = 1.0*rho_CO
rho_O[i] += 2.0*rho_CO2
s[i] = float(data['s'][i])
except ValueError:
continue
# --
# calculate the scaled number
# unscaled inf
hname, nbins, xmin, xmax = 'muons', 523, 22.5, 550
hist = TH1F(hname, hname, nbins, xmin, xmax)
hist.Sumw2()
hist.GetXaxis().SetTitle('s [m]')
datafile = '/afs/cern.ch/project/lhc_mib/valBG4TeV/ir1_BG_bs_4TeV_20MeV_b1_nprim5925000_67'
datafile = '/Users/rkwee/Documents/RHUL/work/HL-LHC/runs/TCT/ir1_BG_bs_4TeV_20MeV_b1_nprim5925000_67'
datafile = "/Users/rkwee/Documents/RHUL/work/HL-LHC/runs/TCT/beam_gas_3.5TeV_IR1_to_arc_20MeV_100M_nprim7660649_66"
bbgFile = datafile + ".root"
rfile = TFile.Open(bbgFile, "READ")
hists = []
cnt = 0
mt = rfile.Get('particle')
particleTypes = [10, 11]
hname = 'muons_flatpressure'
hist_flat = hist.Clone(hname)
hist_pint = hist.Clone("pint")
hist_e100 = hist.Clone("e100")
hist_e100p = hist.Clone("e100p")
cuts = "(particle == 10 || particle == 11) && energy_ke > 100.0"
var = 'z_interact * 0.01'
print "INFO: applying", cuts, "to", var, "in", "e100"
mt.Project("e100", var, cuts)
cuts = "(particle == 10 || particle == 11) && energy_ke > 0.02"
print "INFO: applying", cuts, "to", var, "in", hname
mt.Project(hname, var, cuts)
sigma_N = 286.e-31
sigma_N_4TeV = 289.e-31
Trev = 2*math.pi/112450
# create histogram with same axis for pint
    # pint_tot_atomic = calc_pint_tot(rho_C, rho_H, rho_O)  # NOTE: calc_pint_tot is not defined or imported in this file, and the result was unused
# N2Eq_tot = [ float(data['CO_N2Eq'][i]) + float(data['CO2_N2Eq'][i]) + float(data['CH4_N2Eq'][i]) + float(data['H2_N2Eq'][i]) for i in range(1,len(data['s'])) ]
# pint_tot = [sigma_N*j/Trev for j in range(len(N2Eq_tot))]
rho_tot = [ float(data['rho_CO'][i]) + float(data['rho_CO2'][i]) + float(data['rho_CH4'][i]) + float(data['rho_H2'][i]) for i in range(1,len(data['s'])) ]
pint_tot = [sigma_N*rho/Trev for rho in rho_tot]
pint_incomingbeam = {}
for i,sPos in enumerate(s):
spos = float(sPos)
if spos < 0.:
z = -spos
pint_incomingbeam[z] = pint_tot[i]
zbin = hist_pint.FindBin(z)
hist_pint.SetBinContent(zbin, pint_incomingbeam[z])
# first value is for arc
arcvalue = pint_tot[1]
startarc = 260.
startarcBin = hist_pint.FindBin(startarc)
for i in range(startarcBin, nbins-1): hist_pint.SetBinContent(i,arcvalue)
nprim = float(bbgFile.split('nprim')[-1].split('_')[0])
Trev = 2*math.pi/112450
kT = 1.38e-23*300
# compute normalisation fct for each bin
for i in range(1,nbins+1):
m = hist_flat.GetBinContent(i)
scale = beamintensity * hist_pint.GetBinContent(i)
hist.SetBinContent(i,scale * m)
hist_e100p.SetBinContent(i, scale * hist_e100.GetBinContent(i))
if i<11:
print "pint in bin", i, "is", hist_pint.GetBinContent(i)
print "pint * beamintensity is", scale
print "pint * beamintensity * m is", scale*m
cv = TCanvas( 'cv', 'cv', 2100, 900)
cv.SetGridy(1)
cv.SetGridx(1)
x1, y1, x2, y2 = 0.7, 0.65, 0.9, 0.88
mlegend = TLegend( x1, y1, x2, y2)
mlegend.SetFillColor(0)
mlegend.SetFillStyle(0)
mlegend.SetLineColor(0)
mlegend.SetTextSize(0.035)
mlegend.SetShadowColor(0)
mlegend.SetBorderSize(0)
ytitle = "particles/m/BG int."
YurMin, YurMax = 2e2, 9e6
hist.GetYaxis().SetRangeUser(YurMin,YurMax)
XurMin,XurMax = 0.,545.
hist.GetXaxis().SetRangeUser(XurMin,XurMax)
hist_flat.SetLineColor(kRed)
hist_flat.GetYaxis().SetTitle(ytitle)
hist.GetYaxis().SetTitle(ytitle)
hist_e100p.SetFillColor(kRed-3)
hist_e100p.SetLineColor(kRed-3)
# hist_flat.Draw("hist")
hist.Draw("hist")
hist_e100p.Draw("histsame")
#hist_pint.GetXaxis().SetRangeUser(1.e-13,2.5e-11)
#hist_pint.Draw("l")
lg, lm = "#mu^{#pm}", 'l'
mlegend.AddEntry(hist_flat, lg, lm)
lg, lm = "#mu^{#pm} E > 100 GeV", 'f'
mlegend.AddEntry(hist_e100p, lg, lm)
gPad.SetLogy(1)
gPad.RedrawAxis()
lab = mylabel(42)
# lab.DrawLatex(0.45, 0.9, energy+'beam-gas' )
lab.DrawLatex(0.4, 0.82, energy )
#mlegend.Draw()
pname = wwwpath + 'TCT/beamgas/pressure_profiles_2012/muonrates.pdf'
pname = "/Users/rkwee/Documents/RHUL/work/HL-LHC/LHC-Collimation/Documentation/ATS/HLHaloBackgroundNote/figures/4TeV/reweighted/muonrates.pdf"
pname = "/Users/rkwee/Documents/RHUL/work/HL-LHC/LHC-Collimation/Documentation/ATS/HLHaloBackgroundNote/figures/4TeV/reweighted/muonrates2011.pdf"
# pname = "/Users/rkwee/Documents/RHUL/work/HL-LHC/LHC-Collimation/Documentation/ATS/HLHaloBackgroundNote/figures/4TeV/reweighted/pint2011.pdf"
print('Saving file as ' + pname )
cv.Print(pname)
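# Minimal invocation sketch (an assumption, not in the original file): cv67() is only
# defined above, so running this script directly would need an explicit call, e.g.
#
#   if __name__ == '__main__':
#       cv67()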
authors: ["Regina.Kwee@cern.ch"] | author_id: Regina.Kwee@cern.ch

blob_id: 2843a54ecf8ef912bbe42609b777510055fc7424 | directory_id: 07065baf1ba995691ffbd19afd90eacbcb81f84c | content_id: 52a8793332b05cfa3cbdc9e9a694d6b8dcbb4d70
path: /Liquid/images/generate_HTML_image_links.py
detected_licenses: [] | license_type: no_license
repo_name: theloracle/laurel-code-foo | snapshot_id: 71e2a1d25728e3be0ad681f85706968b0cd0bac4 | revision_id: 9857bff6ef8b40149d2f9d60f56d248bf4eb06da | branch_name: refs/heads/master
visit_date: 2021-01-19T14:07:47.629223 | revision_date: 2012-04-30T09:17:50 | committer_date: 2012-04-30T09:17:50
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 426 | extension: py
content:
while 1:
color = raw_input("Color?: ")
end = int(raw_input("# of imgs?: "))
for x in range(1, end +1):
print '<li>Bot'+str(x)+'</li>'
print '<a href="images/'+color+str(x)+'.jpg" rel="lightbox['+color+']"title="Bot'+str(x)+'">'
print '<img src="images/'+color+str(x)+'.jpg" rel="lightbox" height=50% width= 50%/></a>'
y = raw_input("Done?: ")
if y == "y":
break
authors: ["eibachla@gmail.com"] | author_id: eibachla@gmail.com

blob_id: a70f7d20f7d65f7a4532b40577899f7f0a8771dc | directory_id: d23d974fefa2ff0058b849a6584a0dc24458fd00 | content_id: 9a797522e85a16c1df53b9afbd1bc41d9a03bae8
path: /src/gym-snake/gym_snake/core/new_world.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: jdubkim/Self-play-on-Multi-Snakes-Environment | snapshot_id: fa672e5b5e2e88487d9426fffaf1a5a33c464867 | revision_id: 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | branch_name: refs/heads/master
visit_date: 2020-03-22T02:45:22.167284 | revision_date: 2020-01-12T13:55:54 | committer_date: 2020-01-12T13:55:54
github_id: 139,391,281 | star_events_count: 2 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,180 | extension: py
content:
import random
from copy import copy
import numpy as np
class Snake:
def __init__(self, snake_id, start_pos, direction, start_length=3):
self.snake_id = snake_id
self.start_pos = start_pos
self.start_length = start_length
self.snake_length = start_length
self.alive = True
self.hunger = 0
self.snake_body = [start_pos]
self.direction = direction
current_pos = start_pos
#for i in range(1, start_length):
# current_pos = tuple(np.subtract(current_pos, self.direction))
# self.snake_body.append(current_pos)
# print("snake body is ", self.snake_body)
def step(self, action):
if len(self.snake_body) == 0:
return 0
head = self.snake_body[0]
new_head = head
print("action is ", action)
if action == 1 and self.direction != (-1, 0):
self.direction = (1, 0)
elif action == 2 and self.direction != (0, -1):
self.direction = (0, 1)
elif action == 3 and self.direction != (1, 0):
self.direction = (-1, 0)
elif action == 4 and self.direction != (0, 1):
self.direction = (0, -1)
if self.direction != (0, 0):
new_head = (head[0] + self.direction[0], head[1] + self.direction[1])
else:
print("direction is 0, 0")
return new_head
class World:
REWARD = {'dead': -1, 'move': 0, 'eat': 1}
DIRECTIONS = [(-1, 0), (0, 1), (1, 0), (0, -1)]
FOOD = 255
def __init__(self, size, n_snakes, n_fruits, seed, is_competitive=False):
self.size = size
self.dim = size[0] # or size[1]
self.world = np.zeros(size)
self.np_rand = seed
self.is_competitive = is_competitive
self.snakes = []
self.dead_snakes = []
self.fruits = []
self.time_step = 0
# Initialise snakes
for i in range(n_snakes):
self.register_snake(i)
# Initialise fruits
for i in range(n_fruits):
self.fruits.append(self.get_safe_cell())
def register_snake(self, snake_id):
pos = self.get_rand_cell()
# while not pos in self.get_available_pos():
# pos = (random.randint(snake_size, self.size[0] - snake_size),
# random.randint(snake_size, self.size[1] - snake_size))
# direction = self.DIRECTIONS[random.randrange(4)]
new_snake = Snake(snake_id, pos, (0, 0))
self.snakes.append(new_snake)
return new_snake
def move_snakes(self, actions):
reward = 0.0
done = False
# snake == self.snakes[i]
for i, snake in enumerate(self.snakes):
new_snake_head = snake.step(actions[i])
if i == 0: # for main agent
reward = self.get_status_fruit(snake, new_snake_head)
else:
self.get_status_fruit(snake, new_snake_head)
for i, snake in enumerate(self.snakes):
snake.alive = self.get_status_alive(snake)
if not snake.alive:
if not snake in self.dead_snakes:
self.dead_snakes.append(snake)
            if i == 0:
                # NOTE: this reports snake.alive as "done"; gym-style callers usually expect done = not snake.alive
                done = snake.alive
return reward, done
def get_status_alive(self, snake):
if len(snake.snake_body) == 0:
return False
head = snake.snake_body[0]
if (max(head) > self.dim - 1) or (min(head) < 0):
snake.snake_body = []
return False
other_snakes = copy(self.snakes)
other_snakes.remove(snake)
for (s_idx, o_snake) in enumerate(other_snakes):
if head in o_snake.snake_body:
snake.snake_body = []
print("in other snakes")
return False
if head in snake.snake_body[1:]:
return False
return True
def get_status_fruit(self, snake, new_snake_head):
if new_snake_head == 0:
return 0
reward = 0.0
eaten_fruits = []
for i, fruit in enumerate(self.fruits):
if new_snake_head == fruit:
eaten_fruits.append(i)
reward += 1.0
snake.snake_length += 2
if len(snake.snake_body) >= snake.snake_length:
snake.snake_body.pop()
# print("new snake head is ", new_snake_head)
snake.snake_body.insert(0, new_snake_head)
for new_fruit_index in eaten_fruits:
self.fruits[new_fruit_index] = self.get_safe_cell()
return reward
def get_obs_for_snake(self, idx):
view_dim = self.dim + 2
obs = np.full((view_dim, view_dim, 3), 0, dtype='uint8')
for fruit in self.fruits:
self.render_fruit(obs, fruit)
for i, snake in enumerate(self.snakes):
if i == idx:
color = Color.get_snake_color(0)
else:
color = Color.get_snake_color(1)
self.render_snake(obs, self.snakes[i], color)
for i in range(view_dim):
color = Color.get_color('wall')
obs[i][0] = color
obs[i][self.dim + 1] = color
obs[0][i] = color
obs[self.dim + 1][i] = color
return obs
def get_obs_world(self):
view_dim = self.dim + 2
obs = np.full((view_dim, view_dim, 3), 0, dtype='uint8')
for fruit in self.fruits:
self.render_fruit(obs, fruit)
for i, snake in enumerate(self.snakes):
color = Color.get_snake_color(i)
self.render_snake(obs, snake, color)
for i in range(view_dim):
color = Color.get_color('wall')
obs[i][0] = color
obs[i][self.dim + 1] = color
obs[0][i] = color
obs[self.dim + 1][i] = color
return obs
def get_multi_snake_obs(self):
total_obs = []
for i, snake in enumerate(self.snakes):
total_obs.append(self.get_obs_for_snake(i))
total_obs = np.concatenate(total_obs, axis=2)
return total_obs
#t = np.concatenate((self.get_obs_for_snake(0), self.get_obs_for_snake(1)), axis=2)
#return t # concatenate two arrays (12 * 12 * 3) convert it to (12 * 12 * 9)
def render_snake(self, obs, snake, color):
if len(snake.snake_body) == 0 or not snake.alive:
return
head = snake.snake_body[0]
for body in snake.snake_body:
obs[body[0] + 1][body[1] + 1] = color[0]
obs[head[0] + 1][head[1] + 1] = color[1]
def render_fruit(self, obs, fruit):
obs[fruit[0] + 1][fruit[1] + 1] = Color.get_color('fruit')
def get_rand_cell(self):
return self.np_rand.randint(self.dim), self.np_rand.randint(self.dim)
def get_safe_cell(self):
available_pos = list(range(self.dim * self.dim))
for snake in self.snakes:
if len(snake.snake_body) > 0:
used_cells = list(map(lambda x: x[1] * self.dim + x[0], snake.snake_body))
available_pos = np.setdiff1d(available_pos, used_cells)
x = 0
if len(available_pos) > 0:
x = available_pos[self.np_rand.randint(len(available_pos))]
return x % self.dim, x // self.dim
def get_available_pos(self):
available_pos = set([(i, j) for i in range(self.size[0]) for j in range(self.size[1])])
for snake in self.snakes:
available_pos = available_pos - set(snake.snake_body)
return available_pos
class Color:
def get_color(key):
colors = {'fruit': [255, 0, 0],
'wall': [255, 255, 255],
'empty': [0, 0, 0]
}
return colors[key]
def get_snake_color(idx):
p_colors = {0: [[0, 204, 0], [191, 242, 191]], # Green
1: [[0, 51, 204], [128, 154, 230]], # Blue
2: [[204, 0, 119], [230, 128, 188]], # Magenta
3: [[119, 0, 204], [188, 128, 230]], # Violet
}
return p_colors[idx]
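# A minimal usage sketch (assumptions: `seed` is a numpy RandomState-like object, since
# get_rand_cell() calls seed.randint; the sizes, counts and actions are illustrative only):
#
#   rng = np.random.RandomState(0)
#   world = World(size=(10, 10), n_snakes=2, n_fruits=3, seed=rng)
#   reward, done = world.move_snakes([1, 2])   # one action per registered snake
#   frame = world.get_obs_world()              # (dim+2, dim+2, 3) uint8 RGB image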
authors: ["jeewoo1998@gmail.com"] | author_id: jeewoo1998@gmail.com

blob_id: 3cf83f944276bc5b5775acc12ffc5cb5e1939a77 | directory_id: a9e47206ab3434212595f11c5ddb5250876093e1 | content_id: c10573b1eec31defc4129dc78d9a7f6659ea63ae
path: /mapping_server/urls.py
detected_licenses: [] | license_type: no_license
repo_name: aweb-pj/mapping-server2 | snapshot_id: 5a760fcf5928bad1187500dffd250cd031b8724c | revision_id: b400ad135debc13087026b9008a77163701b5c39 | branch_name: refs/heads/master
visit_date: 2020-03-29T00:59:41.096620 | revision_date: 2017-06-22T17:09:25 | committer_date: 2017-06-22T17:09:25
github_id: 94,637,799 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 826 | extension: py
content:
"""mapping_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^mapping/', include('action.urls')),
url(r'^admin/', admin.site.urls),
]
authors: ["913575864@qq.com"] | author_id: 913575864@qq.com

blob_id: bc181da1b46971b964437590de20297cc7848837 | directory_id: e21e962ed98a5a8c6c1fc0d7d6885e33c5a77469 | content_id: 468391b5890aa15c28091ed3af9e80421d6421eb
path: /Voting_ganache/node_modules/ganache-cli/node_modules/ganache-core/node_modules/web3-providers-ws/node_modules/websocket/build/config.gypi
detected_licenses: ["Apache-2.0", "MIT"] | license_type: permissive
repo_name: perrywang123/FinalProject | snapshot_id: 4624d2ea26aa5bc038046d3cea5b096560ca75bf | revision_id: f7934bcc48dda509b7d9e750d0626b43a29dfc30 | branch_name: refs/heads/master
visit_date: 2020-04-14T04:46:55.598766 | revision_date: 2018-12-31T07:02:31 | committer_date: 2018-12-31T07:02:31
github_id: 163,645,139 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,966 | extension: gypi
content:
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "true",
"node_shared_http_parser": "true",
"node_shared_libuv": "true",
"node_shared_nghttp2": "true",
"node_shared_openssl": "true",
"node_shared_zlib": "true",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/wpy/.node-gyp/8.11.4",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.4.1 node/v8.11.4 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"noproxy": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/wpy/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npm.taobao.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"dry_run": "",
"scope": "",
"registry": "https://registry.npm.taobao.org/",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/wpy/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "8.11.4",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"globalconfig": "/etc/npmrc",
"dev": "",
"init_module": "/home/wpy/.npm-init.js",
"parseable": "",
"globalignorefile": "/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
authors: ["1731611872@qq.com"] | author_id: 1731611872@qq.com

blob_id: 60316d664a4795400d93a8c1d5c0da68d691e61a | directory_id: 9483919096697dd198aa76c63e4292515874b247 | content_id: af0751d5def5b6d425e4dfc2c897dc76c7eaa42b
path: /Basic5_Nested_Loops/1380_10826077(AC).py
detected_licenses: [] | license_type: no_license
repo_name: ankiwoong/Code_UP | snapshot_id: 0811c7b9a2ce4c8e5a8dc2b33dfffcdccfca31aa | revision_id: 09cade40d9f42a915294adf39c9a2b9d3e4cae80 | branch_name: refs/heads/master
visit_date: 2020-12-01T15:49:16.615354 | revision_date: 2020-04-10T01:49:08 | committer_date: 2020-04-10T01:49:08
github_id: 230,687,855 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 197 | extension: py
content:
import io, sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
k = int(input())
for i in range(1, 7):
for j in range(1, 7):
        if i + j == k:
print(i, j)
authors: ["ankiwoong@gmail.com"] | author_id: ankiwoong@gmail.com

blob_id: 5a3006c51a82a94c798fc606ce974ff6a94620fc | directory_id: 5fa9efa61eb1fb53c3507bcb1fff3522bb40837f | content_id: 4b45076ddc11c4ac7061a84bfef1de4f37c333d3
path: /app.py
detected_licenses: [] | license_type: no_license
repo_name: abuarbaz/Kubernetes | snapshot_id: 964cd93aab53fd96dee46395f0328b77022056f4 | revision_id: 08e6763abd103a71c0146f7f97d0ddff2853fa54 | branch_name: refs/heads/main
visit_date: 2023-03-08T09:24:11.272969 | revision_date: 2021-02-19T15:27:22 | committer_date: 2021-02-19T15:27:22
github_id: 340,408,244 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,752 | extension: py
content:
#!/usr/bin/env python3
from bottle import run, get, post, response
from prometheus_client import Counter, generate_latest, CollectorRegistry
import os
import redis
rcon = redis.StrictRedis(
host=os.getenv("REDIS_HOST", default="localhost"),
port=os.getenv("REDIS_PORT", default=6379),
password=os.getenv("REDIS_PASSWORD", default=""),
socket_connect_timeout=5,
socket_timeout=5,
)
registry = CollectorRegistry()
c = Counter('http_requests_total', 'HTTP requests total', ['method', 'endpoint'], registry=registry)
@get('/info/liveness')
def liveness():
c.labels('GET', '/info/liveness').inc()
return "healthy"
@get('/info/readiness')
def readiness():
c.labels('GET', '/info/readiness').inc()
try:
rcon.ping()
except redis.exceptions.RedisError:
response.status = 503
body = "not ready"
else:
body = "ready"
return body
@post('/increment')
def increment():
c.labels('POST', '/increment').inc()
try:
rcon.incr("test-key", 1)
except redis.exceptions.RedisError:
response.status = 500
body = "Failed to increment redis key"
else:
response.status = 200
body = "ok"
return body
@get('/getkey')
def getkey():
c.labels('GET', '/getkey').inc()
try:
value = rcon.get("test-key") or "0"
except redis.exceptions.RedisError:
response.status = 500
body = "Failed to get value of a key"
else:
response.status = 200
body = value
return body
@get('/info/metrics')
def getmetrics():
return generate_latest(registry)
if __name__ == "__main__":
run(
host=os.getenv("HOST", default="0.0.0.0"),
port=os.getenv("PORT", default="8080"),
)
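# A hedged smoke-test sketch (assumptions: the service is reachable on localhost:8080,
# Redis is up, and the `requests` package is installed; none of this is part of the file):
#
#   import requests
#   print(requests.get("http://localhost:8080/info/liveness").text)   # "healthy"
#   requests.post("http://localhost:8080/increment")
#   print(requests.get("http://localhost:8080/getkey").text)          # current counter value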
authors: ["arbazmohammed17@gmail.com"] | author_id: arbazmohammed17@gmail.com

blob_id: 59a96f4dc9ceea0df78b9b607a5ffb2d71df4e00 | directory_id: 02e092a20950f1108e526950435708f653628bd5 | content_id: 608e900ad05422ce07fa0ee8127f9345115bb4e7
path: /flaskr/apps/auth/view_auth.py
detected_licenses: [] | license_type: no_license
repo_name: chin-saya/flask-sample | snapshot_id: 7031beacf5cd21f366db17dd01d6a279e0d5ec15 | revision_id: f6ba3db731cd9598f8a8df5487f91a37ba81e2a5 | branch_name: refs/heads/master
visit_date: 2020-03-25T13:49:02.518296 | revision_date: 2019-01-02T03:39:15 | committer_date: 2019-01-02T03:39:15
github_id: 143,844,293 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,368 | extension: py
content:
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from . import auth
from exts import db
from models import User
from flask import flash, redirect, render_template, request, url_for
from flask_login import login_user, logout_user
from utils.common import is_safe_url
from werkzeug.exceptions import abort
@auth.route('/register/', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
username = request.form.get("username")
password = request.form.get("password")
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif User.query.filter(User.username == username).first() is not None:
error = 'User {} is already registered.'.format(username)
if error is None:
user = User(username=username, password=password)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
@auth.route('/login/', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form.get("username")
password = request.form.get("password")
next = request.args.get("next")
error = None
user = User.query.filter(User.username == username).first()
        # Login verification failed
if user is None:
error = 'Incorrect username.'
elif not user.verify_password(password):
error = 'Incorrect password.'
        # Login verification succeeded
if error is None:
login_user(user)
            # Validate "next" to prevent an open-redirect attack
if not is_safe_url(next):
abort(400)
return redirect(next or url_for('blog.index'))
flash(error)
return render_template('auth/login.html')
# before_request can only be applied to requests that belong to this blueprint.
# To hook application-wide requests from inside a blueprint, use before_app_request.
# @auth.before_app_request
# def load_logged_in_user():
# user_id = session.get('user_id')
#
# if user_id is None:
# g.user = None
# else:
# g.user = User.query.get(user_id)
@auth.route('/logout/')
def logout():
logout_user()
return redirect(url_for('blog.index'))
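# For reference, a minimal sketch of what the imported utils.common.is_safe_url helper
# might look like (an assumption; the real implementation is not part of this file).
# The idea is to accept only relative targets or targets on the current host:
#
#   from urllib.parse import urlparse
#
#   def is_safe_url(target, host=None):
#       if not target:
#           return True
#       parts = urlparse(target)
#       return not parts.netloc or parts.netloc == host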
authors: ["chenhaiwen@cecgw.cn"] | author_id: chenhaiwen@cecgw.cn

blob_id: b1f3e07beb6a1adce6360ecb5ecd3b15ccd21135 | directory_id: ab19c316ba8995aebf5c0d6c861a65e3853cef34 | content_id: 12ae60c0ab1f6860bb7492b5dc14e4ffcf053ceb
path: /assignment4/ex38.py
detected_licenses: [] | license_type: no_license
repo_name: joshuascodes/afs505_u1 | snapshot_id: 0466721fe7300e2b48cc638900296a5be4bdc8c8 | revision_id: 2a2dd2ac6c3471ff49df501cfa5c9b87b6b880f4 | branch_name: refs/heads/master
visit_date: 2020-12-15T17:20:33.017506 | revision_date: 2020-07-07T21:02:35 | committer_date: 2020-07-07T21:02:35
github_id: 234,392,663 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 580 | extension: py
content:
ten_things = "apples oranges crows telephone light sugar"
print("wait there are not 10 things in that list, lets fix that")
stuff = ten_things.split(' ')
more_stuff = ["day", "night", "song", "frisbee", "corn", "banana", "girl", "boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print("adding: ", next_one)
stuff.append(next_one)
print(f"There are {len(stuff)} items now")
print("there we go: ", stuff)
print("lets do some things with stuff.")
print(stuff[1])
print(stuff[-1])
print(stuff.pop())
print('#'.join(stuff[3:5]))
authors: ["noreply@github.com"] | author_id: joshuascodes.noreply@github.com

blob_id: 0ed4160351434c43539ae0ed376abcfc88ea6a88 | directory_id: f0dcf51ca12abf81e92f70d4bb61267ee1934a2c | content_id: 1a39426c9b42dc1e976a225a303b2a83287696d9
path: /herramienta/migrations/0001_initial.py
detected_licenses: [] | license_type: no_license
repo_name: leo452/Final_ConectaTE_group4 | snapshot_id: de779c2d39854966b164875ded26fcd49ffdc3db | revision_id: 92f520d7a660e10ed5bc89732cc29663efd0e011 | branch_name: refs/heads/master
visit_date: 2020-03-18T13:19:16.570147 | revision_date: 2018-05-21T03:28:52 | committer_date: 2018-05-21T03:28:52
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,462 | extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-25 14:40
from __future__ import unicode_literals
import cuser.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_model_changes.changes
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('descripcion', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
},
),
migrations.CreateModel(
name='Herramienta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('descripcion', models.CharField(blank=True, max_length=1000, null=True)),
('informacion', models.CharField(blank=True, max_length=1000, null=True)),
('usos', models.CharField(blank=True, max_length=1000, null=True)),
('enlaces', models.CharField(blank=True, max_length=1000, null=True)),
('documentacion', models.CharField(blank=True, max_length=1000, null=True)),
('estado', models.IntegerField(choices=[(0, 'Borrador'), (1, 'Revisi\xf3n'), (2, 'Publica')], default=0)),
('licencia', models.CharField(blank=True, max_length=1000, null=True)),
('descarga_url', models.CharField(blank=True, max_length=1000, null=True)),
('creacion', models.DateField(auto_now_add=True)),
('owner', cuser.fields.CurrentUserField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_mymodels', to=settings.AUTH_USER_MODEL)),
('tipo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='herramienta.Categoria')),
],
options={
'verbose_name': 'Herramienta',
'verbose_name_plural': 'Herramientas',
},
bases=(django_model_changes.changes.ChangesMixin, models.Model),
),
migrations.CreateModel(
name='HerramientaEdicion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(blank=True, max_length=100, null=True)),
('descripcion', models.CharField(blank=True, max_length=1000, null=True)),
('informacion', models.CharField(blank=True, max_length=1000, null=True)),
('usos', models.CharField(blank=True, max_length=1000, null=True)),
('enlaces', models.CharField(blank=True, max_length=1000, null=True)),
('documentacion', models.CharField(blank=True, max_length=1000, null=True)),
('licencia', models.CharField(blank=True, max_length=1000, null=True)),
('descarga_url', models.CharField(blank=True, max_length=1000, null=True)),
('creacion', models.DateField(auto_now_add=True)),
('observacion', models.CharField(blank=True, max_length=1000, null=True, verbose_name='Observacion')),
('herramienta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herramienta.Herramienta')),
],
options={
'verbose_name': 'Edicion de herramienta',
'verbose_name_plural': 'Ediciones de herramienta',
},
bases=(django_model_changes.changes.ChangesMixin, models.Model),
),
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('descripcion', models.CharField(blank=True, max_length=500, null=True)),
],
options={
'verbose_name': 'Revision',
'verbose_name_plural': 'Revisiones',
},
),
migrations.AddField(
model_name='herramientaedicion',
name='revision',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='herramienta.Revision', verbose_name='Estado de revision'),
),
migrations.AddField(
model_name='herramientaedicion',
name='tipo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='herramienta.Categoria'),
),
migrations.AddField(
model_name='herramientaedicion',
name='usuarioHerramienta',
field=cuser.fields.CurrentUserField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_edit_model', to=settings.AUTH_USER_MODEL),
),
]
authors: ["tj.marrugo10@uniandes.edu.co"] | author_id: tj.marrugo10@uniandes.edu.co

blob_id: a198846c54320a116d40ab861e75c935e1bb7b70 | directory_id: d6b7981e76a6559c256ad48ffd74b6109c87fedd | content_id: 08d077ceafaec5beae899a579a946265efbdefbf
path: /1/1.2/1.2.b.py
detected_licenses: [] | license_type: no_license
repo_name: outtrip-kpsv/brainskills | snapshot_id: ea5e88d8f231fd441835a8ebcf4e1875d15d5e31 | revision_id: b329cc2a1012d8285cda1e0d7fc952f2e95db08e | branch_name: refs/heads/master
visit_date: 2022-12-31T19:31:36.434763 | revision_date: 2020-10-15T19:52:39 | committer_date: 2020-10-15T19:52:39
github_id: 285,710,485 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 131 | extension: py
content:
def sign(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
print(sign(int(input())))
authors: ["kpsv.igor@gmail.com"] | author_id: kpsv.igor@gmail.com

blob_id: 1add8c75002fbce3f859b38780f7581b5e520bd9 | directory_id: 5641ea4f21cbb541286d26571b53c9242dbc805c | content_id: cc05a70ae5cbc389876ea6213a726c3fd4596cab
path: /model/data_providers.py
detected_licenses: [] | license_type: no_license
repo_name: Masnerin/Testing_the_site_LiteCart | snapshot_id: 75f466fdb291aa0cd4fe616b6aa287d299d87a86 | revision_id: 4f2fd1901a7f55fb879430395b31a2d7853d7beb | branch_name: refs/heads/master
visit_date: 2020-03-25T03:11:22.960244 | revision_date: 2018-08-11T12:51:52 | committer_date: 2018-08-11T12:51:52
github_id: 143,327,275 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,308 | extension: py
content:
from model.input_data import Customer, Admin, Product
import os
import time
import random
# Function that returns the current time in milliseconds:
def current_time_millis():
return int(round(time.time() * 1000))
# Function that generates a unique e-mail address:
def gen_email():
array = [chr(i) for i in range(65, 91)]
random.shuffle(array)
key = ""
for i in range(7):
key += array.pop()
email = key.lower() + '@random-email.com'
return email
# Function that generates a unique user name:
def gen_user_name():
array = [chr(i) for i in range(65, 91)]
random.shuffle(array)
key = ""
for i in range(7):
key += array.pop()
user_name = key.title()
return user_name
# Function that generates a unique code (32 characters: Latin letters and digits):
def random_kod():
kod = ''
for x in range(32):
kod = kod + random.choice(list('1234567890abcdefghigklmnopqrstuvyxwz'))
return kod
def file_address():
    address = os.path.join(os.getcwd(), "product_new.jpg")  # portable join; the original concatenated a backslash literal
return address
admin = [Admin(username="admin",
password="admin"
)
]
new_customer = [Customer(username1="admin",
password1="admin",
firstname="Emma",
lastname="Brown",
phone="+0123456789",
address="New Street, 123",
postcode="12345",
city="New City",
country="US",
zone="KS",
email="emma%s@brown.com" % current_time_millis(),
password="password"
)
]
new_user = [Customer(firstname="%s" % gen_user_name(),
lastname="%s" % gen_user_name(),
phone="+016907734234",
address="Old Street, 27",
postcode="64100",
city="Old City",
country="US",
zone="KS",
email="%s" % gen_email(),
password="password"
)
]
new_product = [Product(username="admin",
password="admin",
product_name="New product",
code_product="%s" % random_kod(),
quantity="10",
image="%s" % file_address(),
date_valid_from="30052018",
date_valid_to="31122018",
keywords="product, new product",
short_description="New product for sale",
trumbowyg_editor="Why do we use it?\nGirl quit if case mr sing as no have. Small for ask shade water manor think men begin.",
head_title="New product",
meta_description="Very good product.",
purchase_price="19,99",
prices_usd="34,99",
prices_eur="29,99",
)
]
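# Illustrative output of the generators above (values are randomized on every call):
#   gen_email()     -> e.g. "qwertyu@random-email.com"
#   gen_user_name() -> e.g. "Qwertyu"
#   random_kod()    -> a 32-character string of digits and lowercase Latin letters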
authors: ["32953372+Masnerin@users.noreply.github.com"] | author_id: 32953372+Masnerin@users.noreply.github.com

blob_id: 2b76efc562c1c3e9c2c2b81b174ef091e6cb4437 | directory_id: 5604ec101eb28549d7e3fa9cdd2084722e3b1432 | content_id: 28860f7ee73ba48ec44eaba287aa1672b8295c8f
path: /artifacts/spark_submit_templates/spark_submit_gametrics.py
detected_licenses: [] | license_type: no_license
repo_name: felipemsantos/datalake-toolkit | snapshot_id: 1f828e48b3a821e9ee1261d3a28968b449ae3f93 | revision_id: 1e80619a0a37ce28dbe6bb7b380f6ba9003f7201 | branch_name: refs/heads/master
visit_date: 2023-01-23T14:48:16.643794 | revision_date: 2020-12-09T13:00:42 | committer_date: 2020-12-09T13:00:42
github_id: 283,282,801 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,378 | extension: py
content:
from __future__ import print_function
import sys
from datetime import datetime
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import *
dt = str(sys.argv[1])
s3_object_name_stage = str(sys.argv[2])
hive_database = str(sys.argv[3])
hive_table = str(sys.argv[4])
s3_target = str(sys.argv[5])
# example:
# dt = "2017-10-31"
# s3_object_name_stage = "s3://it.centauro.odl.stage/doo/ga/ga_metrics/dt=2017-10-31/GA_2017_10_31_old.csv"
# hive_database = "odl_dl"
# hive_table = "tb_ga_metrics_parquet"
# s3_target = "s3://it.centauro.odl.dl/ga_metrics_parquet/"
print("dt: " + dt)
print("s3_object_name_stage: " + s3_object_name_stage)
print("hive_database: " + hive_database)
print("hive_table: " + hive_table)
print("s3_target: " + s3_target)
if __name__ == "__main__":
sc = SparkContext()
spark = SparkSession.builder.appName(s3_object_name_stage + 'AND' + dt).config(
"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2").config("spark.speculation",
"false").config(
"hive.exec.dynamic.partition", "true").config("hive.exec.dynamic.partition.mode",
"nonstrict").enableHiveSupport().getOrCreate()
df = spark.read.option("header", "false").option("quote", "'").option("inferschema", "true").csv(
s3_object_name_stage)
df2 = df.selectExpr("_c0 as codigo1", "_c1 as codigo2", "_c2 as produto", "_c6 as data", "_c36 as data_compra")
def parse_date(argument, format_date='%d/%m/%Y %H:%M:%S'):
try:
return datetime.strptime(argument, format_date)
except:
return None
convert_date = udf(lambda x: parse_date(x, '%d/%m/%Y %H:%M:%S'), TimestampType())
df3 = df2.withColumn('data_compra', convert_date(df2.data_compra))
df4 = df3.withColumn('dt', df3['data_compra'].cast('date'))
    # insertInto() is useful for the production environment
df4.write.mode("append").insertInto(hive_database + "." + hive_table)
    # Creating the table is useful for the dev environment to infer the schema and show the create-table statement on Hive or Athena
# df4.write.partitionBy('dt').saveAsTable(hive_database + "." + hive_table, format='parquet', mode='append', path=s3_target)
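# A hedged usage sketch (the argument values are the illustrative ones from the comments
# near the top of this file, not live defaults); the script expects five positional args:
#
#   spark-submit spark_submit_gametrics.py \
#       2017-10-31 \
#       s3://it.centauro.odl.stage/doo/ga/ga_metrics/dt=2017-10-31/GA_2017_10_31_old.csv \
#       odl_dl tb_ga_metrics_parquet \
#       s3://it.centauro.odl.dl/ga_metrics_parquet/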
authors: ["robot@example.com"] | author_id: robot@example.com

blob_id: 278d6930bd2560902d82ef0b55253720908ea2d9 | directory_id: b2c480f843501b16aa04d0a13b3a866e8cf089d8 | content_id: 44bd17ac566730aa790076b7c6c983ee5f907505
path: /Bojung/2강/2_13 drink shop.py
detected_licenses: [] | license_type: no_license
repo_name: Forif-PythonClass/Assignments | snapshot_id: 31dec7ccf2314ad8474c364b43ceabe8127db462 | revision_id: 2dd7750abc40109e9fb04d1e136d0a87848ed887 | branch_name: refs/heads/master
visit_date: 2021-01-19T05:04:06.046321 | revision_date: 2017-06-01T10:48:24 | committer_date: 2017-06-01T10:48:24
github_id: 87,412,590 | star_events_count: 0 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 475 | extension: py
content:
money = 0
while money < 10000 :
print '--OPEN--'
print '''What do you want?
We have 1. Coke, 2. Juice, 3. Energy Drink.'''
beverage = int(raw_input())
if beverage == 1 :
money = money + 1500
print 'Here is your coke.'
elif beverage == 2 :
money = money + 1200
print 'Here is your juice.'
elif beverage == 3 :
money = money + 2000
print 'Here is your energy drink.'
print '--CLOSED--'
authors: ["bjkim0125@gmail.com"] | author_id: bjkim0125@gmail.com

blob_id: 9c474aa08559590c685310323a8210114d2ef19a | directory_id: 4e478a4831a3a71829108ab7f6c71f71601fccf1 | content_id: eb6ff8763a6c32d9761e59a89869449f3f9b702a
path: /intranet/views.py
detected_licenses: [] | license_type: no_license
repo_name: marcoabonce/Intranet | snapshot_id: c7e1becacc6dbfb95e44d37a06f2a71ebefa7e87 | revision_id: b653e89d61fb0ac56601e7d6e486081aac79219c | branch_name: refs/heads/master
visit_date: 2023-01-02T06:12:40.734862 | revision_date: 2020-10-29T00:11:53 | committer_date: 2020-10-29T00:11:53
github_id: 307,795,942 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,998 | extension: py
content:
|
from django.shortcuts import render
from compression_middleware.decorators import compress_page
from django.shortcuts import redirect
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib import messages
from users.models import User
import pytz
from datetime import datetime
from mensajes.models import Mensaje
@compress_page
def index(request):
if not request.user.is_authenticated:
return redirect ('intranet:login')
time_mx = pytz.timezone('America/Mexico_City')
time = datetime.now(time_mx)
mensajes = Mensaje.objects.all()
for men in mensajes:
print (men.title)
saludo = "Hola"
if int(time.strftime("%H"))>5 and int(time.strftime("%H"))<12:
saludo = "Buenos días"
if int(time.strftime("%H"))>11 and int(time.strftime("%H"))<20:
saludo = "Buenas tardes"
if int(time.strftime("%H"))>19 and int(time.strftime("%H"))<=24:
saludo = "Buenas noches"
if int(time.strftime("%H"))>=0 and int(time.strftime("%H"))<=5:
saludo = "Buenas noches"
print(time.strftime("%H"))
return render(request,'intranet/index.html',{
'user':request.user.first_name, 'saludo':saludo, 'mensajes':mensajes
})
@compress_page
def login_view(request):
if request.user.is_authenticated:
return redirect ('intranet:index')
if request.method == 'POST':
email = request.POST.get('email')
password = request.POST.get('password')
try:
user = User.objects.get(email=email)
except:
user = None
if user:
user = authenticate(username=user.username, password=password)
login(request,user)
messages.success(request,'Bienvenido {}'.format(user.first_name))
return redirect('intranet:index')
else:
messages.error(request,'Credenciales incorrectas')
return render(request,'intranet/login.html',{
})
@compress_page
def category_view(request):
if not request.user.is_authenticated:
return redirect ('intranet:login')
time_mx = pytz.timezone('America/Mexico_City')
time = datetime.now(time_mx)
saludo = "Hola"
if int(time.strftime("%H"))>5 and int(time.strftime("%H"))<12:
saludo = "Buenos días"
if int(time.strftime("%H"))>11 and int(time.strftime("%H"))<20:
saludo = "Buenas tardes"
if int(time.strftime("%H"))>19 and int(time.strftime("%H"))<=24:
saludo = "Buenas noches"
if int(time.strftime("%H"))>=0 and int(time.strftime("%H"))<=5:
saludo = "Buenas noches"
print(time.strftime("%H"))
return render(request,'intranet/category.html',{
'user':request.user.first_name, 'saludo':saludo
})
@compress_page
def logout_view(request):
name = request.user.first_name
logout(request)
messages.success(request,'Hasta pronto {}!!!'.format(name))
return redirect('intranet:login')
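# A possible refactor sketch (not in the original file): the greeting-by-hour logic is
# duplicated in index() and category_view() and could be factored into a single helper:
#
#   def saludo_for_hour(hour):
#       if 5 < hour < 12:
#           return "Buenos días"
#       if 11 < hour < 20:
#           return "Buenas tardes"
#       return "Buenas noches"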
authors: ["marcoabonce.mt@gmail.com"] | author_id: marcoabonce.mt@gmail.com

blob_id: 062b57bc9ca28f0e4174396d81af62224185dada | directory_id: 516fe2c01014d4ce665949b7fb4431b172cc4019 | content_id: d02bd3ad9d44cff4a6eb9fa36c1647478ce58854
path: /accounts/migrations/0003_auto_20200720_1244.py
detected_licenses: [] | license_type: no_license
repo_name: Mahbub20/Django-Customer-Management-System | snapshot_id: 18b34c5e2abded89054bd9d19f0fa677cd29d2b6 | revision_id: a263df6755f6978d1d2de5e65633f27a66f0b6ea | branch_name: refs/heads/master
visit_date: 2022-11-24T14:23:34.868403 | revision_date: 2020-07-31T15:44:44 | committer_date: 2020-07-31T15:44:44
github_id: 280,682,630 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,330 | extension: py
content:
# Generated by Django 3.0.7 on 2020-07-20 12:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_order_product'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
],
),
migrations.AddField(
model_name='order',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.Customer'),
),
migrations.AddField(
model_name='order',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.Product'),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='product',
name='tags',
field=models.ManyToManyField(to='accounts.Tag'),
),
]
authors: ["mmahbub569@gmail.com"] | author_id: mmahbub569@gmail.com

blob_id: 792a35d3bfcca875858834dba3e83a3c145fdc11 | directory_id: af9268e1ead8cdb491868c14a2240d9e44fb3b56 | content_id: 650e9c63029694f08f8e2fd4e8d0bee61d7e98aa
path: /last-minute-env/lib/python2.7/site-packages/django/templatetags/i18n.py
detected_licenses: [] | license_type: no_license
repo_name: frosqh/Cousinade2017 | snapshot_id: d5154c24c93ca8089eeba26b53c594e92cb6bd82 | revision_id: c34d5707af02402bf2bb7405eddc91297da399ff | branch_name: refs/heads/master
visit_date: 2021-01-20T07:57:34.586476 | revision_date: 2017-10-22T18:42:45 | committer_date: 2017-10-22T18:42:45
github_id: 90,074,802 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 19,909 | extension: py
content:
|
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
from django.utils.safestring import SafeData, mark_safe
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = lang_code
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = languages
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
# Restore percent signs. Percent signs in template text are doubled
# so they are not interpreted as string format flags.
is_safe = isinstance(value, SafeData)
value = value.replace('%%', '%')
value = mark_safe(value) if is_safe else value
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=False, asvar=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
self.asvar = asvar
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents.replace('%', '%%'))
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
msg = ''.join(result)
if self.trimmed:
msg = translation.trim_whitespace(msg)
return msg, vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
default_value = context.template.engine.string_if_invalid
def render_value(key):
if key in context:
val = context[key]
else:
val = default_value % key if '%s' in default_value else default_value
return render_value_in_context(val, context)
data = {v: render_value(v) for v in vars}
context.pop()
try:
result = result % data
except (KeyError, ValueError):
if nested:
# Either string is malformed, or it's a bug
raise TemplateSyntaxError(
"'blocktrans' is unable to format string returned by gettext: %r using %r"
% (result, data)
)
with translation.override(None):
result = self.render(context, nested=True)
if self.asvar:
context[self.asvar] = result
return ''
else:
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_translated(lang_code):
english_name = translation.get_language_info(lang_code)['name']
return translation.ugettext(english_name)
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
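# Walk the remaining bits: 'noop', 'context <expr>' and 'as <var>' are the only
# accepted options, and each may be given at most once.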
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
The translated string can be stored in a variable using `asvar`::
{% blocktrans with bar=foo|filter boo=baz|filter asvar var %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
{{ var }}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
asvar = None
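# Parse the tag options ('with', 'count', 'context', 'trimmed', 'asvar');
# each option may appear at most once.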
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
elif option == "asvar":
try:
value = remaining_bits.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the asvar option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(options['count'].items())[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed,
asvar=asvar)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
|
[
"frosqh@gmail.com"
] |
frosqh@gmail.com
|
c03039604d8be646f5dbfc7b8b383282b5703cda
|
88d5a273893b9ae1707da5d93fe44153ffca2a27
|
/화공전산(2-2)/chapter8.경계값 문제/ex8.8_비선형미방_유한차분법.py
|
a3131111c560b8929fe3cf45084efa7c74d733d9
|
[] |
no_license
|
exgs/hongikuniv_chemical-engineering
|
018ce8bfaca5d44ce1d03d529ab19eac47e6c94c
|
5e7165ec3b05710131ed558da87fc167d326d330
|
refs/heads/master
| 2023-07-16T14:20:27.718158
| 2021-08-31T22:12:44
| 2021-08-31T22:12:44
| 264,912,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
a, ya = 0, 1
b, yb = 1, 1.5
n = 20
eps1, eps2 = 1e-3, 1e-2
kmax = 100
h = 1/n
import numpy
tj = numpy.linspace(a, b + h/2, n + 1)
yk = numpy.linspace(ya, yb, n + 1)
print(tj)
print(yk)
from scipy.linalg import solve_banded
k = 0
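# Newton iteration on the finite-difference equations: at each step the residual
# vector fk and the tridiagonal Jacobian (upper/main/lower diagonals JU/JD/JL)
# are assembled over the interior nodes j = 1..n-1, the banded linear system is
# solved for the correction dy, and the interior values of yk are updated.
# Iteration stops when both the residual norm and the step norm fall below
# eps1 and eps2, or after kmax iterations.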
while k < kmax:
fk, JU, JD, JL = [], [], [], []
# for j in range(0, n-1):
for j in range(1, n):
if j == 1:
y0, y1, y2 = ya, yk[j], yk[j+1]
else:
y0, y1, y2 = y1, y2, yk[j+1]
# y0, y1, y2 = yk[j], yk[j+1], yk[j+2]
fk.append(-y0 + h*y0*y1 + (2 - h**2)*y1 - h*y1*y2 - y2)
JU.append(-1 + h*y1)
JD.append(h*y0 + 2 - h**2 - h*y2)
JL.append(-h*y1 - 1)
JU[0], JL[-1] = 0, 0
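# scipy.linalg.solve_banded expects the (1,1)-banded matrix as three rows
# (upper, main and lower diagonal); the corner entries zeroed above are slots
# the solver never reads.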
dy = solve_banded([1, 1], [JU, JD, JL], fk)
for j in range(1, n):
yk[j] = yk[j] - dy[j - 1]
e1 = numpy.linalg.norm(fk)
e2 = numpy.linalg.norm(dy)
if e1 < eps1 and e2 < eps2:
break
k = k + 1
print("k = ", k, '\n', yk)
|
[
"yunslee@student.42seoul.kr"
] |
yunslee@student.42seoul.kr
|
866309775c90ca2e82d7c0843595a409719723e4
|
0b69f21bcec30504338798c9294a02792e4aca44
|
/draugr/opencv_utilities/windows/default.py
|
22fee5b520fc2fadca66ef66d0c44ecf3e7dc844
|
[
"Apache-2.0"
] |
permissive
|
cnheider/draugr
|
12e5fcb274f2a86ad51d0ddc2147bf2c99781916
|
94a402cab47a2bd6241608308371490079af4d53
|
refs/heads/master
| 2023-08-04T08:31:20.337823
| 2022-12-06T12:39:53
| 2022-12-06T12:39:53
| 156,557,610
| 4
| 1
|
Apache-2.0
| 2022-12-06T20:13:14
| 2018-11-07T14:20:39
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
from enum import Enum
from typing import Iterable
from sorcery import assigned_names
__all__ = ["ExtensionEnum", "match_return_code"]
ESC_CHAR = chr(27)
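# chr(27) is the ASCII escape character, so pressing Esc can be matched like 'q'.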
def match_return_code(ret_val, chars: Iterable[str] = ("q", ESC_CHAR)) -> bool:
"""
:param ret_val:
:type ret_val:
:param chars:
:type chars:
:return:
:rtype:
"""
if ret_val:
return any(ret_val & 0xFF == ord(c) for c in chars)
return False
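# Hypothetical usage sketch: match_return_code(cv2.waitKey(1)) is True once the
# user presses 'q' or Esc in an OpenCV window.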
class ExtensionEnum(Enum):
png, exr = assigned_names()
|
[
"christian.heider@alexandra.dk"
] |
christian.heider@alexandra.dk
|
70a67bb74721a09b03e08fbea79b3605ef133633
|
d8b83901ea5ad88ef073504ca569314d9de2b4d0
|
/Spiders/HuXiu/Spider.py
|
9c7530bf8799b9ae0d45b4bca3d9b35b431eb355
|
[] |
no_license
|
lisx9/ForestExplorer
|
19aef15d4240297b8f32e8936637317644b161cb
|
a75dbd09989629023307ad13d9b89daedd4cf32f
|
refs/heads/master
| 2022-12-03T23:20:31.941514
| 2020-09-01T07:27:44
| 2020-09-01T07:27:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,831
|
py
|
# -*- coding: utf-8 -*-
# Written by panzy
from apscheduler.schedulers.blocking import BlockingScheduler
from bs4 import BeautifulSoup
import requests
import re
import json
import os
import time
import logging
trapInfo = {}
hotKeyWordsPath = '/home/st01/ForestExplorer/hotKeyword.txt'
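# trapInfo records the keyword/page where a crawl was interrupted so it can be
# resumed later; hotKeyWordsPath is a JSON file holding the 'awaiting' and
# 'finished' keyword queues.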
'''
Crawler for articles on huxiu.com.
'''
'''
Resumable crawling is implemented by updating a config file;
it could alternatively be implemented with a queue.
'''
# Fetch the static page content for a given article URL
def getArticleContent(articleURL):
res = requests.get(articleURL)
print('Pull request to ' + articleURL)
try:
res.raise_for_status()
res.encoding = 'utf-8'
return res.text
except requests.HTTPError as e:
print(e)
print('HTTPError: Request for Article Failed.')
# Parse the static article page and return a dict with title, time and content
def processContent(content):
soup = BeautifulSoup(content, 'html.parser')
try:
articleContent = soup.find('div', attrs = "article-content").get_text()
articleTitle = soup.find('div', attrs = "article-content-title-box").find('div', attrs = "title").get_text()
except AttributeError as e:
print(e)
return {
'title': None,
'content': None,
'time': None
}
# The timestamp element's location varies across static pages; handle both layouts
if soup.find('div', attrs = "m-article-time") != None:
articleTime = soup.find('div', attrs = "m-article-time").get_text()
elif soup.find('div', attrs = "show-time") != None:
articleTime = soup.find('span', attrs = "show-time").get_text()
else: articleTime = None
articleInfo = {'title': articleTitle, 'content': articleContent, 'time': articleTime}
print(articleInfo['time'], articleInfo['title'])
return articleInfo
# Crawl the site's stream pages; pagination is an issue, so this is currently unused
def getStreamLink(url, formData):
try:
articleLinkRes = requests.post(url, data = formData)
articleLinkRes.raise_for_status()
articleLinkResJson = json.loads(articleLinkRes.text)
articleLinkList = {
'articleLinks': []
}
articleLinkList['last_time'] = articleLinkResJson['data']['last_time']
for articleData in articleLinkResJson['data']['datalist']:
articleLinkList['articleLinks'].append(articleData['share_url'])
print(articleLinkList['articleLinks'], len(articleLinkList['articleLinks']))
return articleLinkList
except requests.HTTPError as e:
print(e)
print('Failed to flowing visit ' + url)
except Exception as e:
print(e)
print('Other Error happened.')
# Search the site for a given keyword and return the article IDs in the results
def getSearchLink(url, formData):
try:
articleLinkRes = requests.post(url, data = formData)
articleLinkRes.raise_for_status()
articleLinkList = []
articleLinkRes.encoding = 'utf-8'
articleLinkResJson = json.loads(articleLinkRes.text)
for articleData in articleLinkResJson['data']['datalist']:
articleLinkList.append(articleData['aid'])
print(articleLinkList, len(articleLinkList))
return articleLinkList
except Exception as e:
print(e)
trapInfo = {
'keyword': formData['s'],
'page': formData['page']
}
print('Failed to keyword visit ' + url)
return trapInfo
except Exception as e:
print(e)
print('Other Error happened.')
# Write the article info to the given path
def writeToDisk(path, articleInfo):
if articleInfo['content'] == None:
return
with open(path, 'a', encoding='utf-8') as f:
f.write(str(articleInfo['title']) + '\n' + str(articleInfo['time']) + '\n' + str(articleInfo['content']) + '\n')
f.close()
# Fetch the trending (hot) search keywords
def getHotKeyWords():
hotURL = 'https://article-api.huxiu.com/tag/hot'
formData = {
'platform': 'www', # query from the desktop (www) platform
}
hotResponse = requests.post(hotURL, data=formData)
hotKeyWordsJson = json.loads(hotResponse.text)
hotKeyWordsList = hotKeyWordsJson['data']
return hotKeyWordsList
# Loop-crawl articles from a channel's article stream
def crawlOnChannel(channelURL, channelFormData, path):
while (True):
articleLinkList = getStreamLink(channelURL, channelFormData)
try:
channelFormData['last_time'] = articleLinkList['last_time']
for articleLink in articleLinkList['articleLinks']:
if os.path.exists(path + str(articleLink)[-11:-5] + '.txt'):
print('duplicate, skipping')
continue
else: writeToDisk(path + str(articleLink)[-11:-5] + '.txt', processContent(getArticleContent(articleLink)))
except TypeError as e:
print('Stream Ended.')
break
# Loop-crawl articles for each trending search keyword
def crawlBySearch(searchURL, searchFormData, path):
hotKeyWordsList = updateKeywords()
for hotKeyWord in hotKeyWordsList:
searchFormData['s'] = hotKeyWord['keyword']
searchFormData['page'] = hotKeyWord['page']
while (True):
articleLinkList = getSearchLink(searchURL, searchFormData)
print('Search for: ' + hotKeyWord['keyword'] + ' page: ' + str(searchFormData['page']))
# Error handling: end the loop if a 429 error was received or this keyword's results are exhausted
if articleLinkList == []:
with open(hotKeyWordsPath, 'r') as f:
hotKeywordJson = json.loads(f.read())
f.close()
for hotKeyWordInfo in hotKeywordJson['awaiting']:
if hotKeyWordInfo['keyword'] == hotKeyWord['keyword']:
hotKeywordJson['awaiting'].remove(hotKeyWordInfo)
break
hotKeywordJson['finished'].append(hotKeyWord['keyword'])
with open(hotKeyWordsPath, 'w+') as f:
f.write(json.dumps(hotKeywordJson, ensure_ascii=False))
f.close()
break
elif type(articleLinkList) == dict:
print('Trapped in point: keyword = ' + str(articleLinkList['keyword']) + ' ,page = ' + str(articleLinkList['page']))
with open(hotKeyWordsPath,'r') as f:
hotKeywordJson = json.loads(f.read())
f.close()
for hotKeyWordInfo in hotKeywordJson['awaiting']:
if hotKeyWordInfo['keyword'] == articleLinkList['keyword']:
hotKeyWordInfo['page'] = articleLinkList['page']
break
with open(hotKeyWordsPath, 'w+') as f:
f.write(json.dumps(hotKeywordJson, ensure_ascii=False))
f.close()
break
for articleNum in articleLinkList:
articleURL = 'https://m.huxiu.com/article/' + str(articleNum) + '.html'
if os.path.exists(path + hotKeyWord['keyword'] + str(articleNum) + '.txt'):
print(path + hotKeyWord['keyword'] + str(articleNum) + '.txt has existed.')
continue
else:
writeToDisk(path + hotKeyWord['keyword'] + str(articleNum) + '.txt',processContent(getArticleContent(articleURL)))
searchFormData['page'] = searchFormData['page'] + 1
# Job: crawl by trending search keywords
def crawlJob_search():
path = '/home/st01/ForestExplorer/HuXiuData/'
# Crawl driven by search keywords (includes fetching the trending keywords)
searchURL = 'https://search-api.huxiu.com/api/article'
searchFormData = {
'platform': 'www', # query from the desktop (www) platform
'page': 1, # page is the pagination index
'pagesize': 20 # pagesize does not affect page-based pagination; values above it are still accessible
}
# if trapInfo != {}:
# searchFormData['s'] = trapInfo['keyword']
# searchFormData['page'] = trapInfo['page']
# crawl via keyword search
crawlBySearch(searchURL, searchFormData, path)
# Job: crawl channel content
def crawlJob_Stream():
path = '/home/st01/ForestExplorer/HuXiuData/'
# Crawl by channel/category
channelURL = 'https://article-api.huxiu.com/web/channel/articleList'
channelFormData = {
'platform': 'www',
'last_time': '1597852200',
'channel_id': '105',
'pagesize': '22'
}
crawlOnChannel(channelURL, channelFormData, path) # channel stream crawl
def updateKeywords():
hotKeywordsList = getHotKeyWords()
print(hotKeywordsList)
# Load the stored trending-keyword data
with open(hotKeyWordsPath, 'r') as f:
hotKeywordJson = json.loads(f.read())
f.close()
# Update the trending-keyword crawl state
for hotKeyword in hotKeywordsList:
exit_flag = False
for hotKeywordInfo in hotKeywordJson['awaiting']:
if hotKeyword == hotKeywordInfo['keyword']:
# already in the awaiting queue
exit_flag = True
break
if hotKeyword in hotKeywordJson['finished']:
# already finished; incremental re-crawling by time could be a later improvement
continue
elif exit_flag == True:
# already in the awaiting queue
continue
elif hotKeywordJson['awaiting'][0]['page'] > 200:
hotKeywordJson['awaiting'].pop(0)
else:
hotKeywordInfo = {
'keyword': hotKeyword,
'page': 1
}
hotKeywordJson['awaiting'].append(hotKeywordInfo)
with open(hotKeyWordsPath, 'w+') as tmp_f:
tmp_f.write(json.dumps(hotKeywordJson, ensure_ascii = False))
tmp_f.close()
# Reload the updated data and return the keywords that still need crawling
with open(hotKeyWordsPath, 'r') as f:
hotKeywordJson = json.loads(f.read())
hotKeyWordsList = hotKeywordJson['awaiting']
f.close()
return hotKeyWordsList
if __name__ == '__main__':
# Create the scheduler: BlockingScheduler
scheduler = BlockingScheduler()
# Add the search-crawl job, run every 30 minutes
scheduler.add_job(crawlJob_search, 'interval', minutes = 30, id = 'crawlJob_search')
# Stream-crawl job (currently disabled); would run every hour
# scheduler.add_job(crawlJob_Stream, 'interval', hours = 1, id = 'crawlJob_stream')
scheduler.start()
|
[
"pzy000301@gmail.com"
] |
pzy000301@gmail.com
|
eeec05a32d1e6106ac6cde195aedc739a23f1ad4
|
fba6c9f65e8bca522f1c5f7b0315a7a02e343f69
|
/src/metadata.py
|
8710ca1d6e1efe851d7ed1e62d8c62deda4b3a49
|
[] |
no_license
|
fzh4519/002_mind
|
a41cd258dce8fbfbe67ae05a2a7c70769456813f
|
b341fc3a61dd1ccb35f62ddb94e5db45b81852c6
|
refs/heads/master
| 2023-06-17T13:29:58.768365
| 2021-07-13T00:53:47
| 2021-07-13T00:53:47
| 384,894,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,398
|
py
|
import numpy as np
from os import listdir
import random
tracker_skeID = {'test1': 'skele1.p', 'test2': 'skele2.p', 'test6': 'skele2.p', 'test7': 'skele1.p',
'test_9434_1': 'skele2.p', 'test_9434_3': 'skele2.p', 'test_9434_18': 'skele1.p',
'test_94342_0': 'skele2.p', 'test_94342_1': 'skele2.p', 'test_94342_2': 'skele2.p',
'test_94342_3': 'skele2.p', 'test_94342_4': 'skele1.p', 'test_94342_5': 'skele1.p',
'test_94342_6': 'skele1.p', 'test_94342_7': 'skele1.p', 'test_94342_8': 'skele1.p',
'test_94342_10': 'skele2.p', 'test_94342_11': 'skele2.p', 'test_94342_12': 'skele1.p',
'test_94342_13': 'skele2.p', 'test_94342_14': 'skele1.p', 'test_94342_15': 'skele2.p',
'test_94342_16': 'skele1.p', 'test_94342_17': 'skele2.p', 'test_94342_18': 'skele1.p',
'test_94342_19': 'skele2.p', 'test_94342_20': 'skele1.p', 'test_94342_21': 'skele2.p',
'test_94342_22': 'skele1.p', 'test_94342_23': 'skele1.p', 'test_94342_24': 'skele1.p',
'test_94342_25': 'skele2.p', 'test_94342_26': 'skele1.p',
'test_boelter_1': 'skele2.p', 'test_boelter_2': 'skele2.p', 'test_boelter_3': 'skele2.p',
'test_boelter_4': 'skele1.p', 'test_boelter_5': 'skele1.p', 'test_boelter_6': 'skele1.p',
'test_boelter_7': 'skele1.p', 'test_boelter_9': 'skele1.p', 'test_boelter_10': 'skele1.p',
'test_boelter_12': 'skele2.p', 'test_boelter_13': 'skele1.p', 'test_boelter_14': 'skele1.p',
'test_boelter_15': 'skele1.p', 'test_boelter_17': 'skele2.p', 'test_boelter_18': 'skele1.p',
'test_boelter_19': 'skele2.p', 'test_boelter_21': 'skele1.p', 'test_boelter_22': 'skele2.p',
'test_boelter_24': 'skele1.p', 'test_boelter_25': 'skele1.p',
'test_boelter2_0': 'skele1.p', 'test_boelter2_2': 'skele1.p', 'test_boelter2_3': 'skele1.p',
'test_boelter2_4': 'skele1.p', 'test_boelter2_5': 'skele1.p', 'test_boelter2_6': 'skele1.p',
'test_boelter2_7': 'skele2.p', 'test_boelter2_8': 'skele2.p', 'test_boelter2_12': 'skele2.p',
'test_boelter2_14': 'skele2.p', 'test_boelter2_15': 'skele2.p', 'test_boelter2_16': 'skele1.p',
'test_boelter2_17': 'skele1.p',
'test_boelter3_0': 'skele1.p', 'test_boelter3_1': 'skele2.p', 'test_boelter3_2': 'skele2.p',
'test_boelter3_3': 'skele2.p', 'test_boelter3_4': 'skele1.p', 'test_boelter3_5': 'skele2.p',
'test_boelter3_6': 'skele2.p', 'test_boelter3_7': 'skele1.p', 'test_boelter3_8': 'skele2.p',
'test_boelter3_9': 'skele2.p', 'test_boelter3_10': 'skele1.p', 'test_boelter3_11': 'skele2.p',
'test_boelter3_12': 'skele2.p', 'test_boelter3_13': 'skele2.p',
'test_boelter4_0': 'skele2.p', 'test_boelter4_1': 'skele2.p', 'test_boelter4_2': 'skele2.p',
'test_boelter4_3': 'skele2.p', 'test_boelter4_4': 'skele2.p', 'test_boelter4_5': 'skele2.p',
'test_boelter4_6': 'skele2.p', 'test_boelter4_7': 'skele2.p', 'test_boelter4_8': 'skele2.p',
'test_boelter4_9': 'skele2.p', 'test_boelter4_10': 'skele2.p', 'test_boelter4_11': 'skele2.p',
'test_boelter4_12': 'skele2.p', 'test_boelter4_13': 'skele2.p',
}
event_seg_tracker = {
'test_9434_18': [[0, 749, 0], [750, 824, 0], [825, 863, 2], [864, 974, 0], [975, 1041, 0]],
'test_94342_1': [[0, 13, 0], [14, 104, 0], [105, 333, 0], [334, 451, 0], [452, 652, 0],
[653, 897, 0], [898, 1076, 0], [1077, 1181, 0], [1181, 1266, 0], [1267, 1386, 0]],
'test_94342_6': [[0, 95, 0], [96, 267, 1], [268, 441, 1], [442, 559, 1], [560, 681, 1], [
682, 796, 1], [797, 835, 1], [836, 901, 0], [902, 943, 1]],
'test_94342_10': [[0, 36, 0], [37, 169, 0], [170, 244, 1], [245, 424, 0], [425, 599, 0], [600, 640, 0],
[641, 680, 0], [681, 726, 1], [727, 866, 2], [867, 1155, 2]],
'test_94342_21': [[0, 13, 0], [14, 66, 2], [67, 594, 2], [595, 1097, 2], [1098, 1133, 0]],
'test1': [[0, 477, 0], [478, 559, 0], [560, 689, 2], [690, 698, 0]],
'test6': [[0, 140, 0], [141, 375, 0], [376, 678, 0], [679, 703, 0]],
'test7': [[0, 100, 0], [101, 220, 2], [221, 226, 0]],
'test_boelter_2': [[0, 154, 0], [155, 279, 0], [280, 371, 0], [372, 450, 0], [451, 470, 0], [471, 531, 0],
[532, 606, 0]],
'test_boelter_7': [[0, 69, 0], [70, 118, 1], [119, 239, 0], [240, 328, 1], [329, 376, 0], [377, 397, 1],
[398, 520, 0], [521, 564, 0], [565, 619, 1], [620, 688, 1], [689, 871, 0], [872, 897, 0],
[898, 958, 1], [959, 1010, 0], [1011, 1084, 0], [1085, 1140, 0], [1141, 1178, 0],
[1179, 1267, 1], [1268, 1317, 0], [1318, 1327, 0]],
'test_boelter_24': [[0, 62, 0], [63, 185, 2], [186, 233, 2], [234, 292, 2], [293, 314, 0]],
'test_boelter_12': [[0, 47, 1], [48, 119, 0], [120, 157, 1], [158, 231, 0], [232, 317, 0], [318, 423, 0],
[424, 459, 0], [460, 522, 0], [523, 586, 0], [587, 636, 0], [637, 745, 1], [746, 971, 2]],
'test_9434_1': [[0, 57, 0], [58, 124, 0], [125, 182, 1], [183, 251, 2], [252, 417, 0]],
'test_94342_16': [[0, 21, 0], [22, 45, 0], [46, 84, 0], [85, 158, 1], [159, 200, 1],
[201, 214, 0], [215, 370, 1], [371, 524, 1], [525, 587, 2], [588, 782, 2], [783, 1009, 2]],
'test_boelter4_12': [[0, 141, 0], [142, 462, 2], [463, 605, 0], [606, 942, 2],
[943, 1232, 2], [1233, 1293, 0]],
'test_boelter4_9': [[0, 27, 0], [28, 172, 0], [173, 221, 0], [222, 307, 1],
[308, 466, 0], [467, 794, 1], [795, 866, 1],
[867, 1005, 2], [1006, 1214, 2], [1215, 1270, 0]],
'test_boelter4_4': [[0, 120, 0], [121, 183, 0], [184, 280, 1], [281, 714, 0]],
'test_boelter4_3': [[0, 117, 0], [118, 200, 1], [201, 293, 1], [294, 404, 1],
[405, 600, 1], [601, 800, 1], [801, 905, 1], [906, 1234, 1]],
'test_boelter4_1': [[0, 310, 0], [311, 560, 0], [561, 680, 0], [681, 748, 0],
[749, 839, 0], [840, 1129, 0], [1130, 1237, 0]],
'test_boelter3_13': [[0, 204, 2], [205, 300, 2], [301, 488, 2], [489, 755, 2]],
'test_boelter3_11': [[0, 254, 1], [255, 424, 0], [425, 598, 1], [599, 692, 0],
[693, 772, 2], [773, 878, 2], [879, 960, 2], [961, 1171, 2], [1172, 1397, 2]],
'test_boelter3_6': [[0, 174, 1], [175, 280, 1], [281, 639, 0], [640, 695, 1],
[696, 788, 0], [789, 887, 2], [888, 1035, 1], [1036, 1445, 2]],
'test_boelter3_4': [[0, 158, 1], [159, 309, 1], [310, 477, 1], [478, 668, 1],
[669, 780, 1], [781, 817, 0], [818, 848, 1], [849, 942, 1]],
'test_boelter3_0': [[0, 140, 0], [141, 353, 0], [354, 599, 0], [600, 727, 0], [728, 768, 0]],
'test_boelter2_15': [[0, 46, 0], [47, 252, 2], [253, 298, 1], [299, 414, 2],
[415, 547, 2], [548, 690, 1], [691, 728, 1], [729, 773, 2], [774, 935, 2]],
'test_boelter2_12': [[0, 163, 0], [164, 285, 1], [286, 444, 1], [445, 519, 0],
[520, 583, 1], [584, 623, 0], [624, 660, 0],
[661, 854, 1], [855, 921, 1], [922, 1006, 2], [1007, 1125, 2], [1126, 1332, 2],
[1333, 1416, 2]],
'test_boelter2_5': [[0, 94, 0], [95, 176, 1], [177, 246, 1], [247, 340, 1],
[341, 442, 1], [443, 547, 1], [548, 654, 1], [655, 734, 0],
[735, 792, 0], [793, 1019, 0], [1020, 1088, 0], [1089, 1206, 0],
[1207, 1316, 1], [1317, 1466, 1], [1467, 1787, 2],
[1788, 1936, 1], [1937, 2084, 2]],
'test_boelter2_4': [[0, 260, 1], [261, 421, 1], [422, 635, 1], [636, 741, 1],
[742, 846, 1], [847, 903, 1], [904, 953, 1], [954, 1005, 1],
[1006, 1148, 1], [1149, 1270, 1], [1271, 1525, 1]],
'test_boelter2_2': [[0, 131, 0], [132, 226, 0], [227, 267, 0], [268, 352, 0],
[353, 412, 0], [413, 457, 0], [458, 502, 0],
[503, 532, 0], [533, 578, 0], [579, 640, 0], [641, 722, 0],
[723, 826, 0], [827, 913, 0], [914, 992, 0],
[993, 1070, 0], [1071, 1265, 0], [1266, 1412, 0]],
'test_boelter_21': [[0, 238, 1], [239, 310, 0], [311, 373, 1], [374, 457, 0], [458, 546, 2], [547, 575, 1],
[576, 748, 2], [749, 952, 2]], }
# event_seg_battery = {
# 'test_9434_18': [[0, 96, 0], [97, 361, 0], [362, 528, 0], [529, 608, 0], [609, 824, 0], [864, 1041, 0]],
# 'test_94342_1': [[0, 751, 0], [752, 876, 0], [877, 1167, 0], [1168, 1386, 0]],
# 'test_94342_6': [[0, 95, 0], [836, 901, 0]],
# 'test_94342_10': [[0, 156, 0], [157, 169, 0], [245, 274, 0], [275, 389, 0], [390, 525, 0], [526, 665, 0],
# [666, 680, 0]],
# 'test_94342_21': [[0, 13, 0], [1098, 1133, 0]],
# 'test1': [[0, 94, 0], [95, 155, 0], [156, 225, 0], [226, 559, 0], [690, 698, 0]],
# 'test6': [[0, 488, 0], [489, 541, 0], [542, 672, 0], [672, 803, 0]],
# 'test7': [[0, 70, 0], [71, 100, 0], [221, 226, 0]],
# 'test_boelter_2': [[0, 318, 0], [319, 458, 0], [459, 543, 0], [544, 606, 0]],
# 'test_boelter_7': [[0, 69, 0], [119, 133, 0], [134, 187, 0], [188, 239, 0], [329, 376, 0], [398, 491, 0],
# [492, 564, 0], [689, 774, 0], [775, 862, 0], [863, 897, 0], [959, 1000, 0],
# [1001, 1178, 0], [1268, 1307, 0], [1307, 1327, 0]],
# 'test_boelter_24': [[0, 62, 0], [293, 314, 0]],
# 'test_boelter_12': [[48, 219, 0], [220, 636, 0]],
# 'test_9434_1': [[0, 67, 0], [68, 124, 0], [252, 343, 0], [344, 380, 0], [381, 417, 0]],
# 'test_94342_16': [[0, 84, 0], [201, 214, 0]],
# 'test_boelter4_12': [[0, 32, 0], [33, 141, 0], [463, 519, 0], [520, 597, 0], [598, 605, 0],
# [1233, 1293, 0]],
# 'test_boelter4_9': [[0, 221, 0], [308, 466, 0], [1215, 1270, 0]],
# 'test_boelter4_4': [[0, 183, 0], [281, 529, 0], [530, 714, 0]],
# 'test_boelter4_3': [[0, 117, 0]],
# 'test_boelter4_1': [[0, 252, 0], [253, 729, 0], [730, 1202, 0], [1203, 1237, 0]],
# 'test_boelter3_13': [],
# 'test_boelter3_11': [[255, 424, 0], [599, 692, 0]],
# 'test_boelter3_6': [[281, 498, 0], [499, 639, 0], [696, 748, 0], [749, 788, 0]],
# 'test_boelter3_4': [[781, 817, 0]],
# 'test_boelter3_0': [[0, 102, 0], [103, 480, 0], [481, 703, 0], [704, 768, 0]],
# 'test_boelter2_15': [[0, 46, 0]],
# 'test_boelter2_12': [[0, 163, 0], [445, 519, 0], [584, 660, 0]],
# 'test_boelter2_5': [[0, 94, 0], [655, 1206, 0]],
# 'test_boelter2_4': [],
# 'test_boelter2_2': [[0, 145, 0], [146, 224, 0], [225, 271, 0], [272, 392, 0], [393, 454, 0],
# [455, 762, 0], [763, 982, 0], [983, 1412, 0]],
# 'test_boelter_21': [[239, 285, 0], [286, 310, 0], [374, 457, 0]],
# }
#
# event_seg_battery_new = {}
#
# for key, item in event_seg_tracker.items():
# item = np.array(item)
# item1 = item[item[:, 2] == 1]
# item2 = item[item[:, 2] == 2]
# item3 = item[item[:, 2] == 3]
# total = np.vstack([item1, item2, item3])
# item_b = event_seg_battery[key]
# item_b = np.array(item_b)
# if item_b.shape[0] == 0:
# item_b_new = total
# else:
# item_b_new = np.vstack([item_b, total])
# item_b_idx = np.argsort(item_b_new[:, 0])
# item_b_sort = item_b_new[item_b_idx].tolist()
# event_seg_battery_new[key] = item_b_sort
#
#
# print event_seg_battery_new
event_seg_battery = {'test1': [[0, 94, 0], [95, 155, 0], [156, 225, 0], [226, 559, 0], [560, 689, 2], [690, 698, 0]],
'test7': [[0, 70, 0], [71, 100, 0], [101, 220, 2], [221, 226, 0]],
'test6': [[0, 488, 0], [489, 541, 0], [542, 672, 0], [673, 703, 0]],
'test_94342_10': [[0, 156, 0], [157, 169, 0], [170, 244, 1], [245, 274, 0], [275, 389, 0],
[390, 525, 0],
[526, 665, 0], [666, 680, 0], [681, 726, 1], [727, 866, 2], [867, 1155, 2]],
'test_94342_1': [[0, 751, 0], [752, 876, 0], [877, 1167, 0], [1168, 1386, 0]],
'test_9434_18': [[0, 96, 0], [97, 361, 0], [362, 528, 0], [529, 608, 0], [609, 824, 0],
[825, 863, 2], [864, 1041, 0]],
'test_94342_6': [[0, 95, 0], [96, 267, 1], [268, 441, 1], [442, 559, 1], [560, 681, 1],
[682, 796, 1],
[797, 835, 1], [836, 901, 0], [902, 943, 1]],
'test_boelter_24': [[0, 62, 0], [63, 185, 2], [186, 233, 2], [234, 292, 2], [293, 314, 0]],
'test_boelter2_4': [[0, 260, 1], [261, 421, 1], [422, 635, 1], [636, 741, 1], [742, 846, 1],
[847, 903, 1], [904, 953, 1], [954, 1005, 1], [1006, 1148, 1], [1149, 1270, 1],
[1271, 1525, 1]],
'test_boelter2_5': [[0, 94, 0], [95, 176, 1], [177, 246, 1], [247, 340, 1], [341, 442, 1],
[443, 547, 1], [548, 654, 1], [655, 1206, 0], [1207, 1316, 1], [1317, 1466, 1],
[1467, 1787, 2], [1788, 1936, 1], [1937, 2084, 2]],
'test_boelter2_2': [[0, 145, 0], [146, 224, 0], [225, 271, 0], [272, 392, 0], [393, 454, 0],
[455, 762, 0], [763, 982, 0], [983, 1412, 0]],
'test_boelter_21': [[0, 238, 1], [239, 285, 0], [286, 310, 0], [311, 373, 1], [374, 457, 0],
[458, 546, 2], [547, 575, 1], [576, 748, 2], [749, 952, 2]],
'test_9434_1': [[0, 67, 0], [68, 124, 0], [125, 182, 1], [183, 251, 2], [252, 343, 0],
[344, 380, 0],
[381, 417, 0]],
'test_boelter3_6': [[0, 174, 1], [175, 280, 1], [281, 498, 0], [499, 639, 0], [640, 695, 1],
[696, 748, 0], [749, 788, 0], [789, 887, 2], [888, 1035, 1], [1036, 1445, 2]],
'test_boelter3_4': [[0, 158, 1], [159, 309, 1], [310, 477, 1], [478, 668, 1], [669, 780, 1],
[781, 817, 0], [818, 848, 1], [849, 942, 1]],
'test_boelter3_0': [[0, 102, 0], [103, 480, 0], [481, 703, 0], [704, 768, 0]],
'test_boelter2_12': [[0, 163, 0], [164, 285, 1], [286, 444, 1], [445, 519, 0],
[520, 583, 1], [584, 660, 0], [661, 854, 1], [855, 921, 1],
[922, 1006, 2], [1007, 1125, 2], [1126, 1332, 2], [1333, 1416, 2]],
'test_94342_16': [[0, 84, 0], [85, 158, 1], [159, 200, 1], [201, 214, 0], [215, 370, 1],
[371, 524, 1], [525, 587, 2], [588, 782, 2], [783, 1009, 2]],
'test_boelter2_15': [[0, 46, 0], [47, 252, 2], [253, 298, 1], [299, 414, 2], [415, 547, 2],
[548, 690, 1], [691, 728, 1], [729, 773, 2], [774, 935, 2]],
'test_boelter3_13': [[0, 204, 2], [205, 300, 2], [301, 488, 2], [489, 755, 2]],
'test_boelter3_11': [[0, 254, 1], [255, 424, 0], [425, 598, 1], [599, 692, 0], [693, 772, 2],
[773, 878, 2], [879, 960, 2], [961, 1171, 2], [1172, 1397, 2]],
'test_boelter4_12': [[0, 32, 0], [33, 141, 0], [142, 462, 2], [463, 519, 0], [520, 597, 0],
[598, 605, 0], [606, 942, 2], [943, 1232, 2], [1233, 1293, 0]],
'test_boelter4_9': [[0, 221, 0], [222, 307, 1], [308, 466, 0], [467, 794, 1], [795, 866, 1],
[867, 1005, 2], [1006, 1214, 2], [1215, 1270, 0]],
'test_boelter4_4': [[0, 183, 0], [184, 280, 1], [281, 529, 0], [530, 714, 0]],
'test_boelter4_1': [[0, 252, 0], [253, 729, 0], [730, 1202, 0], [1203, 1237, 0]],
'test_boelter4_3': [[0, 117, 0], [118, 200, 1], [201, 293, 1], [294, 404, 1], [405, 600, 1],
[601, 800, 1], [801, 905, 1], [906, 1234, 1]],
'test_boelter_12': [[0, 47, 1], [48, 119, 0], [120, 157, 1], [158, 636, 0], [637, 745, 1],
[746, 971, 2]],
'test_boelter_7': [[0, 69, 0], [70, 118, 1], [119, 133, 0], [134, 187, 0], [188, 239, 0],
[240, 328, 1], [329, 376, 0], [377, 397, 1], [398, 491, 0], [492, 564, 0],
[565, 619, 1], [620, 688, 1], [689, 774, 0], [775, 862, 0], [863, 897, 0],
[898, 958, 1], [959, 1000, 0], [1001, 1178, 0], [1179, 1267, 1],
[1268, 1307, 0], [1308, 1327, 0]],
'test_94342_21': [[0, 13, 0], [14, 66, 2], [67, 594, 2], [595, 1097, 2], [1098, 1133, 0]],
'test_boelter_2': [[0, 318, 0], [319, 458, 0], [459, 543, 0], [544, 606, 0]]}
# clips_all=listdir('/home/lfan/Dropbox/Projects/NIPS20/data/3d_pose2gaze/record_bbox/')
# print clips_all
clips_all = ['test_94342_13.p', 'test_boelter4_11.p', 'test_94342_20.p', 'test_94342_0.p', 'test_94342_23.p',
'test_boelter4_5.p', 'test_boelter_12.p', 'test_9434_3.p', 'test_boelter_15.p', 'test_94342_19.p',
'test_boelter_21.p', 'test_boelter3_2.p', 'test_boelter4_0.p', 'test_boelter_18.p', 'test6.p',
'test_boelter_1.p', 'test_boelter3_6.p', 'test_94342_21.p', 'test_boelter4_10.p', 'test_9434_1.p',
'test_94342_17.p', 'test_boelter4_9.p', 'test_94342_18.p', 'test_boelter4_12.p', 'test_boelter3_11.p',
'test_boelter4_1.p', 'test_94342_26.p', 'test_boelter_10.p', 'test_boelter4_8.p', 'test_boelter3_8.p',
'test2.p', 'test_94342_7.p', 'test_94342_16.p', 'test_boelter2_17.p', 'test_boelter_4.p',
'test_boelter3_3.p',
'test_94342_1.p', 'test_boelter_13.p', 'test_boelter_24.p', 'test_boelter3_1.p', 'test_boelter2_8.p',
'test_boelter2_2.p', 'test_boelter2_14.p', 'test_boelter2_0.p', 'test7.p', 'test_94342_3.p',
'test_boelter2_12.p', 'test_94342_8.p', 'test_boelter4_7.p', 'test_9434_18.p', 'test_94342_22.p',
'test_94342_5.p', 'test_boelter3_9.p', 'test1.p', 'test_boelter_6.p', 'test_boelter_19.p',
'test_boelter4_13.p', 'test_94342_10.p', 'test_boelter4_4.p', 'test_boelter3_4.p', 'test_boelter2_3.p',
'test_boelter_5.p', 'test_94342_12.p', 'test_boelter_14.p', 'test_boelter3_0.p', 'test_94342_6.p',
'test_94342_15.p', 'test_94342_24.p', 'test_boelter_2.p', 'test_boelter2_5.p', 'test_boelter_7.p',
'test_boelter_3.p', 'test_94342_4.p', 'test_boelter4_2.p', 'test_boelter3_13.p', 'test_94342_25.p',
'test_boelter2_16.p', 'test_boelter3_5.p', 'test_boelter4_3.p', 'test_boelter4_6.p', 'test_boelter3_10.p',
'test_boelter2_7.p', 'test_94342_14.p', 'test_boelter_22.p', 'test_boelter3_7.p', 'test_boelter2_15.p',
'test_boelter_9.p', 'test_boelter_25.p', 'test_boelter2_6.p', 'test_boelter2_4.p', 'test_boelter3_12.p',
'test_boelter_17.p', 'test_94342_11.p', 'test_94342_2.p']
clips_88 = ['test_94342_13.p', 'test_boelter4_11.p', 'test_94342_20.p', 'test_94342_0.p', 'test_94342_23.p',
'test_boelter4_5.p', 'test_boelter_12.p', 'test_9434_3.p', 'test_boelter_15.p', 'test_94342_19.p',
'test_boelter_21.p', 'test_boelter3_2.p', 'test_boelter4_0.p', 'test_boelter_18.p', 'test6.p',
'test_boelter_1.p', 'test_boelter3_6.p', 'test_94342_21.p', 'test_boelter4_10.p', 'test_9434_1.p',
'test_94342_17.p', 'test_boelter4_9.p', 'test_94342_18.p', 'test_boelter4_12.p', 'test_boelter3_11.p',
'test_boelter4_1.p', 'test_94342_26.p', 'test_boelter_10.p', 'test_boelter4_8.p', 'test_boelter3_8.p',
'test2.p', 'test_94342_7.p', 'test_94342_16.p', 'test_boelter2_17.p', 'test_boelter_4.p',
'test_boelter3_3.p',
'test_94342_1.p', 'test_boelter_13.p', 'test_boelter3_1.p', 'test_boelter2_8.p',
'test_boelter2_14.p', 'test_boelter2_0.p', 'test7.p', 'test_94342_3.p',
'test_boelter2_12.p', 'test_94342_8.p', 'test_boelter4_7.p', 'test_9434_18.p', 'test_94342_22.p',
'test_94342_5.p', 'test_boelter3_9.p', 'test1.p', 'test_boelter_6.p', 'test_boelter_19.p',
'test_boelter4_13.p', 'test_94342_10.p', 'test_boelter4_4.p', 'test_boelter3_4.p', 'test_boelter2_3.p',
'test_boelter_5.p', 'test_94342_12.p', 'test_boelter_14.p', 'test_boelter3_0.p', 'test_94342_6.p',
'test_94342_15.p', 'test_94342_24.p', 'test_boelter_2.p', 'test_boelter_7.p',
'test_boelter_3.p', 'test_94342_4.p', 'test_boelter4_2.p', 'test_boelter3_13.p', 'test_94342_25.p',
'test_boelter2_16.p', 'test_boelter3_5.p', 'test_boelter4_3.p', 'test_boelter4_6.p', 'test_boelter3_10.p',
'test_boelter2_7.p', 'test_94342_14.p', 'test_boelter3_7.p', 'test_boelter2_15.p',
'test_boelter_9.p', 'test_boelter2_6.p', 'test_boelter3_12.p',
'test_boelter_17.p', 'test_94342_11.p', 'test_94342_2.p']
# clips_with_gt_event=['test1.p', 'test7.p', 'test6.p', 'test_boelter2_12.p', 'test_94342_1.p', 'test_9434_18.p', 'test_94342_6.p', 'test_boelter_24.p', 'test_boelter2_4.p', 'test_boelter2_5.p', 'test_boelter2_2.p', 'test_boelter_21.p', 'test_9434_1.p', 'test_boelter3_6.p', 'test_boelter3_4.p', 'test_boelter3_0.p', 'test_94342_10.p', 'test_94342_16.p', 'test_boelter2_15.p', 'test_boelter3_13.p', 'test_boelter3_11.p', 'test_boelter4_12.p', 'test_boelter4_9.p', 'test_boelter4_4.p', 'test_boelter4_1.p', 'test_boelter4_3.p', 'test_boelter_12.p', 'test_boelter_7.p', 'test_94342_21.p', 'test_boelter_2.p']
# random.shuffle(clips_with_gt_event)
# print clips_with_gt_event
clips_with_gt_event = ['test_boelter2_15.p', 'test_94342_16.p', 'test_boelter4_4.p', 'test_94342_21.p',
'test_boelter4_1.p', 'test_boelter4_9.p', 'test_94342_1.p', 'test_boelter3_4.p',
'test_boelter_2.p', 'test_boelter_21.p', 'test_boelter4_12.p', 'test_boelter_7.p', 'test7.p',
'test_9434_18.p', 'test_94342_10.p', 'test_boelter3_13.p', 'test_94342_6.p', 'test1.p',
'test_boelter_12.p', 'test_boelter3_0.p', 'test6.p', 'test_9434_1.p', 'test_boelter2_12.p',
'test_boelter3_6.p', 'test_boelter4_3.p', 'test_boelter3_11.p']
# for k , v in event_seg_tracker.items():
# clips_with_gt_event.append(k+'.p')
# print len(clips_with_gt_event)
# print clips_with_gt_event
#
#
# import os
# clips = os.listdir('/home/shuwen/data/data_preprocessing2/regenerate_annotation/')
# random.shuffle(clips)
# print(clips)
# mind_clips = ['test_94342_16.p', 'test_boelter4_5.p', 'test_94342_2.p', 'test_boelter4_10.p', 'test_boelter2_3.p', 'test_94342_20.p', 'test_boelter4_9.p', 'test_boelter3_9.p', 'test_boelter3_4.p', 'test_boelter2_12.p', 'test_boelter4_6.p', 'test2.p', 'test_boelter4_2.p', 'test_boelter4_3.p', 'test_94342_24.p', 'test_94342_17.p', 'test_94342_6.p', 'test_94342_8.p', 'test_boelter3_0.p', 'test_94342_11.p', 'test_boelter3_7.p', 'test7.p', 'test_94342_18.p', 'test_boelter4_12.p', 'test_boelter_10.p', 'test_boelter3_8.p', 'test_boelter2_6.p', 'test_boelter4_7.p', 'test_boelter4_8.p', 'test_boelter_12.p', 'test_boelter4_0.p', 'test_boelter2_17.p', 'test_boelter3_12.p', 'test_boelter3_11.p', 'test_boelter3_5.p', 'test_94342_4.p', 'test_94342_15.p', 'test_94342_19.p', 'test_94342_7.p', 'test_boelter2_16.p', 'test_boelter2_8.p', 'test_94342_3.p', 'test_boelter_3.p', 'test_9434_3.p', 'test_boelter2_0.p', 'test_boelter3_13.p', 'test_9434_18.p', 'test_boelter_18.p', 'test_94342_22.p', 'test_boelter_6.p', 'test_boelter_4.p', 'test_boelter3_1.p', 'test_boelter3_2.p', 'test_boelter_7.p', 'test_boelter_13.p', 'test1.p', 'test_boelter3_3.p', 'test_boelter4_11.p', 'test_94342_1.p', 'test_94342_25.p', 'test_boelter_1.p', 'test_boelter_21.p', 'test_boelter3_6.p', 'test_boelter_14.p', 'test_94342_12.p', 'test_boelter2_14.p', 'test_boelter4_13.p', 'test_94342_10.p', 'test_boelter_9.p', 'test_94342_5.p', 'test_boelter_17.p', 'test6.p', 'test_boelter4_4.p', 'test_94342_23.p', 'test_boelter3_10.p', 'test_94342_21.p', 'test_94342_0.p', 'test_boelter_2.p', 'test_9434_1.p', 'test_boelter2_15.p', 'test_boelter4_1.p', 'test_boelter_5.p', 'test_94342_13.p', 'test_94342_14.p', 'test_boelter2_7.p', 'test_boelter_19.p', 'test_boelter_15.p', 'test_94342_26.p']
# i = 0
# count = 0
# mind_test_clips = []
# while count < int(len(mind_clips)*0.3):
# if mind_clips[i] not in clips_with_gt_event:
# mind_test_clips.append(mind_clips[i])
# i += 1
# count += 1
# else:
# i += 1
#
# print(len(mind_test_clips))
# print(mind_test_clips)
mind_test_clips = ['test_boelter4_5.p', 'test_94342_2.p', 'test_boelter4_10.p', 'test_boelter2_3.p', 'test_94342_20.p',
'test_boelter3_9.p', 'test_boelter4_6.p', 'test2.p', 'test_boelter4_2.p', 'test_94342_24.p',
'test_94342_17.p', 'test_94342_8.p', 'test_94342_11.p', 'test_boelter3_7.p', 'test_94342_18.p',
'test_boelter_10.p', 'test_boelter3_8.p', 'test_boelter2_6.p', 'test_boelter4_7.p',
'test_boelter4_8.p', 'test_boelter4_0.p', 'test_boelter2_17.p', 'test_boelter3_12.p',
'test_boelter3_5.p', 'test_94342_4.p', 'test_94342_15.p']
clips_len = {'test_94342_13.p': 1455, 'test_boelter4_11.p': 1355, 'test_94342_20.p': 1865, 'test_94342_0.p': 1940,
'test_94342_23.p': 539, 'test_boelter4_5.p': 1166, 'test_boelter_12.p': 972, 'test_9434_3.p': 323,
'test_boelter_15.p': 1055, 'test_94342_19.p': 1695, 'test_boelter_21.p': 953, 'test_boelter3_2.p': 1326,
'test_boelter4_0.p': 1322, 'test_boelter_18.p': 1386, 'test6.p': 704, 'test_boelter_1.p': 925,
'test_boelter3_6.p': 1446, 'test_94342_21.p': 1134, 'test_boelter4_10.p': 1263, 'test_9434_1.p': 418,
'test_94342_17.p': 1057, 'test_boelter4_9.p': 1271, 'test_94342_18.p': 1539, 'test_boelter4_12.p': 1294,
'test_boelter3_11.p': 1398, 'test_boelter4_1.p': 1238, 'test_94342_26.p': 527, 'test_boelter_10.p': 654,
'test_boelter4_8.p': 1006, 'test_boelter3_8.p': 1161, 'test2.p': 975, 'test_94342_7.p': 1386,
'test_94342_16.p': 1010, 'test_boelter2_17.p': 1268, 'test_boelter_4.p': 787, 'test_boelter3_3.p': 861,
'test_94342_1.p': 1387, 'test_boelter_13.p': 1004, 'test_boelter_24.p': 315, 'test_boelter3_1.p': 1351,
'test_boelter2_8.p': 1347, 'test_boelter2_2.p': 1413, 'test_boelter2_14.p': 920, 'test_boelter2_0.p': 1143,
'test7.p': 227, 'test_94342_3.p': 1776, 'test_boelter2_12.p': 1417, 'test_94342_8.p': 1795,
'test_boelter4_7.p': 1401, 'test_9434_18.p': 1042, 'test_94342_22.p': 586, 'test_94342_5.p': 2292,
'test_boelter3_9.p': 1383, 'test1.p': 699, 'test_boelter_6.p': 1435, 'test_boelter_19.p': 959,
'test_boelter4_13.p': 933, 'test_94342_10.p': 1156, 'test_boelter4_4.p': 715, 'test_boelter3_4.p': 943,
'test_boelter2_3.p': 942, 'test_boelter_5.p': 834, 'test_94342_12.p': 2417, 'test_boelter_14.p': 904,
'test_boelter3_0.p': 769, 'test_94342_6.p': 944, 'test_94342_15.p': 1174, 'test_94342_24.p': 741,
'test_boelter_2.p': 607, 'test_boelter2_5.p': 2085, 'test_boelter_7.p': 1328, 'test_boelter_3.p': 596,
'test_94342_4.p': 1924, 'test_boelter4_2.p': 1353, 'test_boelter3_13.p': 756, 'test_94342_25.p': 568,
'test_boelter2_16.p': 1734, 'test_boelter3_5.p': 851, 'test_boelter4_3.p': 1235, 'test_boelter4_6.p': 1334,
'test_boelter3_10.p': 1301, 'test_boelter2_7.p': 1505, 'test_94342_14.p': 1841, 'test_boelter_22.p': 828,
'test_boelter3_7.p': 1544, 'test_boelter2_15.p': 936, 'test_boelter_9.p': 636, 'test_boelter_25.p': 951,
'test_boelter2_6.p': 2100, 'test_boelter2_4.p': 1526, 'test_boelter3_12.p': 359, 'test_boelter_17.p': 817,
'test_94342_11.p': 1610, 'test_94342_2.p': 1968}
# no_communication=0
# follow=0
# joint=0
# cnt=0.
# for clip in clips_with_gt_event:
# clip=clip.split('.')[0]
# segs=event_seg_tracker[clip]
# for seg in segs:
# if seg[2]==0:
# no_communication+=1
# elif seg[2]==1:
# follow+=1
# elif seg[2]==2:
# joint+=1
# cnt+=1
#
# segs=event_seg_battery[clip]
# for seg in segs:
# if seg[2] == 0:
# no_communication += 1
# elif seg[2] == 1:
# follow += 1
# elif seg[2] == 2:
# joint += 1
# cnt += 1
#
# print(no_communication/cnt, follow/cnt, joint/cnt)
# pointing
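# Count how many annotation lines across all clips contain the "pointing" label.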
import os
cnt = 0
pointing_cnt = 0
annot_path = '/home/lfan/Dropbox/Projects/NIPS20/annot/all/'
files = listdir(annot_path)
for file in files:
with open(os.path.join(annot_path, file), 'r') as f:
lines = f.readlines()
if len(lines) == 0:
continue
for line in lines:
cnt += 1
my_list = line.strip().split(' ')
if "\"pointing\"" in my_list:
pointing_cnt += 1
print('pointing {} / {}'.format(pointing_cnt, cnt))
|
[
"60700050+fengzhihong-377@users.noreply.github.com"
] |
60700050+fengzhihong-377@users.noreply.github.com
|
c7c8e0aef6b0f1ed3c228861c1309297ef5a51df
|
0875563641c4ab6105da7a9850ce5102b791045f
|
/setup.py
|
402e14f10b992d22c5a0731808c0f8024ef3cbc1
|
[
"MIT"
] |
permissive
|
ParaguayEduca/etoys.activity
|
17a7569fd6766afdfbd4a20341883924ce4119ff
|
4755e31ad440ca7247a02a45b4481d84e23af524
|
refs/heads/master
| 2022-09-17T10:57:07.707315
| 2020-06-02T00:59:32
| 2020-06-02T00:59:32
| 268,666,765
| 0
| 1
| null | 2020-06-02T00:59:33
| 2020-06-02T00:53:52
| null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
#!/usr/bin/env python
from sugar3.activity import bundlebuilder
bundlebuilder.start()
|
[
"fierrofenix@gmail.com"
] |
fierrofenix@gmail.com
|
7fe96dd13f0dbe0b06778f993c0886ee04348852
|
93ad452b4bfb55c10aed246f3f37342deaed1274
|
/scripts/generator_utils.py
|
2f9926e6ef8d115bed92d35bd68c483a6bacd940
|
[] |
no_license
|
open-craft/microsite-generator
|
2a18dc078e941d6cbe4dc0e30e3ad4656a5b68fd
|
e444a913bb883d557249c8a9874b302349777529
|
refs/heads/main
| 2023-06-29T10:29:57.605699
| 2021-07-26T18:49:08
| 2021-07-26T18:49:08
| 373,735,873
| 4
| 0
| null | 2021-07-26T18:49:09
| 2021-06-04T05:56:58
|
Python
|
UTF-8
|
Python
| false
| false
| 7,889
|
py
|
import yaml
import os
from argparse import ArgumentParser
from const import GENERATED_SHARED_CONFIG_FILE
def common_args() -> ArgumentParser:
"""
Common argument parser for each script
"""
parser = ArgumentParser()
parser.add_argument("ConfigFilePath", metavar='config_file_path', type=str, help="Configuration data file path.")
parser.add_argument("--settings", type=str, required=False, help="Settings module")
return parser
def update_model(model, **kwargs):
"""
Helper function for updating a Django model instance
Args:
model: Django model instance
kwargs: Keyword args mapping field names to the new values
"""
for attr, val in kwargs.items():
setattr(model, attr, val)
model.save()
def deep_merge(source, destination):
"""
Helper function to recursively merge ``source`` into ``destination``
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
deep_merge(value, node)
else:
destination[key] = value
return destination
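# Example (hypothetical values): deep_merge({'a': {'b': 1}}, {'a': {'c': 2}})
# returns {'a': {'b': 1, 'c': 2}} -- nested keys are merged instead of replaced.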
class Config:
"""
Configuration generation helper class
"""
microsites = {}
organizations = {}
# main domain to work with
main_domain = None
# LMS OAuth clients
oauth = {
'ecommerce_sso_client': 'custom-sites-ecommerce-sso',
}
# stores global override values
global_overrides = {
'overrides': {},
'context_overrides': {},
}
def __init__(self, config):
"""
Initialize Config class
"""
self._config = config
self.organizations = config['organizations']
self.main_domain = config['main_domain']
self.oauth = config.get(
'oauth',
self.oauth
)
self._extract_microsites(config)
self._extract_overrides(config)
def _extract_overrides(self, config):
"""
A helper method to apply override settings from the given config.
"""
if config['microsites']:
for key, val in config['microsites'].items():
# $ is a special key and used for global overrides
if key == '$':
self.global_overrides.update(val)
continue
# if a site-specific override is provided, merge it into self.microsites
if val.get('overrides'):
self.microsites[key]['overrides'] = deep_merge(
self.microsites[key]['overrides'],
val['overrides']
)
# if a site-specific context override is provided, merge it into self.microsites
if val.get('context_overrides'):
self.microsites[key]['context_overrides'] = deep_merge(
self.microsites[key]['context_overrides'],
val['context_overrides']
)
def _extract_microsites(self, config):
"""
A helper method to prepare self.microsites from the given config.
"""
# if there will be a site for each organization
if config.get('site_for_each_organization', False):
for key, val in config['organizations'].items():
self.microsites[key] = {
'name': val['name'],
'overrides': {
'lms': {
'openedx.core.djangoapps.site_configuration.models.SiteConfiguration': {
'site_values': {
'course_org_filter': key
}
}
}
},
'context_overrides': {},
}
else:
# otherwise microsites can be given separately from organizations
for key, val in config['microsites'].items():
# $ is a special key and used for global overrides
if key == '$':
continue
self.microsites[key] = {
'name': val['name'],
'overrides': {},
'context_overrides': {},
}
def get_microsite_codes(self):
"""
Get list of microsite codes
"""
return self.microsites.keys()
def get_organization_codes(self):
"""
Get list of organization codes
"""
return self.organizations.keys()
def get_organization_name(self, code):
"""
Given an organization code, returns its name
"""
return self.organizations[code]['name']
def get_context(self, code):
"""
Prepares and returns a dictionary with useful values for generating
microsite configurations.
"""
microsite = self.microsites[code]
lms_domain = '{}.{}'.format(code.lower(), self.main_domain)
discovery_domain = 'discovery.{}'.format(lms_domain)
ecommerce_domain = 'ecommerce.{}'.format(lms_domain)
studio_domain = 'studio.{}'.format(lms_domain)
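# Per-service domains are derived from the microsite code: with example values,
# code 'abc' and main domain 'example.com' give LMS domain 'abc.example.com'
# and discovery domain 'discovery.abc.example.com'.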
context = {
'name': microsite['name'],
'code': code,
'main_domain': self.main_domain,
'lms_domain': lms_domain,
'lms_url': 'https://{}'.format(lms_domain),
'discovery_domain': discovery_domain,
'discovery_url': 'https://{}'.format(discovery_domain),
'discovery_api_url': 'https://{}/api/v1/'.format(discovery_domain),
'ecommerce_domain': ecommerce_domain,
'ecommerce_url': 'https://{}'.format(ecommerce_domain),
'studio_domain': studio_domain,
'studio_url': 'https://{}'.format(studio_domain)
}
# apply global context overrides
context.update(self.global_overrides['context_overrides'])
# apply site specific context overrides
context.update(microsite['context_overrides'])
return context
def apply_overrides(self, code, service, model_class, data):
"""
Overrides existing value with global or site-specific value.
Args:
code (str): microsite code
service (str): service key
model_class (Model): django model
data (dict): data dictionary that will be used to update the model
"""
microsite = self.microsites[code]
global_overrides = self.global_overrides['overrides']
site_overrides = microsite['overrides']
model_path = '{}.{}'.format(model_class.__module__, model_class.__name__)
# if global override exists, apply them
if service in global_overrides and model_path in global_overrides[service]:
data = deep_merge(data, global_overrides[service][model_path])
# if site specific override exists, apply them
if service in site_overrides and model_path in site_overrides[service]:
data = deep_merge(data, site_overrides[service][model_path])
return data
def load_config(file_path) -> Config:
"""
Helper function to load configuration yaml file
"""
with open(file_path) as file:
config = yaml.safe_load(file)
return Config(config)
def write_generated_values(data = {}):
"""
Write new config value to the generated config file.
"""
values = load_generated_values()
values.update(data)
with open(GENERATED_SHARED_CONFIG_FILE, 'w') as file:
yaml.dump(values, file)
def load_generated_values():
"""
Read config value from the generated config file.
"""
values = {}
if os.path.exists(GENERATED_SHARED_CONFIG_FILE):
with open(GENERATED_SHARED_CONFIG_FILE) as file:
values = yaml.safe_load(file)
return values
|
[
"noreply@github.com"
] |
open-craft.noreply@github.com
|
9641b775ef1b783924590a3d1b9b8ce16399e971
|
124b68cc525fcc562e0d274bc3b5f75062e134f7
|
/IoT8_differential_comparer/tests/test_comparer.py
|
2d1d4408190cb752301e7e34b11dd84b0f852015
|
[] |
no_license
|
akane34/iot_api_adaptation_chain
|
9c88b9666d47d586123e7d6e458616d71104882e
|
50a0187c59d984cea7cf4e58dde9dabe29f194bf
|
refs/heads/master
| 2022-12-18T13:32:28.939109
| 2018-09-17T02:58:59
| 2018-09-17T02:58:59
| 148,314,118
| 0
| 0
| null | 2022-12-08T01:01:10
| 2018-09-11T12:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
from unittest import TestCase
from differential_comparer.comparer import processApis
class ComparerTestCase(TestCase):
def setUp(self):
self.API_ORIGINAL = \
'/home/farkaz00/Documents/MISO/IoT_Challenge8/03.Case_Study/03.SHAS_REST_API.json'
self.API_TYPE_CHANGE = \
'/home/farkaz00/Documents/MISO/IoT_Challenge8/03.Case_Study/03.SHAS_REST_API_TYPE_CHANGE.json'
self.API_DEPRECATED_METHOD = \
'/home/farkaz00/Documents/MISO/IoT_Challenge8/03.Case_Study/03.SHAS_REST_API_DEPRECATED_METHOD_UPDATE.json'
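# NOTE: the fixture paths above are absolute paths on the original author's
# machine; these tests only run where those JSON files exist.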
def testProcessApisTypeChange(self):
diffs = processApis(self.API_ORIGINAL, self.API_TYPE_CHANGE)
self.assertTrue(len(diffs) > 0)
dif1 = diffs[0]
self.assertEqual(dif1.api_name, 'SHAS API')
self.assertEqual(dif1.old_api_version, '0.0.1')
self.assertEqual(dif1.new_api_version, '0.0.2')
self.assertEqual(dif1.type, 'EXPECTED')
self.assertEqual(dif1.old_value, '0.0.1')
self.assertEqual(dif1.new_value, '0.0.2')
dif1 = diffs[1]
self.assertEqual(dif1.api_name, 'SHAS API')
self.assertEqual(dif1.old_api_version, '0.0.1')
self.assertEqual(dif1.new_api_version, '0.0.2')
self.assertEqual(dif1.type, 'EXPECTED')
self.assertEqual(dif1.old_value, 'integer')
self.assertEqual(dif1.new_value, 'string')
def testProcessApisDeprecatedMethod(self):
diffs = processApis(self.API_ORIGINAL, self.API_DEPRECATED_METHOD)
self.assertTrue(len(diffs) > 0)
dif1 = diffs[0]
self.assertEqual(dif1.api_name, 'SHAS API')
self.assertEqual(dif1.old_api_version, '0.0.1')
self.assertEqual(dif1.new_api_version, '0.0.3')
self.assertEqual(dif1.type, 'EXPECTED')
self.assertEqual(dif1.old_value, '0.0.1')
self.assertEqual(dif1.new_value, '0.0.3')
dif2 = diffs[1]
self.assertEqual(dif2.api_name, 'SHAS API')
self.assertEqual(dif2.old_api_version, '0.0.1')
self.assertEqual(dif2.new_api_version, '0.0.3')
self.assertEqual(dif2.type, 'UNEXPECTED')
self.assertEqual(dif2.old_value, None)
self.assertTrue(len(dif2.new_value))
dif3 = diffs[2]
self.assertEqual(dif3.api_name, 'SHAS API')
self.assertEqual(dif3.old_api_version, '0.0.1')
self.assertEqual(dif3.new_api_version, '0.0.3')
self.assertEqual(dif3.type, 'UNEXPECTED')
self.assertEqual(dif3.old_value, None)
self.assertTrue(len(dif3.new_value))
dif4 = diffs[3]
self.assertEqual(dif4.api_name, 'SHAS API')
self.assertEqual(dif4.old_api_version, '0.0.1')
self.assertEqual(dif4.new_api_version, '0.0.3')
self.assertEqual(dif4.type, 'EXPECTED')
self.assertTrue(len(dif4.old_value))
self.assertEqual(dif4.new_value, None)
|
[
"jarvein@hotmail.com"
] |
jarvein@hotmail.com
|
be1b35d1d52e9d9bfc1110abcf20f8718435742e
|
7ecbe46fc65ad557da224d5b34fadf03e0117bcf
|
/Inception_v3/bottleneckcal3.py
|
d8265e16be2e2026d965dd01c965b761f6de144c
|
[] |
no_license
|
LuoJiaji/flower-photo-classification
|
78ada93b43b4d99904cfee87aac8c3b328e61e11
|
2852cd3fa26ba7747b31362a9d41ef7ea65805e9
|
refs/heads/master
| 2020-04-07T07:50:14.951807
| 2018-11-19T09:14:51
| 2018-11-19T09:14:51
| 158,190,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,908
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 16:50:00 2018
@author: Bllue
"""
import os
import random
import numpy as np
import tensorflow as tf
datapath = 'data/tmp/bottleneck/'
n_calss = len(os.listdir(datapath))
batchsize = 256
def get_datalist(datapath,train_percentage=80,test_percentage =10):
train_datapath = []
train_label = []
test_datapath = []
test_label = []
validation_datapath = []
validation_label = []
filename = os.listdir(datapath)
for i,path in enumerate(filename):
dataname = os.listdir(datapath+path)
print(path,len(dataname))
for file in dataname:
chance = np.random.randint(100)
if chance < train_percentage:
train_datapath.append(datapath + path+ '/' + file)
train_label.append(i)
elif chance<(train_percentage+test_percentage):
test_datapath.append(datapath + path+ '/' + file)
test_label.append(i)
else:
validation_datapath.append(datapath + path + '/' + file)
validation_label.append(i)
print('train data:',len(train_datapath))
print('test data:',len(test_datapath))
return [train_datapath,train_label,test_datapath,test_label,validation_datapath,validation_label]
def get_random_batch(train_datapath,train_label,batchsize,n_class):
# train_data = np.zeros([batchsize,2048])
# train_data = train_data.astype(np.uint8)
# train_label_onehot = np.zeros([batchsize,n_calss])
train_data = []
train_label_onehot = []
l = len(train_datapath)
i = 0
for _ in range(batchsize):
# image_index = random.randrange(l)
image_index = random.randrange(65535)
image_index = image_index % len(train_datapath) # wrap the index into the valid range
with open(train_datapath[image_index], 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
# train_data[i,:] = bottleneck_values
# train_label_onehot[i,int(train_label[image_index])] = 1
train_data.append(bottleneck_values)
label = np.zeros(n_class, dtype=np.float32)
label[int(train_label[image_index])] = 1.0
train_label_onehot.append(label )
# print(i,image_index,train_datapath[image_index])
i += 1
return train_data,train_label_onehot
def get_test_data(test_datapath,test_label,n_class):
# test_data = np.zeros([len(test_datapath),2048])
# test_data = test_data.astype(np.uint8)
# test_label_onehot = np.zeros([len(test_datapath),n_calss])
test_data = []
test_label_onehot = []
i = 0
for path in test_datapath:
with open(path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
# test_data[i,:] = bottleneck_values
# test_label_onehot[i,test_label[i]] = 1
test_data.append(bottleneck_values)
label = np.zeros(n_class, dtype=np.float32)
label[test_label[i]] = 1.0
test_label_onehot.append(label)
i += 1
return test_data,test_label_onehot
#bottleneck_path = train_datapath[0]
#
#with open(bottleneck_path, 'r') as bottleneck_file:
# bottleneck_string = bottleneck_file.read()
# bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
BOTTLENECK_TENSOR_SIZE = 2048
n_classes = 5
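# A small classifier head trained on pre-computed Inception-v3 bottleneck
# features: the 2048-dimensional vectors (stored as comma-separated text files)
# feed a 128-unit hidden layer and a softmax output over n_classes classes.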
bottleneck_input = tf.placeholder(
tf.float32, [None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
# Define the placeholder for the ground-truth labels
ground_truth_input = tf.placeholder(
tf.float32, [None, n_classes], name='GroundTruthInput')
# Define fully connected layers to solve the new image classification problem
with tf.name_scope('fc1'):
weights1 = tf.Variable(
tf.truncated_normal(
[BOTTLENECK_TENSOR_SIZE, 128], stddev=0.1))
biases1 = tf.Variable(tf.zeros([128]))
fc1 = tf.nn.relu(tf.matmul(bottleneck_input, weights1) + biases1)
with tf.name_scope('fc2'):
weights2 = tf.Variable(tf.truncated_normal([128,n_classes], stddev=0.1))
biases2 = tf.Variable(tf.zeros([n_classes]))
logits = tf.matmul(fc1,weights2) + biases2
final_tensor = tf.nn.softmax(logits)
# Define the cross-entropy loss function
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=ground_truth_input)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy_mean)
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy_mean)
# compute the accuracy
with tf.name_scope('evaluation'):
correct_prediction = tf.equal(
tf.argmax(final_tensor, 1), tf.argmax(ground_truth_input, 1))
evaluation_step = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32))
train_datapath,train_label,test_datapath,test_label,validation_datapath,validation_label = get_datalist(datapath)
train_data,train_label_onehot = get_random_batch(train_datapath,train_label,256,n_classes)
test_data,test_label_onehot = get_test_data(test_datapath,test_label,n_classes)
STEPS = 6000
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    train_data,train_label_onehot = get_random_batch(train_datapath,train_label,256,n_classes)
for i in range(STEPS):
        train_data,train_label_onehot = get_random_batch(train_datapath,train_label,256,n_classes)
sess.run( train_step,feed_dict={bottleneck_input: train_data, ground_truth_input: train_label_onehot })
# print(i)
if i % 100 == 0 or i + 1 == STEPS:
test_accuracy = sess.run(evaluation_step,feed_dict={bottleneck_input: test_data,ground_truth_input: test_label_onehot})
print(i,test_accuracy)
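    # Hedged addition (not in the original script): a minimal sketch of scoring
    # the otherwise unused validation split with the same helpers, assuming the
    # cached bottleneck files exist for those paths.
    validation_data,validation_label_onehot = get_test_data(validation_datapath,validation_label,n_classes)
    validation_accuracy = sess.run(evaluation_step,feed_dict={bottleneck_input: validation_data,ground_truth_input: validation_label_onehot})
    print('final validation accuracy:',validation_accuracy)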
|
[
"lt920@126.com"
] |
lt920@126.com
|
224cedecd9373beedd0e96c6f7abe0b67b5010e3
|
c46668ed2d366905978581bd19ba60cb6356f5f5
|
/misc/test.py
|
5c43001b183cb72a2a548780d887cec5a38b89fa
|
[] |
no_license
|
ndrwchn/bitshovel
|
db5992f61b22e7bf1d1d0ed6b112bc396f93f891
|
49a8f21ae4fb0dca0a82b81beb1ae913b1d39cd8
|
refs/heads/master
| 2020-04-14T10:56:51.524738
| 2019-01-01T23:43:08
| 2019-01-01T23:43:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
import os
import sys
import base64
import json
import pprint
import requests
import redis
from sseclient import SSEClient
def with_requests(url):
"""Get a streaming response for the given event feed using requests."""
return requests.get(url, stream=True)
r = redis.StrictRedis(host="localhost", port=6379, password="", decode_responses=True)
query = {"v": 3, "q": { "find": {} }}
#squery = json.dumps(query)
squery = '{"v": 3, "q": { "find": {} }}'
print(squery)
b64 = base64.b64encode(squery.encode('utf-8')).decode('ascii')  # base64.encodestring is removed in Python 3.9+ and requires bytes
print(b64)
url = 'https://bitsocket.org/s/' + b64
response = with_requests(url)
client = SSEClient(response)
for event in client.events():
bsock = json.loads(event.data)
r.publish("bitcoin_reader",event.data)
print(bsock)
#pprint.pprint(bsock)
|
[
"dfoderick@gmail.com"
] |
dfoderick@gmail.com
|
42ec93628543ed4b5d99e9565a39876bd8b17f99
|
5ee2155447132c91c70032b8bc980d4e41a2f762
|
/main_himawari/scripts/typhoon_case.py
|
198efa909323e056cf85886cba4c2de28231fd4d
|
[] |
no_license
|
redmundnacario/himawari-8-legacy
|
0864e047a9b822254bb976f198d6c9741e750380
|
b47786d6af8ad6ad5755e75c66644b0ba405e1cf
|
refs/heads/master
| 2020-03-18T02:34:52.326945
| 2018-05-21T00:20:32
| 2018-05-21T00:20:32
| 134,197,777
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,239
|
py
|
# -*- coding: utf-8 -*-
"""
1. Open radar data
> pre-process
> grid
Created on Tue Dec 8 22:44:01 2015
@author: red
"""
# importing functions from different folder
import sys
sys.path.insert(0, '/media/red/SAMSUNG/main/main_radar')
sys.path.insert(0, '/media/red/SAMSUNG/main/main_himawari')
from radar_preprocessing_functions import *
from rainfall_retrieval_functions import *
from radar_functions_plot import *
from himawari_functions import *
from plot_functions_basic import *
# directories of radar data
path_radar = '/media/red/SAMSUNG/radar/tagaytay/ty_lando'
# directory of himawari
path_himawari = '/media/red/SAMSUNG/himawari/ty_lando'
# read hdf5 radar data
# change directory then open list of hdf5
file_list = open_list_radar_hdf5(path_radar)
variables_da1, date, time, attr = read_hdf5_radar(file_list[0])
#radar data
#variables_da1['ZH_PIA']
# read himawari data
# change directory then open list of hdf5
file_list_H = open_hdf(path_himawari)
h08_data, h08_radiance, lon_geos, lat_geos, date_time = read_h8_hdf5(file_list_H[1])
# subset data
map_masked, map_subset,lon_subset, lat_subset = subset_center_point(h08_data['B14'],attr['coords'][0], attr['coords'][1], 120,lon_geos,lat_geos)
# BTD IR3 - IR1 or 6.2 microns - 10.4 micron #convectivity
IR_diff_2 = h08_data["IR3"] - h08_data["IR1"]
map_masked, IR_diff_2,lon_subset, lat_subset = subset_center_point(IR_diff_2,attr['coords'][0], attr['coords'][1], 120,lon_geos,lat_geos)
# BTD IR2 - IR1 or 12.3 - 10.4 #water vapor sensitivity
IR_diff = h08_data["IR2"] - h08_data["IR1"]
map_masked, IR_diff, lon_subset, lat_subset = subset_center_point(IR_diff,attr['coords'][0], attr['coords'][1], 120,lon_geos,lat_geos)
#pltimshow(map_subset,cmap =pl.cm.jet_r)
#pltimshow(h08_data['IR1'],vmin =h08_data['IR1'].min() , vmax = h08_data['IR1'].max())
#pltimshow(IR_diff_2)
#pltimshow(IR_diff)
# georeference the radar data
rlon, rlat, ralt = georef_radar(attr['coords'],attr['ranges'], attr['azimuths'], attr['elevs'])
gridded_radar = grid_map(variables_da1['ZH_PIA'],lon_subset, lat_subset, attr['coords'], rlon, rlat)
gridded_radar = N.ma.masked_invalid(gridded_radar )
#mpcolormesh(gridded_radar, lon_subset, lat_subset)
#
mpcolormesh(map_subset, lon_subset, lat_subset,cmap =pl.cm.jet_r)
mpcolormesh(IR_diff_2, lon_subset, lat_subset)
mpcolormesh(IR_diff, lon_subset, lat_subset)
#,vmin =h08_data['B14'].min() , vmax =h08_data['B14'].max() )
# cmask himawari
map_subset_1 = N.ma.array(map_subset, mask = gridded_radar.mask )
IR_diff_2 = N.ma.array(IR_diff_2, mask = gridded_radar.mask )
IR_diff = N.ma.array(IR_diff, mask = gridded_radar.mask )
#mpcolormesh(map_subset_1, lon_subset, lat_subset,cmap =pl.cm.jet_r)
#mpcolormesh(IR_diff_2 , lon_subset, lat_subset,cmap =pl.cm.jet)
#mpcolormesh(IR_diff, lon_subset, lat_subset,cmap =pl.cm.jet)
#,vmin =h08_data['B14'].min() , vmax =h08_data['B14'].max() )
# lowest rainfall rate
lower_limit = zr.z2r(trafo.idecibel(gridded_radar.min()))
# scatter plot BT vs Zh
#pltscatter(map_subset_1, gridded_radar)
#plthist2d(map_subset_1, gridded_radar)
#pltscatter(IR_diff_2, gridded_radar)
#plthist2d(IR_diff_2, gridded_radar)
#pltscatter(IR_diff, gridded_radar)
#plthist2d(IR_diff, gridded_radar)
|
[
"rednacky@gmail.com"
] |
rednacky@gmail.com
|
1e3200edf2bcd028b8c04e5c7b5af0af62fd036e
|
be364bd76ef1c25cb9f13b2e9b83c1dd802148e5
|
/misc/ServerGui.py
|
8bece371b9646a9404661e2f6bc2d9bfd375d123
|
[] |
no_license
|
minhkhoi1026/remote-monitor
|
93a73e4f0e2a500ff7ab9f102b79ed259fc8de33
|
0e965cdc4a23c1d02f7052bc8da473b7f57ffa04
|
refs/heads/master
| 2023-08-26T09:00:30.042696
| 2021-11-07T17:33:02
| 2021-11-07T17:33:02
| 418,033,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Server.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 700, 500))
self.label.setText("")
self.label.setObjectName("label")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"minhkhoi1026@gmail.com"
] |
minhkhoi1026@gmail.com
|
a62411f8d8935793da3181d5a3dd0120876a35ea
|
5f904cb4964655537aa83e5aa9658d5cbd43d221
|
/AutosClasificados/urls.py
|
7091107fe281fb9ca8f63307ffbbed4639bfaf8b
|
[
"MIT"
] |
permissive
|
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
|
03471db80d7351d34816ffb782585d28847acf52
|
2997359150236a7d897a3f9201f8e9404f3d7f02
|
refs/heads/master
| 2021-05-09T22:05:21.965903
| 2018-02-17T06:09:08
| 2018-02-17T06:09:08
| 118,743,295
| 0
| 0
|
MIT
| 2018-02-17T07:16:16
| 2018-01-24T09:32:06
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from .core import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^AutosClasificados/form/$', views.publicarArticulo, name='publicarArticulo'),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
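# Hedged usage note (not part of the original file): assuming this module is the
# project's ROOT_URLCONF, reverse('home') resolves to '/' and
# reverse('publicarArticulo') resolves to '/AutosClasificados/form/'.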
|
[
"joaquinpunales@gmail.com"
] |
joaquinpunales@gmail.com
|
144a57e2abf73235b54f924ded7230ea6f2f9d53
|
513d230f48d6563435fee13fa222bd4ddbf69036
|
/architectures/GroupNetworkInNetwork.py
|
3d594846e623f8e00902bb9bb0c724bb82142522
|
[
"MIT"
] |
permissive
|
AliaksandrSiarohin/FeatureLearningRotNet
|
feb4ccc80843f1909ad267c9a2825ac8431844c9
|
5ca057389984d3eb21aa032baef6cc10a6d61bf6
|
refs/heads/master
| 2020-08-21T19:59:04.780682
| 2019-10-21T15:38:10
| 2019-10-21T15:38:10
| 216,235,320
| 0
| 1
|
MIT
| 2019-10-19T16:21:19
| 2019-10-19T16:21:19
| null |
UTF-8
|
Python
| false
| false
| 7,143
|
py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from groupy.gconv.pytorch_gconv import P4ConvP4, P4ConvZ2
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, first=False):
super(BasicBlock, self).__init__()
        padding = (kernel_size - 1) // 2  # integer division: conv padding must be an int in Python 3
self.layers = nn.Sequential()
conv = P4ConvP4 if not first else P4ConvZ2
self.layers.add_module('Conv', conv(in_planes, out_planes, \
kernel_size=kernel_size, stride=1, padding=padding, bias=False))
self.layers.add_module('BatchNorm', nn.BatchNorm3d(out_planes))
self.layers.add_module('ReLU', nn.ReLU(inplace=True))
def forward(self, x):
return self.layers(x)
class PoolAndCls(nn.Module):
def __init__(self, nChannels):
super(PoolAndCls, self).__init__()
self.cls = nn.Conv1d(nChannels, 1, kernel_size=1)
def forward(self, feat):
num_channels = feat.size(1)
out = F.avg_pool3d(feat, (1, feat.size(3), feat.size(4)))
out = out.view(-1, num_channels, feat.size(2))
out = self.cls(out).squeeze(1)
return out
class GroupNetworkInNetwork(nn.Module):
def __init__(self, opt):
super(GroupNetworkInNetwork, self).__init__()
num_classes = opt['num_classes']
num_inchannels = opt['num_inchannels'] if ('num_inchannels' in opt) else 3
num_stages = opt['num_stages'] if ('num_stages' in opt) else 3
use_avg_on_conv3 = opt['use_avg_on_conv3'] if ('use_avg_on_conv3' in opt) else True
assert (num_stages >= 3)
nChannels = 192 // 2
nChannels2 = 160 // 2
nChannels3 = 96 // 2
blocks = [nn.Sequential() for i in range(num_stages)]
# 1st block
blocks[0].add_module('Block1_ConvB1', BasicBlock(num_inchannels, nChannels, 5, first=True))
blocks[0].add_module('Block1_ConvB2', BasicBlock(nChannels, nChannels2, 1))
blocks[0].add_module('Block1_ConvB3', BasicBlock(nChannels2, nChannels3, 1))
blocks[0].add_module('Block1_MaxPool', nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
# 2nd block
blocks[1].add_module('Block2_ConvB1', BasicBlock(nChannels3, nChannels, 5))
blocks[1].add_module('Block2_ConvB2', BasicBlock(nChannels, nChannels, 1))
blocks[1].add_module('Block2_ConvB3', BasicBlock(nChannels, nChannels, 1))
blocks[1].add_module('Block2_AvgPool', nn.AvgPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
# 3rd block
blocks[2].add_module('Block3_ConvB1', BasicBlock(nChannels, nChannels, 3))
blocks[2].add_module('Block3_ConvB2', BasicBlock(nChannels, nChannels, 1))
blocks[2].add_module('Block3_ConvB3', BasicBlock(nChannels, nChannels, 1))
if num_stages > 3 and use_avg_on_conv3:
blocks[2].add_module('Block3_AvgPool', nn.AvgPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
for s in range(3, num_stages):
blocks[s].add_module('Block' + str(s + 1) + '_ConvB1', BasicBlock(nChannels, nChannels, 3))
blocks[s].add_module('Block' + str(s + 1) + '_ConvB2', BasicBlock(nChannels, nChannels, 1))
blocks[s].add_module('Block' + str(s + 1) + '_ConvB3', BasicBlock(nChannels, nChannels, 1))
# global average pooling and classifier
blocks.append(nn.Sequential())
blocks[-1].add_module('Classifier', PoolAndCls(nChannels))
self._feature_blocks = nn.ModuleList(blocks)
self.all_feat_names = ['conv' + str(s + 1) for s in range(num_stages)] + ['classifier', ]
assert (len(self.all_feat_names) == len(self._feature_blocks))
def _parse_out_keys_arg(self, out_feat_keys):
# By default return the features of the last layer / module.
out_feat_keys = [self.all_feat_names[-1], ] if out_feat_keys is None else out_feat_keys
if len(out_feat_keys) == 0:
raise ValueError('Empty list of output feature keys.')
for f, key in enumerate(out_feat_keys):
if key not in self.all_feat_names:
raise ValueError(
'Feature with name {0} does not exist. Existing features: {1}.'.format(key, self.all_feat_names))
elif key in out_feat_keys[:f]:
raise ValueError('Duplicate output feature key: {0}.'.format(key))
        # Find the highest output feature in `out_feat_keys`.
max_out_feat = max([self.all_feat_names.index(key) for key in out_feat_keys])
return out_feat_keys, max_out_feat
def forward(self, x, out_feat_keys=None):
"""Forward an image `x` through the network and return the asked output features.
Args:
x: input image.
out_feat_keys: a list/tuple with the feature names of the features
that the function should return. By default the last feature of
the network is returned.
Return:
out_feats: If multiple output features were asked then `out_feats`
is a list with the asked output features placed in the same
order as in `out_feat_keys`. If a single output feature was
asked then `out_feats` is that output feature (and not a list).
"""
#print (x.shape)
out_feat_keys, max_out_feat = self._parse_out_keys_arg(out_feat_keys)
out_feats = [None] * len(out_feat_keys)
feat = x
for f in range(max_out_feat + 1):
feat = self._feature_blocks[f](feat)
key = self.all_feat_names[f]
if key in out_feat_keys:
out_feats[out_feat_keys.index(key)] = feat.view(feat.shape[0], -1, feat.shape[-2], feat.shape[-1]) if key != 'classifier' else feat
out_feats = out_feats[0] if len(out_feats) == 1 else out_feats
return out_feats
def weight_initialization(self):
for m in self.modules():
if isinstance(m, P4ConvP4) or isinstance(m, P4ConvZ2):
if m.weight.requires_grad:
n = 4 * m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
if m.weight.requires_grad:
m.weight.data.fill_(1)
if m.bias.requires_grad:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
if m.bias.requires_grad:
m.bias.data.zero_()
def create_model(opt):
return GroupNetworkInNetwork(opt)
if __name__ == '__main__':
size = 32
opt = {'num_classes': 4, 'num_stages': 5}
net = create_model(opt)
x = torch.autograd.Variable(torch.FloatTensor(1, 3, size, size).uniform_(-1, 1))
out = net(x, out_feat_keys=net.all_feat_names)
for f in range(len(out)):
print('Output feature {0} - size {1}'.format(
net.all_feat_names[f], out[f].size()))
out = net(x)
print('Final output: {0}'.format(out.size()))
|
[
"you@example.com"
] |
you@example.com
|
dc83274eed03fba89086dbecd9f871c6037a6f44
|
b71afb6809bb8e0c4c9993840e88505d50123938
|
/src/Team.py
|
237efdb8b449a1086e3bc8af21a9406832ce29ab
|
[] |
no_license
|
rafalsiniewicz/deep_learning_project
|
ec59e7fe151fc11f17f44619a7e8c7ce1e8e6562
|
eb13046c3b44bddb2de6437fd59d09f141c2ce50
|
refs/heads/master
| 2021-04-02T13:06:39.067835
| 2020-05-22T16:41:05
| 2020-05-22T16:41:05
| 248,278,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from Player import *
from Game import *
import pandas as pd  # pd.DataFrame is used below, so import it explicitly
class Team:
    def __init__(self, games=None, players=None, stats=None):
        # Team has no base class holding state, so keep the stats on the
        # instance instead of calling super().__init__(stats); None defaults
        # avoid shared mutable default arguments.
        self.stats = stats if stats is not None else pd.DataFrame()
        self.games = games if games is not None else []  # Game
        self.players = players if players is not None else []  # Player
def get_data(self):
pass
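# Hedged usage sketch (not part of the original file): constructing a Team with
# empty collections; it only needs Player.py and Game.py to be importable.
if __name__ == "__main__":
    team = Team()
    print(len(team.games), len(team.players), team.stats.shape)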
|
[
"rafalsiniewicz@gmail.com"
] |
rafalsiniewicz@gmail.com
|
2a796a36b90bf571d8b9b1bf7abeabd2232aa657
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_route_tables_operations.py
|
1c9a94c29cdc2efe60006d96c3e30c66ba36f568
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 29,608
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.RouteTable"]:
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
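# Hedged usage sketch (not part of the generated module): these operations are
# normally reached through the versioned async management client rather than
# instantiated directly. The import paths and credential type below are
# assumptions based on the usual azure-sdk-for-python layout.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.v2019_07_01.aio import NetworkManagementClient
#
#     async def list_route_tables(subscription_id):
#         async with DefaultAzureCredential() as credential:
#             async with NetworkManagementClient(credential, subscription_id) as client:
#                 async for table in client.route_tables.list_all():
#                     print(table.name)
#
#     asyncio.run(list_route_tables("<subscription-id>"))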
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
89e1333380f672897a9ec01d8b3306d735939835
|
84c4778bde1fc399e834883afe62ffc36f2d2cd6
|
/Egypt.py
|
8a0d1077498e9eae3fdf4f52a111051e6c9d7abb
|
[] |
no_license
|
mebsahle/ICPC_Solved-
|
edbfde745084ccc028b7cb446f3e55c941ca474f
|
4d805b075a522b51944127845e91feb25b315758
|
refs/heads/master
| 2023-02-11T05:43:59.074750
| 2021-01-10T04:31:08
| 2021-01-10T04:31:08
| 217,336,000
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
while True:
a,b,c = [int(i) for i in input().split()]
if a == 0 and b == 0 and c == 0:
break
else:
if a**2+b**2==c**2 or a**2+c**2==b**2 or a**2==b**2+c**2:
print("right")
else:
print("wrong")
|
[
"mebatsionsahle@gmail.com"
] |
mebatsionsahle@gmail.com
|
1ae1c64e80e8fbb9a8c92151c4703c8ba5a8e8b2
|
2de33ba731066a63352080dd19da1e4582bb00c4
|
/collective.cover/src/collective/cover/tests/test_collection_tile.py
|
b045e92d615b8556d5004a7d3f0a99dd65042b89
|
[] |
no_license
|
adam139/plonesrc
|
58f48e7cdfc8fbed7398011c40649f095df10066
|
cbf20045d31d13cf09d0a0b2a4fb78b96c464d20
|
refs/heads/master
| 2021-01-10T21:36:44.014240
| 2014-09-09T04:28:04
| 2014-09-09T04:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,894
|
py
|
# -*- coding: utf-8 -*-
from collective.cover.testing import INTEGRATION_TESTING
from collective.cover.tiles.base import IPersistentCoverTile
from collective.cover.tiles.collection import CollectionTile
from plone.app.testing import login
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.uuid.interfaces import IUUID
from zope.interface.verify import verifyClass
from zope.interface.verify import verifyObject
import unittest
class CollectionTileTestCase(unittest.TestCase):
layer = INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
self.cover = self.portal['frontpage']
self.tile = CollectionTile(self.cover, self.request)
# XXX: tile initialization
self.tile.__name__ = 'collective.cover.collection'
def test_interface(self):
self.assertTrue(IPersistentCoverTile.implementedBy(CollectionTile))
self.assertTrue(verifyClass(IPersistentCoverTile, CollectionTile))
tile = CollectionTile(None, None)
self.assertTrue(IPersistentCoverTile.providedBy(tile))
self.assertTrue(verifyObject(IPersistentCoverTile, tile))
def test_default_configuration(self):
self.assertTrue(self.tile.is_configurable)
self.assertTrue(self.tile.is_editable)
self.assertTrue(self.tile.is_droppable)
def test_tile_is_empty(self):
self.assertTrue(self.tile.is_empty())
def test_populate_tile_with_object(self):
obj = self.portal['my-collection']
self.tile.populate_with_object(obj)
self.assertEqual(self.tile.data.get('uuid'), IUUID(obj))
def test_populate_tile_with_invalid_object(self):
obj = self.portal['my-document']
self.tile.populate_with_object(obj)
# tile must be still empty
self.assertTrue(self.tile.is_empty())
def test_accepted_content_types(self):
self.assertEqual(self.tile.accepted_ct(), ['Collection'])
def test_collection_tile_render(self):
obj = self.portal['my-collection']
self.tile.populate_with_object(obj)
rendered = self.tile()
self.assertIn("<p>The collection doesn't have any results.</p>", rendered)
def test_delete_collection(self):
obj = self.portal['my-collection']
self.tile.populate_with_object(obj)
self.tile.populate_with_object(obj)
rendered = self.tile()
self.assertIn("<p>The collection doesn't have any results.</p>", rendered)
setRoles(self.portal, TEST_USER_ID, ['Manager', 'Editor', 'Reviewer'])
login(self.portal, TEST_USER_NAME)
self.portal.manage_delObjects(['my-collection'])
rendered = self.tile()
self.assertIn("Please drop a collection here to fill the tile.", rendered)
|
[
"plone@localhost.localdomain"
] |
plone@localhost.localdomain
|
d24c9809aadb1aa5dbb5b1da5b40a993ec9cfc61
|
817cad93c3ef277e2651e5ad2a5888fb8b903d32
|
/DA2Lite/compression/filter_decomposition/methods/vmbf.py
|
8c3f1c871339b2475c1cd89d08a6285b2facf44a
|
[
"MIT",
"Python-2.0"
] |
permissive
|
TrendingTechnology/DA2Lite
|
6a601703ff6958b089ac84e4f8b392ebefa9573f
|
b95bb47bd13b4b0ddeefe5e0d93122f384f2774d
|
refs/heads/main
| 2023-04-17T09:34:11.168433
| 2021-05-04T03:47:43
| 2021-05-04T03:47:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,164
|
py
|
from __future__ import division
import numpy as np
from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
def VBMF(Y, cacb, sigma2=None, H=None):
"""Implementation of the analytical solution to Variational Bayes Matrix Factorization.
This function can be used to calculate the analytical solution to VBMF.
This is based on the paper and MatLab code by Nakajima et al.:
"Global analytic solution of fully-observed variational Bayesian matrix factorization."
Notes
-----
If sigma2 is unspecified, it is estimated by minimizing the free energy.
If H is unspecified, it is set to the smallest of the sides of the input Y.
To estimate cacb, use the function EVBMF().
Attributes
----------
Y : numpy-array
Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
cacb : int
Product of the prior variances of the matrices that factorize the input.
sigma2 : int or None (default=None)
Variance of the noise on Y.
H : int or None (default = None)
Maximum rank of the factorized matrices.
Returns
-------
U : numpy-array
Left-singular vectors.
S : numpy-array
Diagonal matrix of singular values.
V : numpy-array
Right-singular vectors.
post : dictionary
Dictionary containing the computed posterior values.
References
----------
.. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37.
.. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012.
"""
L,M = Y.shape #has to be L<=M
if H is None:
H = L
#SVD of the input matrix, max rank of H
U,s,V = np.linalg.svd(Y)
U = U[:,:H]
s = s[:H]
V = V[:H].T
#Calculate residual
residual = 0.
if H<L:
residual = np.sum(np.sum(Y**2)-np.sum(s**2))
#Estimation of the variance when sigma2 is unspecified
if sigma2 is None:
upper_bound = (np.sum(s**2)+ residual)/(L+M)
if L==H:
lower_bound = s[-1]**2/M
else:
lower_bound = residual/((L-H)*M)
sigma2_opt = minimize_scalar(VBsigma2, args=(L,M,cacb,s,residual), bounds=[lower_bound, upper_bound], method='Bounded')
sigma2 = sigma2_opt.x
print("Estimated sigma2: ", sigma2)
#Threshold gamma term
#Formula above (21) from [1]
thresh_term = (L+M + sigma2/cacb**2)/2
threshold = np.sqrt( sigma2 * (thresh_term + np.sqrt(thresh_term**2 - L*M) ))
#Number of singular values where gamma>threshold
pos = np.sum(s>threshold)
#Formula (10) from [2]
d = np.multiply(s[:pos],
1 - np.multiply(sigma2/(2*s[:pos]**2),
L+M+np.sqrt( (M-L)**2 + 4*s[:pos]**2/cacb**2 )))
#Computation of the posterior
post = {}
zeta = sigma2/(2*L*M) * (L+M+sigma2/cacb**2 - np.sqrt((L+M+sigma2/cacb**2)**2 - 4*L*M))
post['ma'] = np.zeros(H)
post['mb'] = np.zeros(H)
post['sa2'] = cacb * (1-L*zeta/sigma2) * np.ones(H)
post['sb2'] = cacb * (1-M*zeta/sigma2) * np.ones(H)
delta = cacb/sigma2 * (s[:pos]-d- L*sigma2/s[:pos])
post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
post['mb'][:pos] = np.sqrt(np.divide(d, delta))
post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
post['sigma2'] = sigma2
post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) + (residual+np.sum(s**2))/sigma2 - (L+M)*H
+ np.sum(M*np.log(cacb/post['sa2']) + L*np.log(cacb/post['sb2'])
+ (post['ma']**2 + M*post['sa2'])/cacb + (post['mb']**2 + L*post['sb2'])/cacb
+ (-2 * np.multiply(np.multiply(post['ma'], post['mb']), s)
+ np.multiply(post['ma']**2 + M*post['sa2'],post['mb']**2 + L*post['sb2']))/sigma2))
return U[:,:pos], np.diag(d), V[:,:pos], post
def VBsigma2(sigma2,L,M,cacb,s,residual):
H = len(s)
thresh_term = (L+M + sigma2/cacb**2)/2
threshold = np.sqrt( sigma2 * (thresh_term + np.sqrt(thresh_term**2 - L*M) ))
pos = np.sum(s>threshold)
d = np.multiply(s[:pos],
1 - np.multiply(sigma2/(2*s[:pos]**2),
L+M+np.sqrt( (M-L)**2 + 4*s[:pos]**2/cacb**2 )))
zeta = sigma2/(2*L*M) * (L+M+sigma2/cacb**2 - np.sqrt((L+M+sigma2/cacb**2)**2 - 4*L*M))
post_ma = np.zeros(H)
post_mb = np.zeros(H)
post_sa2 = cacb * (1-L*zeta/sigma2) * np.ones(H)
post_sb2 = cacb * (1-M*zeta/sigma2) * np.ones(H)
delta = cacb/sigma2 * (s[:pos]-d- L*sigma2/s[:pos])
post_ma[:pos] = np.sqrt(np.multiply(d, delta))
post_mb[:pos] = np.sqrt(np.divide(d, delta))
post_sa2[:pos] = np.divide(sigma2*delta, s[:pos])
post_sb2[:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
F = 0.5*(L*M*np.log(2*np.pi*sigma2) + (residual+np.sum(s**2))/sigma2 - (L+M)*H
+ np.sum(M*np.log(cacb/post_sa2) + L*np.log(cacb/post_sb2)
+ (post_ma**2 + M*post_sa2)/cacb + (post_mb**2 + L*post_sb2)/cacb
+ (-2 * np.multiply(np.multiply(post_ma, post_mb), s)
+ np.multiply(post_ma**2 + M*post_sa2,post_mb**2 + L*post_sb2))/sigma2))
return F
def EVBMF(Y, sigma2=None, H=None):
"""Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization.
This function can be used to calculate the analytical solution to empirical VBMF.
This is based on the paper and MatLab code by Nakajima et al.:
"Global analytic solution of fully-observed variational Bayesian matrix factorization."
Notes
-----
If sigma2 is unspecified, it is estimated by minimizing the free energy.
If H is unspecified, it is set to the smallest of the sides of the input Y.
Attributes
----------
Y : numpy-array
Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
sigma2 : int or None (default=None)
Variance of the noise on Y.
H : int or None (default = None)
Maximum rank of the factorized matrices.
Returns
-------
U : numpy-array
Left-singular vectors.
S : numpy-array
Diagonal matrix of singular values.
V : numpy-array
Right-singular vectors.
post : dictionary
Dictionary containing the computed posterior values.
References
----------
.. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37.
.. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012.
"""
L,M = Y.shape #has to be L<=M
print(L)
print(M)
if H is None:
H = L
alpha = L/M
tauubar = 2.5129*np.sqrt(alpha)
#SVD of the input matrix, max rank of H
U,s,V = np.linalg.svd(Y)
U = U[:,:H]
s = s[:H]
V = V[:H].T
#Calculate residual
residual = 0.
if H<L:
residual = np.sum(np.sum(Y**2)-np.sum(s**2))
#Estimation of the variance when sigma2 is unspecified
if sigma2 is None:
xubar = (1+tauubar)*(1+alpha/tauubar)
eH_ub = int(np.min([np.ceil(L/(1+alpha))-1, H]))-1
upper_bound = (np.sum(s**2)+residual)/(L*M)
lower_bound = np.max([s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
scale = 1. #/lower_bound
s = s*np.sqrt(scale)
residual = residual*scale
lower_bound = lower_bound*scale
upper_bound = upper_bound*scale
print(lower_bound)
print(upper_bound)
sigma2_opt = minimize_scalar(EVBsigma2, args=(L,M,s,residual,xubar), bounds=[lower_bound, upper_bound], method='Bounded')
sigma2 = sigma2_opt.x
#Threshold gamma term
threshold = np.sqrt(M*sigma2*(1+tauubar)*(1+alpha/tauubar))
pos = np.sum(s>threshold)
#Formula (15) from [2]
d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt((1-np.divide((L+M)*sigma2, s[:pos]**2))**2 -4*L*M*sigma2**2/s[:pos]**4) )
#Computation of the posterior
post = {}
post['ma'] = np.zeros(H)
post['mb'] = np.zeros(H)
post['sa2'] = np.zeros(H)
post['sb2'] = np.zeros(H)
post['cacb'] = np.zeros(H)
tau = np.multiply(d, s[:pos])/(M*sigma2)
delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
post['mb'][:pos] = np.sqrt(np.divide(d, delta))
post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
post['sigma2'] = sigma2
post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) + (residual+np.sum(s**2))/sigma2
+ np.sum(M*np.log(tau+1) + L*np.log(tau/alpha +1) - M*tau))
return U[:,:pos], np.diag(d), V[:,:pos], post
def EVBsigma2(sigma2,L,M,s,residual,xubar):
H = len(s)
alpha = L/M
x = s**2/(M*sigma2)
z1 = x[x>xubar]
z2 = x[x<=xubar]
tau_z1 = tau(z1, alpha)
term1 = np.sum(z2 - np.log(z2))
term2 = np.sum(z1 - tau_z1)
term3 = np.sum( np.log( np.divide(tau_z1+1, z1)))
term4 = alpha*np.sum(np.log(tau_z1/alpha+1))
obj = term1+term2+term3+term4+ residual/(M*sigma2) + (L-H)*np.log(sigma2)
return obj
def phi0(x):
return x-np.log(x)
def phi1(x, alpha):
return np.log(tau(x,alpha)+1) + alpha*np.log(tau(x,alpha)/alpha + 1) - tau(x,alpha)
def tau(x, alpha):
return 0.5 * (x-(1+alpha) + np.sqrt((x-(1+alpha))**2 - 4*alpha))
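# Hedged usage sketch (not part of the original module): estimating the rank of
# a noisy low-rank matrix with EVBMF; the matrix sizes and noise level are
# arbitrary illustration values.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    Y = rng.randn(20, 5).dot(rng.randn(5, 50)) + 0.1 * rng.randn(20, 50)  # L=20 <= M=50
    U, S, V, post = EVBMF(Y)
    print('estimated rank:', S.shape[0])
    print('estimated noise variance:', post['sigma2'])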
|
[
"kangsinhan@nate.com"
] |
kangsinhan@nate.com
|
772b99702c24090a612c5cb84aebeeeadbf61b34
|
cf7883026dd1520ef5b01d377c50d38885fa3d4c
|
/JUNECO/SNACKUP.py
|
c27325d7c064ea46d9d2349d648414bd70114b71
|
[] |
no_license
|
krishnadey30/Competitive_Coding
|
7e885bb9bc2ee3e06d7a0b722c48dde5131ad371
|
7e4a4ce145f4975ffa51ef0727b9d78f132d03a4
|
refs/heads/master
| 2020-03-08T03:13:43.429082
| 2018-04-04T18:44:54
| 2018-04-04T18:44:54
| 127,884,428
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
test=int(input())
for t in range(0,test):
n=int(input())
l=[]
for i in range(1,n):
l.append((i,i+1))
l.append((n,1))
print(n)
for i in range(0,n):
print(n)
for j in range(0,n):
print(j+1,end=" ")
for x in l[j]:
print(x,end=" ")
print()
l.append(l[0])
l.remove(l[0])
|
[
"krishnakumar.d16@iiits.in"
] |
krishnakumar.d16@iiits.in
|
da5d1b860142f323050bc7b2cb8a083b79ef2e94
|
9887b201a356d9f2d56bd95e4fb59533b7fc8529
|
/actions/procesamiento/factor_strategies.py
|
ab439ea23e01c8df6c40bdada883a341612e861b
|
[] |
no_license
|
matiasguerrero/ProcessActionBot
|
ea6ae02b7aadaf561d33d328ea7c37a9df871e8b
|
a0261b408d57d21c742266e1c15fa99ca6c27f28
|
refs/heads/master
| 2023-06-03T05:50:42.393538
| 2021-06-25T21:16:29
| 2021-06-25T21:16:29
| 371,838,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,052
|
py
|
import abc
import datetime
from typing import Dict, Any
from actions.procesamiento.tarea import Tarea
from actions.procesamiento.fase import Fase
from actions.event_handling import EventPublisher
class CalculationStrategy(metaclass=abc.ABCMeta):
"""Interfaz que define el comportamiento basico requerido por una estrategia
usada en el calculo de metricas.
Autor: Matias G.
"""
@abc.abstractmethod
def process_event(self, event: Dict) -> None:
"""Procesa el evento.
Autor: Bruno.
:param event: evento a procesar.
:return: None.
"""
raise NotImplementedError
@abc.abstractmethod
def calculate_value(self) -> Any:
"""Calcula el valor de la estrategia.
Autor: Bruno.
:return: Any.
"""
raise NotImplementedError
@abc.abstractmethod
def get_name(self) -> str:
"""Calcula el valor de la estrategia.
Autor: Matias G.
:return: Any.
"""
raise NotImplementedError
class MeetingParticipations(CalculationStrategy):
"""Cuenta las participaciones de todos los TeamMembers/AgileBots que forman
parte de un proyecto en AgileTalk.
Autor: Matias G.
"""
def __init__(self): # Componente.getReuniones()
"""Constructor.
Args:
"""
self._n_occurrences = 0
self._d_ocurrences = {}
self.result=""
def __str__(self) -> str:
return "(MeetingParticipations: {})".format(self._n_occurrences)
def get_name(self) -> str:
return "MeetingParticipations"
def process_event(self, event: Dict) -> None:
"""A partir de un evento cuenta las participaciones generales por
persona.
Autor: Matias G.
:param event: evento a procesar. El formato del evento es:
{'Participations': [{'1': {'cant_particip': '3'}},
{'2': {'cant_particip': '2'}}, {'3': {'cant_particip': '3'}}]}
:return: None.
"""
self.result=""
participations = event["Participations"]
for x in participations:
for key, value in x.items():
self.result=self.result+"El miembro del equipo "+str(key)+" participó "+str(value["cant_particip"])+" veces en la reunión. "
def calculate_value(self) -> str:
"""Devuelve todas las participaciones en reuniones dentro de un
proyecto.
Autor: Matias G.
:return: Str.
"""
return self.result
class MeetAsistance(CalculationStrategy):
"""Calcula el porcentaje de asistencia a una reunion.
Autor: Matias G.
"""
def __init__(self): # Componente.getReuniones()
self._n_asistance = 0
def __str__(self) -> str:
return "(MeetAsistance: {})".format(self._n_asistance)
def get_name(self) -> str:
return "MeetAsistance"
def process_event(self, event: Dict) -> None:
"""Establece el porcentaje de TeamMembers/AgileBots que participaron en
la reunion.
Autor: Matias G.
:param event: evento a procesar. El formato del evento es:
{"event_id": "", "time": "", "id_reunion": "",
"participaciones": {"bruno": 5, "matias": 7}}
:return: None.
"""
        # TODO: every TeamMember/AgileBot that is expected to take part in the
        # meeting must appear in event["participaciones"], even if it is with a
        # value of zero participations.
reunion = event["participaciones"]
total_asistance = 0
for meet_user, ocurrence in reunion.items():
if ocurrence > 0:
total_asistance += 1
cant = len(reunion)
if cant > 0:
self._n_asistance = total_asistance / cant
def calculate_value(self) -> float:
"""Devuelve el porcentaje de asistencia a la reunion.
Autor: Matias G.
:return: Dict.
"""
return self._n_asistance
class EstimatedDeadline(CalculationStrategy):
"""Calcula el porcentaje de asistencia a una reunion.
Autor: Matias G.
"""
def __init__(self): # Componente getFase
"""Constructor.
Args:
"""
        # The phase self.meet should be provided by a component that supplies
        # the artifact
self.fecha_init= datetime.datetime.utcnow()
self.fecha_fin= datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
self._fase = Fase(1, self.fecha_init.strftime("%Y-%m-%d %H:%M:%S"), self.fecha_fin.strftime("%Y-%m-%d %H:%M:%S"))
self._fase.add_actor("actor1")
self._fase.add_actor("actor1")
self._fase.add_actor("actor1")
self._estimated_time = datetime.date.today()
self._real_time = datetime.date.today()
def __str__(self) -> str:
return " "
def get_name(self) -> str:
return "EstimatedDeadline"
def process_event(self, event: dict) -> None:
"""Compara el plazo de finalización estimado de una fase con su
finalizacion real.
Autor: Matias G.
:param event: evento "FinFase" a procesar.
:return: None.
"""
d={"id":1,"fecha_start":"fecha","fecha_ended":"fecha"}
date_format = "%Y-%m-%d %H:%M:%S"
self._fase.set_id(event["id"])
self._fase.finalizar()
#real_end_date=self._fase.get_fecha_fin()
end_date = datetime.datetime.strptime(
str(self._fase.get_duracion_estimada()), date_format)
start_date = datetime.datetime.strptime(
str(self._fase.get_fecha_inicio()), date_format)
real_end_date=datetime.datetime.strptime(
str(event["fecha_ended"]), date_format)
real_start_date=datetime.datetime.strptime(
str(event["fecha_start"]), date_format)
self._estimated_time = end_date - start_date
self._real_time = real_end_date - real_start_date
def calculate_value(self) -> int:
"""Retorna la cantidad de segundos existentes entre el plazo estimado
y el plazo real de finalizacion.
Si la cantidad es negativa -> realTime < estimatedTime
Si la cantidad es positiva -> realTime > estimatedTime
Autor: Matias G.
:return: int.
"""
self._real_time = self._real_time.total_seconds()
self._estimated_time = self._estimated_time.total_seconds()
difference_sec = self._real_time - self._estimated_time
return difference_sec
class ControlTask(CalculationStrategy):
"""Calcula el porcentaje de asistencia a una reunion.
Autor: Matias G.
"""
def __init__(self): # Componente.getReuniones()
self._n_asistance = 0
self.tareas=[]
self.result={}
self.valor=""
self.horashechas=0
def __str__(self) -> str:
return "(ControlTask: {})".format(self._n_asistance)
def get_name(self) -> str:
return "ControlTask"
def process_event(self, event: dict) -> None:
#event={Tasks: ["task_id": {hours_worked: value, total_hours: value}]
self.valor=""
self.horashechas=0
list_tareas=event["Tareas"]
for x in list_tareas:
for key, value in x.items():
horas=int(value["horas_totales"]) - int(value["horas_trabajadas"])
self.valor=self.valor+"La tarea "+ str(key)+ " necesita "+str(horas)+" hora/s más para ser finalizada. "
self.horashechas=self.horashechas+int(value["horas_trabajadas"])
def calculate_value(self) -> str:
resultado=self.valor+" El miembro del equipo trabajó "+str(self.horashechas)+" horas diarias."
#if self.horashechas < agilebot.get_horasminimas():
# EventPublisher().publish("message",
# { "message": "El miembro del equipo no trabajó las horas minimas diarias",
# "from":"ProcessActionBot",
# "to":"Scrum Master"})
return resultado
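# --- Editor's sketch (illustration only, not part of the original module) ---
# Minimal example of feeding the documented event formats to two of the strategies
# above; the rest of the bot wiring (EventPublisher, Tarea, Fase) is not exercised here.
if __name__ == "__main__":
    participations = MeetingParticipations()
    participations.process_event({"Participations": [{"1": {"cant_particip": "3"}},
                                                     {"2": {"cant_particip": "2"}}]})
    print(participations.calculate_value())

    asistance = MeetAsistance()
    asistance.process_event({"event_id": "", "time": "", "id_reunion": "r1",
                             "participaciones": {"bruno": 5, "matias": 0}})
    print(asistance.calculate_value())  # 0.5 -> one of the two members attended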
|
[
"maguerrero@alumnos.exa.unicen.edu.ar"
] |
maguerrero@alumnos.exa.unicen.edu.ar
|
6b73163a5f9e5940fdf3a55ac661f2276cc2bfe2
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/e521d476743c42ac9b3d37225576e9b9.py
|
0573a84aba567a7d294818af2b4f8c2a561bdbc8
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# -*- coding: utf-8 -*-
'''
Author: Postprandial
Purpose: Version2 of the Bob file:
Bob now answers all questions with 'Sure.' and all shouting (caps) with 'Whoa, chill out!'.
Lowercase questions, or questions ending in whitespace, are also answered with 'Sure.'.
Bob also still answers empty or whitespace-only input with 'Fine. Be that way!'.
All other statements (upper & lowercase) are answered with 'Whatever.'.
'''
def hey(what):
prompt=what.strip()
answer=''
answerFine=[' \t',""]
if prompt in answerFine:
answer='Fine. Be that way!'
elif prompt.isupper():
answer='Whoa, chill out!'
elif prompt[-1]=='?':
answer='Sure.'
else:
answer='Whatever.'
return answer
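# --- Editor's note (illustration only, not part of the exercise solution) ---
# Example responses, matching the branches above:
#   hey('WATCH OUT!')   -> 'Whoa, chill out!'
#   hey('Is this ok?')  -> 'Sure.'
#   hey('   ')          -> 'Fine. Be that way!'
#   hey('Tom-ay-to.')   -> 'Whatever.'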
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
10cd3ec2bf6aed6db6fa2683eebb85e68ff3df10
|
7b7bfbfebd627a3ccfdd52bb7164fa4f94cda7fc
|
/optic_store/patches/v0_6/set_spec_parts_cl.py
|
da352328e9d34fba734fd5b1b615a11d138a1001
|
[
"MIT"
] |
permissive
|
f-9t9it/optic_store
|
d117b7ef7c4107ec15d8194fc57d66a18aff5945
|
4682ae99cdb2cbfb1ff99196398d7379b4b6c8f1
|
refs/heads/master
| 2022-07-01T10:29:54.783550
| 2022-06-21T14:34:40
| 2022-06-21T14:34:40
| 171,165,708
| 23
| 43
|
NOASSERTION
| 2022-06-21T14:21:16
| 2019-02-17T19:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from optic_store.doc_events.sales_order import get_parts
from optic_store.patches.v0_6.set_spec_parts import _get_docs
def execute():
settings = frappe.get_single("Optical Store Settings")
frames = map(lambda x: x.item_group, settings.frames)
lenses = map(lambda x: x.item_group, settings.lens)
for doctype in ["Sales Order", "Sales Invoice"]:
for doc in _get_docs(doctype):
if doc.orx_type == "Contact Lens":
frame, lens_right, lens_left = get_parts(doc.items)
for item in doc.items:
if not item.os_spec_part:
if not frame and item.item_group in frames:
frappe.db.set_value(
item.doctype, item.name, "os_spec_part", "Frame"
)
frame = item
elif not lens_right and item.item_group in lenses:
frappe.db.set_value(
item.doctype, item.name, "os_spec_part", "Lens Right"
)
lens_right = item
elif not lens_left and item.item_group in lenses:
frappe.db.set_value(
item.doctype, item.name, "os_spec_part", "Lens Left"
)
lens_left = item
|
[
"sun@libermatic.com"
] |
sun@libermatic.com
|
de7d37f5795ee1cb6a9597629efc6cf0a5d219a9
|
587581b377a823a9bbb3c75e88fd31c7cf05fb01
|
/assignment2/ensemble.py
|
9d4c8612dec8ed4c8288c860491d546966b74da9
|
[] |
no_license
|
Miopas/BioNLP
|
48b6a512ba9eb53888c9f7c5e060992dc5c99b9a
|
bf1c7c1d824f381b001d71c158cc11eb8ccf1061
|
refs/heads/master
| 2022-12-28T06:09:17.462986
| 2020-10-12T03:54:39
| 2020-10-12T03:54:39
| 295,817,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
from sklearn import svm
from sklearn.model_selection import StratifiedKFold
from feature_generator import FeatureGenerator, Record
from sklearn.model_selection import KFold
import argparse
from cls_utils import *
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
if __name__ == '__main__':
# Load the data
f_path = 'pdfalls.csv'
data = loadDataAsDataFrame(f_path)
# SPLIT THE DATA (we could use sklearn.model_selection.train_test_split)
training_set_size = int(0.8 * len(data))
training_data = data[:training_set_size]
test_data = data[training_set_size:]
# K-Fold split
kf = KFold(n_splits=5)
kf.get_n_splits(training_data)
scores = []
for train_index, dev_index in kf.split(training_data):
ttp_train_data = get_sub(training_data, train_index)
ttp_dev_data = get_sub(training_data,dev_index)
feature_generator = FeatureGenerator(ttp_train_data)
train_data_vectors, train_classes = feature_generator.transform(ttp_train_data)
dev_data_vectors, dev_classes = feature_generator.transform(ttp_dev_data)
test_data_vectors, test_classes = feature_generator.transform(test_data)
# TRAIN THE MODEL
cls_models = []
#cls_models.append(GaussianNB())
cls_models.append(LinearSVC(random_state=0))
#cls_models.append(RandomForestClassifier(bootstrap=True, max_depth=10, max_features='auto',
# min_samples_leaf=1, min_samples_split=2, n_estimators=10,
# random_state=0, n_jobs=-1))
#cls_models.append(MLPClassifier(activation='tanh', hidden_layer_sizes=(16,)))
#cls_models.append(KNeighborsClassifier(algorithm='ball_tree', n_neighbors=11, weights='uniform'))
#cls_models.append(LogisticRegression(class_weight='balanced', fit_intercept=True,
#solver='liblinear'))
for clf in cls_models:
clf.fit(train_data_vectors, train_classes)
predictions = get_voting(cls_models, dev_data_vectors)
dev_metrics = get_metrics(predictions, dev_classes)
predictions = get_voting(cls_models, test_data_vectors)
test_metrics = get_metrics(predictions, test_classes)
scores.append(test_metrics)
print_metrics(scores)
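# --- Editor's note (sketch, not part of the original script) ---
# get_voting, get_metrics and print_metrics come from cls_utils and are not shown
# here. A hard majority vote over the fitted classifiers could look roughly like
# the sketch below; the real helper may differ.
#
#   import numpy as np
#   from scipy import stats
#
#   def majority_vote(models, vectors):
#       preds = np.stack([m.predict(vectors) for m in models])  # (n_models, n_samples)
#       return stats.mode(preds, axis=0)[0].ravel()             # most frequent label per sample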
|
[
"gyt_guoyuting@126.com"
] |
gyt_guoyuting@126.com
|
4672b88873af075d8bf74a280c3e3548c05c72f5
|
3b41d40a0285f3f29ff52f673a37ba6e368eb490
|
/OneArticle/settings.py
|
72d9351e9a96b370d405d2494f603b6b903d5d92
|
[] |
no_license
|
X-Wei/OneArticleCrawler
|
020ce1adb7c8a47b50183c5524a3b06c28b92c48
|
3e69bfdb22d45f07f1543a83bf37864a98c8a7ba
|
refs/heads/master
| 2016-09-15T20:22:32.797774
| 2016-03-29T07:32:38
| 2016-03-29T07:32:38
| 34,137,487
| 0
| 1
| null | 2016-03-29T07:32:39
| 2015-04-17T20:03:49
|
Python
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for OneArticle project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'OneArticle'
SPIDER_MODULES = ['OneArticle.spiders']
NEWSPIDER_MODULE = 'OneArticle.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'OneArticle (+http://www.yourdomain.com)'
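# --- Editor's note (illustrative only, not part of the original settings) ---
# Commonly added settings for politer crawling; the pipeline path below assumes a
# pipelines.py defining OneArticlePipeline, which is not shown in this snapshot.
#DOWNLOAD_DELAY = 1.0
#CONCURRENT_REQUESTS_PER_DOMAIN = 4
#ITEM_PIPELINES = {'OneArticle.pipelines.OneArticlePipeline': 300}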
|
[
"wistar.sjtu@gmail.com"
] |
wistar.sjtu@gmail.com
|
82f9cc6486cffac22234ff1e553f1f5831bfd5c9
|
15d0cf422e01e6e3e2cd1770e42974fecd12fc38
|
/new_JSON.py
|
45898bc3b1d900f787afa6610322133c5575b97b
|
[] |
no_license
|
pluxury8state/new_JSON
|
9f8787204e6d222544b981002a5d75e942c41562
|
0217edd85f97845d403203e07097c1effd9eb19e
|
refs/heads/master
| 2022-09-21T01:49:37.396989
| 2020-05-31T16:09:04
| 2020-05-31T16:09:04
| 268,314,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
import json
def sort_and_top10(ob):
new_mas = sorted(ob, key=lambda mas: mas[1])
a = int(len(new_mas)) - 1
p = 0
print('top 10')
while p != 10:
print(new_mas[a])
a -= 1
p += 1
def counter(ob):
mas4 = []
for ind in ob:
p = 0
for i in mas4:
if ind[0] != i[0]:
continue
else:
p = 1
break
if p != 0:
continue
else:
mas4.append(ind)
sort_and_top10(mas4)
def add(ob):
mas3 = []
for ind in ob:
a = [ind]
a.append(ob.count(ind))
mas3.append(a)
counter(mas3)
def more_then_6(ob):
mas2 = []
for ind in ob:
for temp in ind:
if len(temp) > 6:
mas2.append(temp)
add(mas2)
def import_file(file):
mas = []
a = file['rss']
b = a['channel']
c = b['items']
for descr in c:
mas.append(descr['description'].split(' '))
more_then_6(mas)
#begin
with open('newsafr.json','r',encoding='utf-8') as f:
file = json.load(f)
import_file(file)
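# --- Editor's sketch (not part of the original script) ---
# The same "top 10 words longer than 6 characters" analysis, expressed with
# collections.Counter; it assumes the same newsafr.json layout used above.
from collections import Counter

def top_words(parsed, n=10, min_len=7):
    words = []
    for item in parsed['rss']['channel']['items']:
        words += [w for w in item['description'].split(' ') if len(w) >= min_len]
    return Counter(words).most_common(n)

#print(top_words(file))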
|
[
"ssdffgmlg@gmail.com"
] |
ssdffgmlg@gmail.com
|
0eb41e66c1e6eae15f241cdd168bc70a77cbbdda
|
4d1ae7f21c4f179f8a894038a9871417297c4fb8
|
/src/feature_engineering.py
|
16a644969920de3db7ad3cd2a778a128b2b2204d
|
[] |
no_license
|
chuck1l/long_short_term_memory
|
a7ebba0b9d74bbbf2b8566dfdf1c9811f7ba4372
|
b8333b5ecf37fdfe61c0d1413dc7f23e98b7f916
|
refs/heads/main
| 2023-04-24T05:09:37.909859
| 2021-05-13T14:49:23
| 2021-05-13T14:49:23
| 367,079,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yfinance as yf
from datetime import date
from pandas_datareader import data as pdr
class CreateDataFrame(object):
'''
This class is used to create the rolling average on all features and
targets in the input dataframe. This is necessary because stock data
    will usually have an upward trend that greatly impacts the error in
a model like LSTM or Random Forest.
Parameters:
The dataframe
The days back for rolling average
Returns:
A new dataframe with rolling averages minus true values, and true values
'''
def __init__(self, ticker, data, days_back):
self.ticker = ticker
self.df = data
self.days_back = days_back
def new_features(self):
self.df.columns = map(str.lower, self.df.columns)
self.df.columns = self.df.columns.str.replace(' ', '_')
self.df['tmr_high'] = self.df['high'].shift(periods=-1)
self.df['tmr_low'] = self.df['low'].shift(periods=-1)
self.df['tmr_high'].fillna(self.df['high'], inplace=True)
self.df['tmr_low'].fillna(self.df['low'], inplace=True)
self.df['avg_price'] = self.df[['high', 'low', 'open', 'close']].sum(axis=1)/4
cols = list(self.df.columns)
# Create all rolling averages for all columns in the dataframe
for col in cols:
self.df['rolling_avg_' + col] = self.df[col].rolling(self.days_back, center=True).mean()
self.df = self.df.dropna(axis=0, how='any')
# Prepare the feature column lists
cols_t = [cols.pop(cols.index('tmr_high')), cols.pop(cols.index('tmr_low'))]
rolling_avg_feature_cols = []
feature_cols = []
# Prepare the target column lists
rolling_avg_target_cols = ['rolling_avg_tmr_high', 'rolling_avg_tmr_low']
target_cols = ['target_tmr_high', 'target_tmr_low']
for col in cols:
rolling_avg_feature_cols.append('rolling_avg_' + col)
feature_cols.append('feature_' + col)
# Create the features (cols) by feature minus associated rolling avg, concat with df
for i in range(len(cols)):
feature_cols[i] = pd.Series(self.df.apply(lambda row: row[rolling_avg_feature_cols[i]] - row[cols[i]], axis=1), name=feature_cols[i])
self.df = pd.concat([self.df, feature_cols[i]], axis=1)
# Create the targets (cols_targets) by target minus associated rolling avg, concat with df
for i in range(len(target_cols)):
target_cols[i] = pd.Series(self.df.apply(lambda row: row[rolling_avg_target_cols[i]] - row[cols_t[i]], axis=1), name=target_cols[i])
self.df = pd.concat([self.df, target_cols[i]], axis=1)
self.df.to_csv(f'../data/{self.ticker}_prepared_dataframe.csv')
return self.df
if __name__ == '__main__':
# Import the stock data
start_date = '2000-01-01'
end_date = date.today()
ticker = 'SPY'
data = pdr.get_data_yahoo(ticker, start=start_date, end=end_date)
CreateDataFrame(ticker, data, 5).new_features()
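# --- Editor's note (sketch, illustration only) ---
# A toy version of the detrending idea described in the class docstring:
# subtract a centred rolling mean so the slow trend is removed before modelling.
#
#   toy = pd.DataFrame({'close': np.linspace(100, 120, 20) + np.random.randn(20)})
#   toy['rolling_avg_close'] = toy['close'].rolling(5, center=True).mean()
#   toy['feature_close'] = toy['rolling_avg_close'] - toy['close']  # same sign convention as above
#   toy = toy.dropna()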
|
[
"chucks_apple@Chucks-MacBook-Pro.local"
] |
chucks_apple@Chucks-MacBook-Pro.local
|
ed80eecc6d927982d83f6e023d454b76afd420e3
|
aa65c8e24e8f3b0d16173ab72feb392855a481ac
|
/workspace/.c9/metadata/workspace/test/python week 8 lecture test/argv0.py
|
2f33c17f283644c381923e6f29ee230a7be41930
|
[] |
no_license
|
olafironfoot/CS50_introduction-to-computer-science
|
d7688a29378fd2beeb71b73dab1433b99d670789
|
2dd45f4abfec2c9ae4c65f0045217dc9cbc960a7
|
refs/heads/master
| 2021-05-20T00:26:20.231700
| 2020-04-01T13:25:44
| 2020-04-01T13:25:44
| 252,106,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,970
|
py
|
{"filter":false,"title":"argv0.py","tooltip":"/test/python week 8 lecture test/argv0.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":2,"column":14},"end":{"row":2,"column":15},"action":"insert","lines":["v"],"id":55}],[{"start":{"row":2,"column":16},"end":{"row":2,"column":17},"action":"insert","lines":[" "],"id":56}],[{"start":{"row":2,"column":17},"end":{"row":2,"column":18},"action":"insert","lines":["="],"id":57}],[{"start":{"row":2,"column":18},"end":{"row":2,"column":19},"action":"insert","lines":["="],"id":58}],[{"start":{"row":2,"column":19},"end":{"row":2,"column":20},"action":"insert","lines":[" "],"id":59}],[{"start":{"row":2,"column":20},"end":{"row":2,"column":21},"action":"insert","lines":["2"],"id":60}],[{"start":{"row":2,"column":21},"end":{"row":3,"column":0},"action":"insert","lines":["",""],"id":61}],[{"start":{"row":3,"column":0},"end":{"row":3,"column":4},"action":"insert","lines":[" "],"id":62}],[{"start":{"row":3,"column":4},"end":{"row":3,"column":5},"action":"insert","lines":["p"],"id":63}],[{"start":{"row":3,"column":5},"end":{"row":3,"column":6},"action":"insert","lines":["r"],"id":64}],[{"start":{"row":3,"column":6},"end":{"row":3,"column":7},"action":"insert","lines":["i"],"id":65}],[{"start":{"row":3,"column":7},"end":{"row":3,"column":8},"action":"insert","lines":["n"],"id":66}],[{"start":{"row":3,"column":8},"end":{"row":3,"column":9},"action":"insert","lines":["t"],"id":67}],[{"start":{"row":3,"column":9},"end":{"row":3,"column":11},"action":"insert","lines":["()"],"id":68}],[{"start":{"row":3,"column":10},"end":{"row":3,"column":12},"action":"insert","lines":["\"\""],"id":69}],[{"start":{"row":3,"column":11},"end":{"row":3,"column":12},"action":"insert","lines":["h"],"id":70}],[{"start":{"row":3,"column":12},"end":{"row":3,"column":13},"action":"insert","lines":["e"],"id":71}],[{"start":{"row":3,"column":13},"end":{"row":3,"column":14},"action":"insert","lines":["l"],"id":72}],[{"start":{"row":3,"column":14},"end":{"row":3,"column":15},"action":"insert","lines":["l"],"id":73}],[{"start":{"row":3,"column":15},"end":{"row":3,"column":16},"action":"insert","lines":["o"],"id":74}],[{"start":{"row":3,"column":16},"end":{"row":3,"column":17},"action":"insert","lines":[" "],"id":75}],[{"start":{"row":3,"column":17},"end":{"row":3,"column":18},"action":"insert","lines":["{"],"id":76}],[{"start":{"row":3,"column":18},"end":{"row":3,"column":19},"action":"insert","lines":["}"],"id":77}],[{"start":{"row":3,"column":20},"end":{"row":3,"column":21},"action":"insert","lines":[","],"id":78}],[{"start":{"row":3,"column":20},"end":{"row":3,"column":21},"action":"remove","lines":[","],"id":79}],[{"start":{"row":3,"column":20},"end":{"row":3,"column":21},"action":"insert","lines":["."],"id":80}],[{"start":{"row":3,"column":21},"end":{"row":3,"column":22},"action":"insert","lines":[" "],"id":81}],[{"start":{"row":3,"column":21},"end":{"row":3,"column":22},"action":"remove","lines":[" 
"],"id":82}],[{"start":{"row":3,"column":21},"end":{"row":3,"column":22},"action":"insert","lines":["f"],"id":83}],[{"start":{"row":3,"column":22},"end":{"row":3,"column":23},"action":"insert","lines":["o"],"id":84}],[{"start":{"row":3,"column":23},"end":{"row":3,"column":24},"action":"insert","lines":["r"],"id":85}],[{"start":{"row":3,"column":24},"end":{"row":3,"column":25},"action":"insert","lines":["m"],"id":86}],[{"start":{"row":3,"column":25},"end":{"row":3,"column":26},"action":"insert","lines":["a"],"id":87}],[{"start":{"row":3,"column":26},"end":{"row":3,"column":27},"action":"insert","lines":["t"],"id":88}],[{"start":{"row":3,"column":27},"end":{"row":3,"column":29},"action":"insert","lines":["()"],"id":89}],[{"start":{"row":3,"column":28},"end":{"row":3,"column":29},"action":"insert","lines":["a"],"id":90}],[{"start":{"row":3,"column":29},"end":{"row":3,"column":30},"action":"insert","lines":["r"],"id":91}],[{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"insert","lines":["v"],"id":92}],[{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"remove","lines":["v"],"id":93}],[{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"insert","lines":["g"],"id":94}],[{"start":{"row":3,"column":31},"end":{"row":3,"column":32},"action":"insert","lines":["v"],"id":95}],[{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"insert","lines":["1"],"id":96}],[{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"remove","lines":["1"],"id":97}],[{"start":{"row":3,"column":31},"end":{"row":3,"column":32},"action":"remove","lines":["v"],"id":98}],[{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"remove","lines":["g"],"id":99}],[{"start":{"row":3,"column":29},"end":{"row":3,"column":30},"action":"remove","lines":["r"],"id":100}],[{"start":{"row":3,"column":28},"end":{"row":3,"column":29},"action":"remove","lines":["a"],"id":101}],[{"start":{"row":3,"column":28},"end":{"row":3,"column":29},"action":"insert","lines":["s"],"id":102}],[{"start":{"row":3,"column":29},"end":{"row":3,"column":30},"action":"insert","lines":["y"],"id":103}],[{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"insert","lines":["s"],"id":104}],[{"start":{"row":3,"column":31},"end":{"row":3,"column":32},"action":"insert","lines":["."],"id":105}],[{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"insert","lines":["a"],"id":106}],[{"start":{"row":3,"column":33},"end":{"row":3,"column":34},"action":"insert","lines":["r"],"id":107}],[{"start":{"row":3,"column":34},"end":{"row":3,"column":35},"action":"insert","lines":["g"],"id":108}],[{"start":{"row":3,"column":35},"end":{"row":3,"column":36},"action":"insert","lines":["v"],"id":109}],[{"start":{"row":3,"column":36},"end":{"row":3,"column":38},"action":"insert","lines":["[]"],"id":110}],[{"start":{"row":3,"column":37},"end":{"row":3,"column":38},"action":"insert","lines":["1"],"id":111}],[{"start":{"row":3,"column":41},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":112},{"start":{"row":4,"column":0},"end":{"row":4,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":4},"action":"remove","lines":[" 
"],"id":113}],[{"start":{"row":4,"column":0},"end":{"row":5,"column":0},"action":"insert","lines":["",""],"id":114}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":1},"action":"insert","lines":["e"],"id":115}],[{"start":{"row":5,"column":1},"end":{"row":5,"column":2},"action":"insert","lines":["l"],"id":116}],[{"start":{"row":5,"column":2},"end":{"row":5,"column":3},"action":"insert","lines":["s"],"id":117}],[{"start":{"row":5,"column":3},"end":{"row":5,"column":4},"action":"insert","lines":["e"],"id":118}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"insert","lines":[" "],"id":119}],[{"start":{"row":5,"column":5},"end":{"row":5,"column":6},"action":"insert","lines":["p"],"id":120}],[{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["r"],"id":121}],[{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"insert","lines":["i"],"id":122}],[{"start":{"row":5,"column":8},"end":{"row":5,"column":9},"action":"insert","lines":["n"],"id":123}],[{"start":{"row":5,"column":9},"end":{"row":5,"column":10},"action":"insert","lines":["t"],"id":124}],[{"start":{"row":5,"column":10},"end":{"row":5,"column":12},"action":"insert","lines":["()"],"id":125}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"insert","lines":["h"],"id":126}],[{"start":{"row":5,"column":12},"end":{"row":5,"column":13},"action":"insert","lines":["e"],"id":127}],[{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"insert","lines":["l"],"id":128}],[{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"remove","lines":["l"],"id":129}],[{"start":{"row":5,"column":12},"end":{"row":5,"column":13},"action":"remove","lines":["e"],"id":130}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"remove","lines":["h"],"id":131}],[{"start":{"row":5,"column":11},"end":{"row":5,"column":13},"action":"insert","lines":["\"\""],"id":132}],[{"start":{"row":5,"column":12},"end":{"row":5,"column":13},"action":"insert","lines":["h"],"id":133}],[{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"insert","lines":["e"],"id":134}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"insert","lines":["e"],"id":135}],[{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"insert","lines":["l"],"id":136}],[{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"insert","lines":["l"],"id":137}],[{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"remove","lines":["l"],"id":138}],[{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"remove","lines":["l"],"id":139}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"remove","lines":["e"],"id":140}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"insert","lines":["e"],"id":141}],[{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"insert","lines":["l"],"id":142}],[{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"insert","lines":["l"],"id":143}],[{"start":{"row":5,"column":17},"end":{"row":5,"column":18},"action":"insert","lines":["o"],"id":144}],[{"start":{"row":5,"column":18},"end":{"row":5,"column":19},"action":"insert","lines":["o"],"id":145}],[{"start":{"row":5,"column":19},"end":{"row":5,"column":20},"action":"insert","lines":[" 
"],"id":146}],[{"start":{"row":5,"column":20},"end":{"row":5,"column":21},"action":"insert","lines":["w"],"id":147}],[{"start":{"row":5,"column":21},"end":{"row":5,"column":22},"action":"insert","lines":["o"],"id":148}],[{"start":{"row":5,"column":22},"end":{"row":5,"column":23},"action":"insert","lines":["r"],"id":149}],[{"start":{"row":5,"column":23},"end":{"row":5,"column":24},"action":"insert","lines":["l"],"id":150}],[{"start":{"row":5,"column":24},"end":{"row":5,"column":25},"action":"insert","lines":["d"],"id":151}],[{"start":{"row":2,"column":21},"end":{"row":2,"column":22},"action":"insert","lines":[":"],"id":152}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"remove","lines":[" "],"id":153},{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "],"id":154}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"insert","lines":[":"],"id":155}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":26},"end":{"row":6,"column":26},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1488647746714,"hash":"7affa0108acd43ca0652300a45ff7f97489caeab"}
|
[
"Erik.gwl@gmail.com"
] |
Erik.gwl@gmail.com
|
bc127285406c1c1cc417c1d35248d28a447b1270
|
d935d3d4285aef15f17c0ec77e62689ee78a38f3
|
/script/pylib/analysis/tfidf.py
|
4ccf8f6f5cae0acd01d3dddcb847ae1cb2bfaf95
|
[] |
no_license
|
18dubu/ChunhuaLab_2016BBW
|
43d279ee74cfcac60e0e63e8da6adbc8cbdc23ac
|
c368c255b46908d7110fdf0beac01e126089ae9c
|
refs/heads/master
| 2016-09-01T16:20:29.003305
| 2016-01-29T19:28:40
| 2016-01-29T19:28:40
| 50,610,583
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,228
|
py
|
__author__ = 'mahandong'
import pickle
from pylib.onlineresource.ctgov import *
from pylib.onlineresource.sales import *
from pylib.onlineresource.pdr import *
import itertools
import signal
import string
import collections
from pylib.db.findTrials import *
from pylib.db.writeTrainingDB import *
from pylib.util.file import *
from pylib.util.stat import *
from pylib.analysis.preprocessing import *
from pylib.util.web import *
import MySQLdb as mdb
from stat_parser import Parser
import os
import sys
import math
import csv
import urllib2
from lxml import etree
import urllib
from bs4 import BeautifulSoup
import time
import nltk
from pylib.onlineresource.extractXML import *
import numpy as np
from nltk.corpus import stopwords
#return format is a list, with drug names as keys and number of occurrence in all the years as values
#ex: {'diovan': 8, 'epipen': 2, 'biaxin xl': 4, 'cellcept': 8, 'nuvaring': 5...}
def drugList_occursAtLeastOnceInAllYears():
path = '../result/topSales/'
dirList=os.listdir(path)
if len(dirList)<2:
print 'Warning! Too little data in directory: ', path
continueOrNot()
yearInRecord = 0
drugList = {}
for file in dirList:
if os.path.isfile(path+file) and not file.startswith('.'):
year = file.split('_')[1]
yearInRecord += 1
content = ''
try:
content = open(path+file,'r').read().rstrip().split('\n')
except Exception:
print 'error 37'
print Exception
for line in content:
drugName = line.split('\t')[1].lower()
if drugName in drugList.keys():
drugList[drugName].append(int(year))
else:
drugList[drugName] = [int(year)]
for drug in drugList.keys():
drugList[drug] = str(sorted(drugList[drug]))
print 'The number of years with SALES record is: ', yearInRecord, "years"
return drugList
#input format is drug array
#output format is 2 drug lists (bbw, robust) with BBW info as values
def drugList_findBBWInfo(drugList, detailInfo = True):
path = '../result/PDR/'
targetFile = 'drugLabelContent'
content = ''
drugList_withBBWInfo = {}
drugList_robust = {}
###
manuallyAssertEqualDic={
'taclonex-topical-suspension':'taclonex',
'exelon-patch':'exelon',
'zomig-zomig-zmt':'zomig',
'clobex-spray':'clobex',
'prempro-premphase':'prempro',
'flovent-hfa':'flovent',
'tobradex-st':'tobradex',
'differin-lotion':'differin',
'norvir-oral-solution-and-tablets':'norvir',
'prilosec-otc':'prilosec',
'nexium-iv':'nexium',
'xalatan-ophthalmic-solution-single-bottle':'xalatan',
'zithromax-for-injection':'zithromax',
'depakote-tablets':'depakote',
'dovonex-scalp-solution':'dovonex',
'sporanox-oral-solution':'sporanox',
'zofran-odt-orally-disintegrating-tablets-oral-solution-and-tablets':'zofran',
'androgel-162':'androgel',
'bactroban-ointment':'bactroban',
'boniva-tablets':'boniva',
'xopenex-inhalation-solution-concentrate':'xopenex',
'lidoderm-patch':'lidoderm',
'ddavp-tablets':'ddavp',
'protonix-iv':'protonix',
'aviane-28':'aviane',
'asacol-hd':'asacol',
'keppra-xr':'keppra',
'premarin-tablets':'premarin tabs',
'fentanyl-citrate':'fentanyl oral citrate',
'duac':'duac care system',
'humulin-70-30':'humulin 70/30',
'humalog':'humalog kwikpen',
'ortho-tri-cyclen-ortho-cyclen':'ortho tri-cyclen lo',
'ortho-evra':'ortho evra',
'humalog-mix75-25':'humalog mix 75/25 pen',
'vancocin-hydrochloride':'vancocin hcl',
'effexor-xr':'effexor xr',
'inderal-la':'inderal la',
'advair-diskus':'advair diskus',
'allegra-d-12-hour':'allegra-d 12 hour',
'budeprion-xl':'budeprion xl',
'premarin-vaginal-cream':'premarin vaginal',
'invega-sustenna':'invega sustenna',
'augmentin-xr':'augmentin xr',
'actoplus-met':'actoplus met',
'miacalcin-nasal-spray':'miacalcin nasal',
'ultram-er':'ultram er',
'diovan-hct':'diovan hct',
'ciprodex':'ciprodex otic',
'adderall-xr':'adderall xr',
'rhinocort-aqua':'rhinocort aqua',
'lantus':'lantus solostar',
'seroquel-xr':'seroquel xr',
'percocet':'percocet-10',
'glucotrol-xl':'glucotrol xl',
'proventil-hfa':'proventil hfa',
'pulmicort-respules':'pulmicort respules',
'combivent-respimat':'combivent respimat',
'lamisil-tablets': 'lamisil oral',
'wellbutrin-sr':'wellbutrin sr',
'fosamax-plus-d':'fosamax plus d',
'coumadin':'coumadin tabs',
'allegra-d-24-hour':'allegra-d 24 hour',
'vivelle-dot':'vivelle dot',
'micardis-hct':'micardis hct',
'prevnar-13':'prevnar 13',
'proair-hfa':'proair hfa',
'xopenex-hfa':'xopenex hfa',
'maxalt':'maxalt mlt',
'travatan-z':'travatan z',
'serevent-diskus':'serevent diskus',
'nasacort-allergy-24hr':'nasacort aq',
'imitrex-injection': 'imitrex inj',
'entocort-ec':'entocort ec',
'alphagan-p':'alphagan p',
'childrens-zyrtec-syrup':'zyrtec syrup',
'ketek':'ketek pack',
'yasmin':'yasmin 28',
'dexilant':'dexilant/kapidex',
'geodon':'geodon oral',
'glucophage-glucophage-xr':'glucophage xr',
'detrol-la':'detrol la',
'paxil-cr':'paxil cr',
"exelon-patch":'exelon patch',
'budeprion-sr':'budeprion sr',
'risperdal-consta':'risperdal consta',
'imitrex-tablets':'imitrex oral',
'novolog-mix-70-30':'novolog mix 70/30',
}
###
try:
content = open(path+targetFile,'r').read().rstrip().split('\n')
except Exception:
print 'error 61'
print Exception
if len(content)>1:
totalUnmatchedDrug = []
totalUnsureDrug = []
totalMatchedDrug=[]
unsurePair = {}
for currentDrug in drugList:
findFlag = 0
unsureFlag = 0
for line in content:
line = line.split('\t')
drugName = line[0].lower()
BBW_Flag = line[2]
text = line[3]
if drugName == currentDrug or drugName == currentDrug.replace(' ', '-'):
findFlag = 1
elif drugName.find(currentDrug) >= 0:
unsureFlag = 1
unsurePair[currentDrug] = ':'.join(["\'"+drugName+"\'", "\'"+currentDrug+"\',"])
totalUnsureDrug.append(currentDrug)
if findFlag == 1:
if currentDrug in drugList_withBBWInfo.keys():
print 'Warning135: find duplicate drug entries in PDR database for drug:', currentDrug
continueOrNot()
else:
totalMatchedDrug.append(currentDrug)
if int(BBW_Flag) == 1:
drugList_withBBWInfo[currentDrug] = text
if int(BBW_Flag) == 0:
drugList_robust[currentDrug] = text
break
if 1:
if drugName in manuallyAssertEqualDic.keys() and currentDrug == manuallyAssertEqualDic[drugName]:
unsureFlag = 0
findFlag = 1
if currentDrug in drugList_withBBWInfo.keys():
print 'Warning148: find duplicate drug entries in PDR database for drug:', currentDrug
continueOrNot()
else:
totalMatchedDrug.append(currentDrug)
if int(BBW_Flag) == 1:
drugList_withBBWInfo[currentDrug] = text
if int(BBW_Flag) == 0:
drugList_robust[currentDrug] = text
break
if findFlag == 0 and unsureFlag == 0:
totalUnmatchedDrug.append(currentDrug)
#print "can not match drug: ", currentDrug, 'in PDR database'
totalUnsureDrug = set(totalUnsureDrug)
totalUnmatchedDrug = set(totalUnmatchedDrug)
totalMatchedDrug = set(totalMatchedDrug)
print 'Out of ', len(drugList), 'drugs, ',len(totalMatchedDrug), ' matched (',len(totalUnsureDrug)-len(totalUnsureDrug-totalMatchedDrug), 'find inexact matches and are manually asserted true), ', len(totalUnmatchedDrug), ' can not be found any matches. ',len(totalUnsureDrug-totalMatchedDrug),' are still unsure/rejected'
print 'Out of ', len(totalMatchedDrug), ' matched drugs, ', len(set(drugList_withBBWInfo.keys())), 'are BBW drugs; ', len(set(drugList_robust.keys())), ' are robust drugs'
if detailInfo:
print "Unmatched Drugs: ", totalUnmatchedDrug
print "Matched Drugs: ", totalMatchedDrug
print "#####"
print "There are ",len(totalUnsureDrug-totalMatchedDrug),"unsure drugs that is not covered: ", list(totalUnsureDrug-totalMatchedDrug)
for i in list(totalUnsureDrug-totalMatchedDrug):
print unsurePair[i]
print "Add the pairs that you think refers to the same drug to the manuallyAssertEqualDic"
print "#####"
return drugList_withBBWInfo, drugList_robust
else:
print 'Warning: no content in file', targetFile
continueOrNot()
#period 0:pre; 1:post
#return format list:drug as keys and ctlist as values
def getCTListWithPeriodFromDrugList_local(drugList, period=0):
ctList = {}
for drug in drugList:
try:
ct = findCTListByDrug_local(fh_ctgov_drugTrialList_tab,drug, period)
if len(ct) > 0:
ctList[drug] = ct
else:
ctList[drug] = ''
except Exception:
print 'Warning 118: '
return ctList
#input format is two drug-trialList list like this: {'arimidex': '', 'zyrtec-d': '', 'singulair': '', 'pentasa': ['NCT00545740', 'NCT00751699'],...}
#return two same format lists with eligible entries
def selectEligibleDrugs(ctList_pre, ctList_post, lowerLimitPerPeriod):
a_pre, tmp1 = table(ctList_pre)
a_post, tmp2 = table(ctList_post)
eliDrugList_pre = {}
eliDrugList_post = {}
drugSet = set(a_pre.keys())
    drugSet = drugSet.union(set(a_post.keys()))
discardDrug = []
for drug in drugSet:
        if a_pre.get(drug, 0) > lowerLimitPerPeriod and a_post.get(drug, 0) > lowerLimitPerPeriod:
if drug in eliDrugList_pre.keys() or drug in eliDrugList_post.keys():
print 'warning: possible wrong input list, need further analysis codes'
sys.exit()
eliDrugList_post[drug] = ctList_post[drug]
eliDrugList_pre[drug] = ctList_pre[drug]
else:
discardDrug.append(drug)
print 'there are ', str(len(drugSet)), ' drugs in input lists, and ', str(len(discardDrug)),' are discarded! ', str(len(drugSet)-len(discardDrug)), ' remaining!'
return eliDrugList_pre, eliDrugList_post
def saveSelectedList(target, outDir, varName):
mkdir(outDir)
fh_out = open(str(outDir)+varName, 'w')
if isinstance(target,dict):
for key in target.keys():
if isinstance(target[key],tuple):
target[key] = list(target[key])
if isinstance(target[key],list):
for ele in target[key]:
fh_out.write(key+'\t'+ele+'\n')
if isinstance(target[key],dict):
for ele in target[key].keys():
fh_out.write(key+'\t'+ele+target[key][ele]+'\n')
if isinstance(target[key],str):
fh_out.write(key+'\t'+target[key]+'\n')
print 'variable List: '+varName+ ' successfully saved!'
fh_out.close()
def extractComponentFromXML(ctList):
CTXMLDict = retrieveCTXMLFromCTlist(ctList)
print "##The length of the input queries is: ", str(len(ctList))
print '##the length of the retrieved CT number(unique) is: ', str(len(CTXMLDict.keys()))
CTCompDict = {}
count = 0
for key in CTXMLDict.keys():
count+=1
#print 'processing the ', count,' trials: ', key
try:
(id, brief_title, official_title, conditions, agency, agency_class, source, authority, brief_summary, overall_status, start_date, gender, minimum_age, maximum_age, study_pop, criteria, enrollment, phase, study_type, location, intervention_type, intervention_name, enrollment_type) = extract_component(CTXMLDict[key])
except Exception:
print Exception
print 'skip ', key
CTCompDict[key] = ''
continue
if not criteria.startswith('Please contact site') and criteria.strip() !='' and len(enrollment)>0 :#refinement!
CTCompDict[key] = (id, brief_title, official_title, conditions, agency, agency_class, source, authority, brief_summary, overall_status, start_date, gender, minimum_age, maximum_age, study_pop, criteria, enrollment, phase, study_type, location, intervention_type, intervention_name, enrollment_type)
else:
CTCompDict[key] = ''
return CTCompDict
#for timeout use
class Timeout():
"""Timeout class using ALARM signal"""
class Timeout(Exception): pass
def __init__(self, sec):
self.sec = sec
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, *args):
signal.alarm(0) # disable alarm
def raise_timeout(self, *args):
raise Timeout.Timeout()
def extractComponentFromXML_parse(ctList, instantSaver, doParsing = False):
if not os.path.exists(os.path.dirname(instantSaver)):
mkdir(instantSaver)
tmpSaver = open(instantSaver, 'ab+')
tmpSaver.seek(0, os.SEEK_SET) ###
# current content in tmp file (cache)
alreadyHere = []
tmpContent ={}
lostList = []
for i in tmpSaver:
try:
#print re.search('^\|(NCT\d+)\|',i).group(1)
NCTid = re.search('^\|(NCT\d+)\|',i).group(1)
tmpContent[NCTid] = i.rstrip().split('|')[1:]
alreadyHere.append(NCTid)
except AttributeError:
print 'Instant saved file error: format error'
if continueOrNot():
continue
print 'NCT already in tmp file: ', len(alreadyHere)
#entries requires to be searched online
needOnlineSearch = list(set(ctList)-set(alreadyHere))
print 'NCT requires a online search: ', len(needOnlineSearch)
CTXMLDict = retrieveCTXMLFromCTlist(needOnlineSearch) # do not have all keys in ctList if (1. exists a local data; 2. can not retrieve web page)
print "##The length of the input ctList (unique) is: ", str(len(set(ctList)))
CTCompDict = {}
count = 0
for key in ctList: # only entry in list will be returned, tmp file may contain extra entries, make sure to use the out file for analysis
#retrieve locally
tmp = []
if key in alreadyHere:
#print 'Pass (retrieved locally): ', key
CTCompDict[key] = tmpContent[key]
continue
count += 1
#retrieve online
if key in CTXMLDict.keys():
try:
CTCompDict[key] = extract_component(CTXMLDict[key])
tmp = list(extract_component(CTXMLDict[key]))
except Exception:
print str(Exception)+'skip ', key
continue
else:
lostList.append(key)
'''
##parsing, slow
(id, brief_title, official_title, conditions, agency, agency_class, source, authority, brief_summary, overall_status, start_date, gender, minimum_age, maximum_age, study_pop, criteria, enrollment, phase, study_type, location, intervention_type, intervention_name, enrollment_type) = CTCompDict[key]
if not criteria.startswith('Please contact site') and criteria.strip() !='' and len(enrollment)>0 :#refinement!
if doParsing:
rules = []
try:
print 'parsing: ',id
flaggedCri = setFlag(criteria)
for inc in flaggedCri[0]:
try:
with Timeout(60):
parsed = parse_stat_sentence(inc, None, True)
rules.append(parsed)
except Timeout.Timeout:
print 'skip a lone sentence in trial: ', id
continue
#print 'inc', parse_stat_sentence(inc, None, True)
for exc in flaggedCri[1]:
try:
with Timeout(60):
negate = ['*NEGATE* ' + str(i) for i in parse_stat_sentence(exc, None, True)]
rules.append(negate)
except Timeout.Timeout:
print 'skip a lone sentence in trial: ', id
continue
#print 'exc', negate
rules = list(itertools.chain(*rules))
#print rules
except Exception:
print Exception, 'parsing error at ', id
continue
CTCompDict[key] = (id, brief_title, official_title, conditions, agency, agency_class, source, authority, brief_summary, overall_status, start_date, gender, minimum_age, maximum_age, study_pop, criteria, enrollment, phase, study_type, location, intervention_type, intervention_name, enrollment_type, rules)
tmp = [id, brief_title, official_title, conditions, agency, agency_class, source, authority, brief_summary, overall_status, start_date, gender, minimum_age, maximum_age, study_pop, criteria, enrollment, phase, study_type, location, intervention_type, intervention_name, enrollment_type, rules]
'''
if len(tmp) > 0:
try:
all = ''
for i in tmp:
all += "|"+str(i)
tmpSaver.write(all+'\n')
except Exception:
print 'instant saver error: can not write file at:', id
sys.exit()
tmpSaver.close()
print '##the length of the retrieved ctList(unique) is: ', str(len(CTCompDict.keys()))
print '##the length of the lost ctList(unique) is: ', str(len(set(lostList)))
return CTCompDict
def saveComp(targetDict, outDir, varName):
try:
file = outDir+varName
print varName+'result saves to: '+ file
write_csv(file, targetDict.values())
except:
print 'error writing csv, trying to store in temp file...'
try:
f = open(outDir+varName+'.pckl', 'w')
pickle.dump(targetDict, f)
f.close()
print 'pickled: '+ varName
except:
print 'failed in storing '+ varName
#ref: http://www.cs.duke.edu/courses/spring14/compsci290/assignments/lab02.html
import nltk
import string
import os
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.metrics.pairwise import linear_kernel
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
#remove stopwords
stemmer = PorterStemmer()
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
tokens = nltk.word_tokenize(text)
token_removeStop = [i for i in tokens if i not in stopwords]
#stemming
stems = stem_tokens(token_removeStop, stemmer)
return stems
#http://stats.stackexchange.com/questions/29578/jensen-shannon-divergence-calculation-for-3-prob-distributions-is-this-ok
def jsd(x,y): #Jensen-shannon divergence
import warnings
warnings.filterwarnings("ignore", category = RuntimeWarning)
x = np.array(x)
print x
y = np.array(y)
print y
d1 = x*np.log2(2*x/(x+y))
print d1
d2 = y*np.log2(2*y/(x+y))
print d2
d1[np.isnan(d1)] = 0
d2[np.isnan(d2)] = 0
print sum(d1)
print sum(d2)
d = 0.5*np.sum(d1+d2)
return d
#jsd(np.array([0.5,0.5,0]),np.array([0,0.1,0.9]))
def kld(p, q):
    """ Compute KL divergence of two vectors, K(p || q)."""
    from math import log
    return sum(_p * log(_p / _q) for _p, _q in zip(p, q) if _p != 0)
#a = dict(zip(feature_names, corpusList))
#str = 'this sentence has unseen text such as computer but also king lord juliet'
#response = tfidf.transform([str])
#for col in response.nonzero()[1]:
# print feature_names[col], ' - ', response[0, col]
'''
from nltk.stem.porter import PorterStemmer
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
return stems
source = '/Users/mahandong/Dropbox/research/chunhua project/EliTES/result/selected_drug_trial_List/backup/ctList_BBW_post_comp'
fhIn = open(source,'r')
stemmer = PorterStemmer()
token_dict = []
for line in fhIn:
if line.split('\",\"')[15] != "":
criteria = line.split('\",\"')[15]
lowers = criteria.lower()
no_punctuation = lowers.translate(None, string.punctuation)
token_dict.append(no_punctuation)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
matrix = vectorizer.fit_transform(token_dict).toarray()
feature_names = [x.encode('ascii') for x in vectorizer.get_feature_names()]
corpusList = matrix.sum(axis=0)
corpusTotal = sum(corpusList)
row=1
# new_corpusList = []
# new_rowNormList = []
# new_featureNames = []
# rowNormList = matrix[row]
# for i in range(len(rowNormList)):
# if rowNormList[i] != 0:
# new_corpusList.append(corpusList[i])
# new_rowNormList.append(rowNormList[i])
# new_featureNames.append(feature_names[i])
#dictionary = dict(zip(feature_names, countList))
'''
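# --- Editor's sketch (illustration only, not part of the original analysis) ---
# Quick sanity check of the divergence helpers defined above on two toy
# distributions: jsd() is symmetric, kld() is not.
def _divergence_demo():
    p = np.array([0.5, 0.4, 0.1])
    q = np.array([0.1, 0.4, 0.5])
    print 'JSD(p,q) =', jsd(p, q)
    print 'JSD(q,p) =', jsd(q, p)
    print 'KL(p||q) =', kld(p, q)
    print 'KL(q||p) =', kld(q, p)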
|
[
"handongma.work@gmail.com"
] |
handongma.work@gmail.com
|
db45d397f67e7159649779081aab67d4a025c466
|
296deb151838a750ed06c3da8f2131db522cf6a6
|
/Dashboard.py
|
4690ad2e5a1f4b307c116b3986bc8b10b3cf9523
|
[] |
no_license
|
mbolisov/mb
|
3c6e8d5cd8ffcfa266554fb61fbff3e2f99f387c
|
02ff58b123f9f01823f5d98b9ee50f97b1667bde
|
refs/heads/master
| 2020-06-03T20:42:22.436813
| 2019-06-13T08:40:47
| 2019-06-13T08:40:47
| 191,723,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
from selenium.webdriver.common.by import By
from under_the_hood.multyusable import GBtool
from selenium.common.exceptions import WebDriverException
from under_the_hood.Constructor import Constructor
import time
from nose.tools import assert_equal, assert_true
from selenium import webdriver
import re
class Locators(object):
    WELCOME = (By.CSS_SELECTOR, '.welcome') # "Welcome" block
    TOP_BOOST = (By.CSS_SELECTOR, '.dashboard-page__left .dashboard-page__card') # Top boosters block
    BALANCE = (By.CSS_SELECTOR, '.balance') # Balance block
class BasePage(object):
def __init__(self, driver):
self.driver = driver
class Dashboard(BasePage):
""" Дэшборд """
def check_components(self):
"""Проверка компонентов"""
gb_tool = GBtool(self.driver)
deposit_funds = self.driver.find_element(Locators.DEPOSIT_FUNDS)
gb_tool.check_exists_by_css(css=deposit_funds, name='Блок внести средства')
history = self.driver.find_element(Locators.HISTORY)
gb_tool.check_exists_by_css(css=history, name='Блок история')
balance = self.driver.find_element(Locators.BALANCE)
gb_tool.check_exists_by_css(css=balance, name='Блок баланс')
|
[
"noreply@github.com"
] |
mbolisov.noreply@github.com
|
f9bfc9c998a07887de15b2674a198cdb6bcc93cf
|
385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28
|
/source/tuyoo/src/poker/entity/game/rooms/normal_room.py
|
bc38658272423936f9d7fc9e3a1c5a20a67e60da
|
[] |
no_license
|
csirui/hall37
|
17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db
|
5c4eb4b2bf57bbbee4731470c830d8d81915d603
|
refs/heads/master
| 2021-09-04T03:55:12.460035
| 2018-01-15T15:12:30
| 2018-01-15T15:12:30
| 117,560,615
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,860
|
py
|
# coding=UTF-8
'''Normal room class
'''
from poker.entity.game.game import TYGame
__author__ = [
'"Zhouhao" <zhouhao@tuyoogame.com>',
'Zqh'
]
from random import choice
from freetime.core.tasklet import FTTasklet
import freetime.util.log as ftlog
from freetime.util.log import getMethodName
from poker.entity.configure import gdata
from poker.entity.game.rooms.room import TYRoom
from poker.entity.dao import daobase
from poker.entity.dao.lua_scripts import room_scripts
class TYNormalRoom(TYRoom):
    '''Normal room class.'''
def __init__(self, roomDefine):
super(TYNormalRoom, self).__init__(roomDefine)
        # Clear the table-score history data when the Room object is re-created on GT restart
daobase.executeTableCmd(self.roomId, 0, "DEL", self.getTableScoresKey(self.roomId))
def getTableScoresKey(self, shadowRoomId):
return "ts:" + str(shadowRoomId)
def doReloadConf(self, roomDefine):
        '''When GT reloads the configuration, the table-score history would need to be cleared
        if the table count changed; here the number of table instances never changes, so redis
        does not need to change either. In other words, dynamic table counts are not allowed.'''
# if self.roomDefine.tableCount != roomDefine.tableCount:
# daobase.executeTableCmd(self.roomId, 0, "ZREM", self.getTableScoresKey(self.roomId))
super(TYNormalRoom, self).doReloadConf(roomDefine)
def doQuickStart(self, msg):
'''
Note:
        1> Different games score tables differently (Texas Hold'em, for example, scores by game stage),
           so table scores are stored in redis where each game server can refresh them freely.
        2> To keep the same table from being handed out for seating twice at the same time, the chosen
           table is removed from tableScore during selection and added back after the player sits down
           successfully; its score does not need to be refreshed before it is added back.
        3> When a player picks a table manually, the table may be in the middle of seat allocation;
           in that case sleep and retry until the table finishes allocating or the wait times out.
'''
assert self.roomId == msg.getParam("roomId")
userId = msg.getParam("userId")
shadowRoomId = msg.getParam("shadowRoomId")
tableId = msg.getParam("tableId")
exceptTableId = msg.getParam('exceptTableId')
clientId = msg.getParam("clientId")
ftlog.hinfo(getMethodName(), "<<", "|userId, clientId, roomId, shadowRoomId, tableId:", userId, clientId,
self.roomId, shadowRoomId, tableId)
        if tableId == 0: # the server picks a table for the player and seats them
shadowRoomId = choice(self.roomDefine.shadowRoomIds)
tableId = self.getBestTableId(userId, shadowRoomId, exceptTableId)
        else: # the player picked a specific table to sit at
assert isinstance(shadowRoomId, int) and gdata.roomIdDefineMap()[
shadowRoomId].bigRoomId == self.roomDefine.bigRoomId
tableId = self.enterOneTable(userId, shadowRoomId, tableId)
if not tableId:
ftlog.error(getMethodName(), "getFreeTableId timeout", "|userId, roomId, tableId:", userId, self.roomId,
tableId)
return
if ftlog.is_debug():
ftlog.info(getMethodName(), "after choose table", "|userId, shadowRoomId, tableId:", userId, shadowRoomId,
tableId)
extParams = msg.getKey('params')
self.querySitReq(userId, shadowRoomId, tableId, clientId, extParams)
def getBestTableId(self, userId, shadowRoomId, exceptTableId=None):
        '''Atomically fetch and remove the highest-scored table id from redis.
        Return:
            None: the tableScores queue is empty; every table is busy allocating seats
'''
def getBestTableIdFromRedis(shadowRoomId):
            '''Pop (fetch and delete) the highest-scored table from redis
'''
tableId, tableScore = 0, 0
datas = daobase.executeTableLua(shadowRoomId, 0, room_scripts.ALIAS_GET_BEST_TABLE_ID_LUA, 1,
self.getTableScoresKey(shadowRoomId), 0)
if datas and len(datas) == 2:
tableId, tableScore = datas[0], datas[1]
return tableId, tableScore
if ftlog.is_debug():
ftlog.debug("<<", "|shadowRoomId, exceptTableId:", shadowRoomId, exceptTableId, caller=self)
pigTables = []
tableId = 0
        for _ in xrange(5): # every table may be busy allocating seats; if we fail to get one, sleep and retry
if gdata.roomIdDefineMap()[shadowRoomId].tableCount == 1:
tableId = shadowRoomId * 10000 + 1
tableScore = 100
else:
                tableId, tableScore = getBestTableIdFromRedis(shadowRoomId) # fetch one table from redis
            # the client explicitly excluded this table, so fetch another one
if exceptTableId and tableId and exceptTableId == tableId:
tableId1, tableScore1 = getBestTableIdFromRedis(shadowRoomId)
                # put the table we previously popped from redis back into redis
self._updateTableScore(shadowRoomId, tableScore, tableId, force=True)
tableId, tableScore = tableId1, tableScore1
if ftlog.is_debug():
ftlog.debug('getBestTableId shadowRoomId, tableId, tableScore=', shadowRoomId, tableId, tableScore)
if tableId:
if TYGame(self.gameId).isWaitPigTable(userId, self, tableId):
pigTables.append([shadowRoomId, tableScore, tableId])
tableId = 0
continue
else:
break
else:
FTTasklet.getCurrentFTTasklet().sleepNb(0.2)
if ftlog.is_debug():
ftlog.debug('getBestTableId pigTables=', pigTables)
if pigTables:
for pig in pigTables:
self._updateTableScore(pig[0], pig[1], pig[2], False)
return tableId
def enterOneTable(self, userId, shadowRoomId, tableId):
        '''Sit down at the specified table.
        Returns
            False: retried too many times
'''
if ftlog.is_debug():
ftlog.debug("<< |userId, roomId, shadowRoomId, tableId", userId, self.roomId, shadowRoomId, tableId,
caller=self)
if gdata.roomIdDefineMap()[shadowRoomId].tableCount == 1:
return tableId
        for _ in xrange(5): # this table may be busy allocating seats; if claiming it fails, sleep and retry
result = daobase.executeTableCmd(shadowRoomId, 0, "ZREM", self.getTableScoresKey(shadowRoomId), tableId)
if ftlog.is_debug():
ftlog.debug("after ZREM tableId", "|userId, shadowRoomId, tableId, result:",
userId, shadowRoomId, tableId, result, caller=self)
if result == 1:
return tableId
FTTasklet.getCurrentFTTasklet().sleepNb(1)
return 0
def _updateTableScore(self, shadowRoomId, tableScore, tableId, force=False):
rkey = self.getTableScoresKey(shadowRoomId)
force = 1 if force else 0
res = daobase.executeTableLua(shadowRoomId, tableId, room_scripts.ALIAS_UPDATE_TABLE_SCORE_LUA,
4, rkey, tableId, tableScore, force)
if ftlog.is_debug():
ftlog.debug('_updateTableScore->shadowRoomId, tableScore, tableId, force=', shadowRoomId, tableScore,
tableId, force, res)
def updateTableScore(self, tableScore, tableId, force=False):
        '''Update the table score in redis. TODO: turn this into an atomic LUA operation.
        Args:
            force:
                True  force-add or update the score in redis; only done when a player sits down
                False only update the score when redis already holds one for this table
'''
self._updateTableScore(self.roomId, tableScore, tableId, force)
# if force :
# result = daobase.executeTableCmd(self.roomId, 0, "ZADD", self.getTableScoresKey(self.roomId), tableScore, tableId)
# ftlog.debug("force ZADD tableId", "|roomId, tableId, result:", self.roomId, tableId, result,
# caller=self)
# return
#
# result = daobase.executeTableCmd(self.roomId, 0, "ZSCORE", self.getTableScoresKey(self.roomId), tableId)
# ftlog.debug("checkold ZSCORE tableId", "|roomId, tableId, result:", self.roomId, tableId, result,
# caller=self)
# if result == None:
# result = daobase.executeTableCmd(self.roomId, 0, "ZADD", self.getTableScoresKey(self.roomId), tableScore, tableId)
# ftlog.debug("after ZADD tableId", "|roomId, tableId, result:", self.roomId, tableId, result,
# caller=self)
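# --- Editor's note (sketch, not part of the game-server code) ---
# The selection flow above amounts to a redis sorted set keyed per shadow room,
# with table ids as members and table scores as scores. With plain redis-py
# (an assumption; the real code goes through daobase and Lua scripts) the same
# idea looks roughly like:
#
#   import redis
#   r = redis.Redis()
#   key = 'ts:%d' % shadow_room_id
#   r.zadd(key, {str(table_id): table_score})   # player sat down / refresh the score
#   best = r.zpopmax(key)                       # atomically pick and remove the best table
#   r.zrem(key, str(table_id))                  # player chose this exact table manually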
|
[
"cg@ibenxi.com"
] |
cg@ibenxi.com
|
f4f9c13205eeef3b47879fe03e9487fe5b216969
|
c82158c7c6b008dc417a4a391bd714735f875a69
|
/manager/campaign_sel.py
|
bb4ee735a3a9c326a3835d707884753bdcc1c691
|
[] |
no_license
|
Mattvasquez22/Sem_Proj2
|
9d2fe312edc7ddc93c711695eb0be32170438a84
|
e6d59e040af60f3ba1bd31b9ab516f5e570b8df8
|
refs/heads/master
| 2020-05-07T13:57:59.542228
| 2019-06-25T06:40:20
| 2019-06-25T06:40:20
| 180,570,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
#########################################################
# Script used to define functions used to obtain the #
# campaigns, manage the campaign selection accordingly #
# and puts client back into pool if necessary #
#########################################################
from connection_char import placeinPool,TIMESTAMPS
def fetchCampaigns():
#campaigns = {'a':1,'b':2}
campaigns = '1,2,3'
return campaigns
def checkSelection(response):
#To be defined more specifically later
if(response in CAMPAIGNS):
return True
else:
return False
def campaignSelection(client):
client.client_sock.send(fetchCampaigns())
counter = 3
while counter > 0:
read_data = client.client_sock.recv(255)
selected_campaigns = read_data.rstrip()
if(checkSelection(selected_campaigns)):
print("VALID CAMPAIGN IS: " + selected_campaigns)
counter = 3
break
else:
counter -= 1
print("INVALID CAMPAIGN, TRIES LEFT: {}".format(str(counter)))
if(counter == 0):
print("NO MORE TRIES LEFT")
placeinPool(client.client_ID,TIMESTAMPS[client.client_ID])
CAMPAIGNS = fetchCampaigns()
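# --- Hedged usage sketch (added for illustration; _sketch_check_selection is not part of the original script). ---
# checkSelection() only depends on the CAMPAIGNS string defined above, so it can be
# exercised without a live socket. Note that membership is tested against the raw
# string '1,2,3'; splitting CAMPAIGNS on ',' would make the check stricter.
def _sketch_check_selection():
    assert checkSelection('2')      # '2' occurs in '1,2,3'
    assert not checkSelection('9')  # '9' does not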
|
[
"leonel.vasquez@eurecom.fr"
] |
leonel.vasquez@eurecom.fr
|
3958bf3f4fade40cb900cf460be2dd1ecfc1c305
|
fc1487ea5c2999a7e3020a42099acaa9fb7378bd
|
/Subject/migrations/0002_subject_m_subject_code.py
|
a69042f0874a351b31c8f6420488c12d8c4378fd
|
[] |
no_license
|
shadreckmukuka/SRMS_Django
|
2a961dbbd121de4420dfb90798252230a53192f9
|
99c436f0c0c424ca634eb532f1081d1d4ae3f8c6
|
refs/heads/master
| 2023-06-25T06:28:19.270333
| 2021-07-24T07:41:39
| 2021-07-24T07:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.2.3 on 2021-07-20 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Subject', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='subject_m',
name='subject_code',
field=models.CharField(default='', max_length=30),
),
]
|
[
"39671178+SumitJamnani@users.noreply.github.com"
] |
39671178+SumitJamnani@users.noreply.github.com
|
ec8b44146c38a98c983c6b3ab090d8e6feff727e
|
6abb92d99ff4218866eafab64390653addbf0d64
|
/AtCoder/abc/abc180/b.py
|
3dd5250c5676e7d938a6207c7c033d8e8d4ed8eb
|
[] |
no_license
|
Johannyjm/c-pro
|
38a7b81aff872b2246e5c63d6e49ef3dfb0789ae
|
770f2ac419b31bb0d47c4ee93c717c0c98c1d97d
|
refs/heads/main
| 2023-08-18T01:02:23.761499
| 2023-08-07T15:13:58
| 2023-08-07T15:13:58
| 217,938,272
| 0
| 0
| null | 2023-06-25T15:11:37
| 2019-10-28T00:51:09
|
C++
|
UTF-8
|
Python
| false
| false
| 223
|
py
|
n = int(input())
x = list(map(int, input().split()))
res1 = 0
res3 = -1
for e in x:
res1 += abs(e)
res3 = max(res3, abs(e))
res2 = 0
for e in x:
res2 += e * e
res2 **= 0.5
print(res1)
print(res2)
print(res3)
|
[
"meetpastarts@gmail.com"
] |
meetpastarts@gmail.com
|
904e0904076d47255579ccf0a619de76d3f6deba
|
d3e5f96f07bbebdd09ae3764356a14ccf63a7f13
|
/test/test_video_recorder.py
|
89025ba9896829c1f0e2a170928142bac2dec180
|
[] |
no_license
|
ijeriomit/ROS-Video-Recorder
|
e8c1941ba650b7303be3313daf9bf2c43f543d14
|
99dbbf22d7445926a6c619041d23c4478b872dbd
|
refs/heads/main
| 2023-06-21T07:15:48.186967
| 2021-07-08T18:20:42
| 2021-07-08T18:20:42
| 372,615,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,218
|
py
|
#!/usr/bin/env python2
# System Imports #
import unittest
# ROS Imports #
import os
import time
import rospkg
import rospy
import rostest
import subprocess
import re
import shutil
from random import seed
from random import random
import cv2
from cv_bridge import CvBridge
import threading
from sensor_msgs.msg import Image
from robot_video_recorder.video_recorder import VideoRecorder
from robot_video_recorder.image_manipulator import *
from robot_video_recorder.mock_camera_publisher import MockCamera
class TestVideoRecorder(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestVideoRecorder, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
camera_topic = "/camera_input"
cls.test_video_folder = os.path.join(rospkg.RosPack().get_path('robot_video_recorder'), "videos")
rospy.init_node("test_node")
pkg_path = rospkg.RosPack().get_path('robot_video_recorder')
frame_image = cv2.imread(os.path.join(pkg_path, 'images', 'test_image.png'))
cls.file_prefix = 'test'
cls.file_postfix = ''
cls.file_type = 'mp4'
cls.codec = 'MP4V'
cls.fps = 4
cls.max_delay = 0.1
cls.sent_images = 0
cls.recorder = VideoRecorder(camera_topic=camera_topic, folder_path= cls.test_video_folder, image_height=frame_image.shape[0], image_width=frame_image.shape[1], fps=cls.fps, add_time_stamps=True, video_length=60, file_prefix=cls.file_prefix, file_postfix = cls.file_postfix, file_type = cls.file_type, video_codec= cls.codec)
if not os.path.exists(cls.test_video_folder):
os.makedirs(cls.test_video_folder)
clean_folder(cls.test_video_folder)
cls.mock_camera = MockCamera(fps=cls.fps, topic=camera_topic, image_path=os.path.join(pkg_path, 'images', 'test_image.png'))
cls.mock_camera.start()
@classmethod
def tearDownClass(cls):
# time.sleep(5)
rospy.loginfo("shutting down ros HEHEHEHEHEH")
rospy.signal_shutdown("test over")
cls.mock_camera.stop_camera()
cls.recorder.stop_recording()
def setUp(self):
self.recorder.record()
def tearDown(self):
self.recorder.stop_recording()
def test_pad_images(self):
self.recorder.stop_recording()
num_pad_images = 4
frame_number = self.recorder.get_real_frame_number()
self.recorder.pad_video(num_pad_images)
self.assertEqual(frame_number + num_pad_images, self.recorder.get_real_frame_number())
def test_image_size_correction(self):
test_image = np.zeros((900, 1200, 3))
self.assertEqual((768, 1024, 3), image_size_correction(test_image, 1024, 768).shape)
def test_image_recieved(self):
rospy.sleep(2)
self.assertGreater(len(self.recorder.get_frame_buffer()), 1)
def test_num_of_images_recieved_equals_num_of_images_sent(self):
num_images_sent = self.sent_images
rospy.sleep(1)
self.assertEqual(self.recorder.get_real_frame_number(), self.fps)
def test_create_file_name(self):
timestamp = time.strftime(self.recorder.timestamp_format)
filename = self.recorder.create_file_name(timestamp)
delimeter = "\\"
self.assertEqual(filename, "{0}{1}{2}_{3}_{4}.{5}".format(self.test_video_folder, delimeter, self.file_prefix, timestamp, self.file_postfix, self.file_type))
def test_create_directory(self):
pass
def test_video_recorded(self):
pass
def test_recorded_video_has_min_number_of_frames(self):
pass
def clean_folder(folder):
""" Delete the files in the directory """
if(os.path.exists(folder) and os.path.isdir(folder)):
for f in os.listdir(folder):
f = os.path.join(folder, f)
if os.path.isfile(f):
rospy.loginfo("Cleaning file {}".format(f))
os.remove(f)
elif os.path.isdir(f):
rospy.loginfo("Cleaning folder {}".format(f))
shutil.rmtree(f)
if __name__ == "__main__":
rostest.rosrun("robot_video_recorder", 'test_video_recorder', TestVideoRecorder)
|
[
"iiomitogun@uh.edu"
] |
iiomitogun@uh.edu
|
69c355ce7434a1a1a8919fd35926fa3157709e70
|
53f8a044b81e3bb7cd058162d12c2eacf2038d6c
|
/Variable assignment.py
|
9e80942ef86d61a7c81b0333d7da6e5b46f73bb1
|
[] |
no_license
|
NehaChandwani/test-gate-
|
72b53ccd56a4ecfdfa8b05d1dc662d23e6a1a1ac
|
eb877f3a5ecf17d1ce0f4a16941b62d5b7ce9a3d
|
refs/heads/main
| 2023-02-03T23:39:08.797141
| 2020-12-23T20:02:23
| 2020-12-23T20:02:23
| 323,830,396
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# attributes of the song
instrument="Piano and guitar" #instrument used in song
lyrics="Gulzar" # song written by
lengthinseconds=380 #length of song
title="Keh du tumhe" # Title of song
producer="T-series"
singer="AnuMalik"
genere="Jazz"
print(instrument)
print(lyrics)
print(lengthinseconds)
print(title)
print(producer)
print(singer)
print(genere)
|
[
"noreply@github.com"
] |
NehaChandwani.noreply@github.com
|
5c26542d8366a51e50177dd19e240fb09ea48219
|
de159ab9a9cc8d5c7460f2ef1cf549c6e50aba4d
|
/course6/avltree.py
|
d8f9809cdcce1511954cba98680f7822bcc94a76
|
[] |
no_license
|
g10guang/MIT_6.006
|
56869a79ed863b55b46c4f3011fdb0de21992dec
|
2fbdb6123a9541ef00de80f627fe05a3c34ceffa
|
refs/heads/master
| 2021-09-05T01:23:28.944260
| 2018-01-23T10:47:33
| 2018-01-23T10:47:33
| 117,412,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,564
|
py
|
#!/usr/bin/env python3
# coding=utf-8
# author: Xiguang Liu<g10guang@foxmail.com>
# 2017-11-10 08:19
class AVLTree:
    def __init__(self, NodeType=None) -> None:
        # AVLNode (defined below) cannot be referenced as a default argument here,
        # because it does not exist yet when this method is being defined.
super().__init__()
self.root = None
def insert(self, k):
"""
        Insert a new key into the AVL tree.
:param k:
:return:
"""
if self.root:
self.root.insert(k)
else:
self.root = AVLNode(None, k)
class AVLNode:
def __init__(self, parent, key) -> None:
super().__init__()
self.parent = parent
self.key = key
self.left = None
self.right = None
        # self.height = 0  # a node with no children has height 0
        self.balance = 0  # balance = height(left) - height(right); should stay within [-1, 1]
def disconnect(self):
"""
        Detach this node from its parent and its children.
:return:
"""
self.parent = None
self.left = None
self.right = None
def insert(self, k):
"""
        Insert a new key into the subtree rooted at this node.
        The four insertion cases that can unbalance an AVL tree:
        1 new node under the left child's left subtree
        2 new node under the right child's right subtree
        3 new node under the right child's left subtree
        4 new node under the left child's right subtree
        :param k:
        :return: the newly inserted node
"""
if self.key < k:
            # insert k into the left subtree (this implementation keeps larger keys on the left)
if self.left:
new = self.left.insert(k)
else:
                # the left subtree is empty; insert directly
new = AVLNode(self, k)
self.left = new
else:
            # insert k into the right subtree
if self.right:
new = self.right.insert(k)
else:
new = AVLNode(self, k)
self.right = new
        # update balance = height(left subtree) - height(right subtree)
        self.balance = (self.left.calc_height() if self.left else -1) - (self.right.calc_height() if self.right else -1)
self.judge_rotate(new)
return new
def judge_rotate(self, new):
"""
        Decide whether the tree became unbalanced after an insertion and, if so, which rotation to apply.
:return:
"""
        if self.balance == 2:
            # left-heavy
            if self.left.left is new:
                self.right_rotate()
            elif self.left.right is new:
                self.left_right_rotate()
        elif self.balance == -2:
            # right-heavy
            if self.right.right is new:
                self.left_rotate()
            elif self.right.left is new:
                self.right_left_rotate()
def calc_height(self):
"""
        Compute the height of the subtree rooted at this node (a leaf has height 0).
:return:
"""
if self.left:
if self.right:
return max(self.left.calc_height(), self.right.calc_height()) + 1
            return self.left.calc_height() + 1
elif self.right:
return self.right.calc_height() + 1
else:
            # neither a left child nor a right child
return 0
def left_rotate(self):
"""
        New node inserted under the right child's right subtree unbalanced the tree ==> single left rotation.
:return:
"""
right = self.right
if self.parent:
if self.parent.left is self:
self.parent.left = right
else:
self.parent.right = right
right.parent = self.parent
self.right = right.left
        if right.left:
right.left.parent = self
self.parent = right
right.left = self
def right_rotate(self):
"""
        New node inserted under the left child's left subtree unbalanced the tree ==> single right rotation.
:return:
"""
left = self.left
if self.parent:
if self.parent.left is self:
self.parent.left = left
else:
self.parent.right = left
left.parent = self.parent
self.left = left.right
if left.right:
left.right.parent = self
self.parent = left
left.right = self
def left_right_rotate(self):
"""
        New node inserted under the left child's right subtree ==> left-rotate the left child, then right-rotate this node.
:return:
"""
self.left.left_rotate()
self.right_rotate()
def right_left_rotate(self):
"""
        New node inserted under the right child's left subtree ==> right-rotate the right child, then left-rotate this node.
:return:
"""
self.right.right_rotate()
self.left_rotate()
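# --- Hedged usage sketch (added for illustration; _sketch_walk and the key list are not part of the original module). ---
# A minimal smoke test of the insert path. Note that this implementation sends keys
# *larger* than the current node to the LEFT subtree, so the walk below yields keys in
# descending order. Rebalancing corner cases (e.g. re-rooting the tree after a rotation
# at the root) are not exercised here.
def _sketch_walk(node):
    """Yield keys with an in-order walk (largest first, given the mirrored ordering)."""
    if node is None:
        return
    for key in _sketch_walk(node.left):
        yield key
    yield node.key
    for key in _sketch_walk(node.right):
        yield key
if __name__ == '__main__':
    tree = AVLTree()
    for k in [5, 3, 8, 1]:
        tree.insert(k)
    print(list(_sketch_walk(tree.root)))  # expected: [8, 5, 3, 1]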
|
[
"g10guang@gmail.com"
] |
g10guang@gmail.com
|
331df3698f95431259f82bc02d2be2f0a859cb97
|
df2435ffc5c2916d9bbe99a0d53086569c34dc5e
|
/patchconv/setup.py
|
b89ab2d494c238a39509c0d3c55aba4d503b869d
|
[] |
no_license
|
lucassanttoss/codingtools
|
73d810f1e3e5498af1cfb0d3fd5bffc43bd8d055
|
5fa857c16149801acc7bad2f41597a5264f7015e
|
refs/heads/master
| 2021-06-17T03:13:43.460518
| 2017-01-12T15:16:33
| 2017-01-12T15:16:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# This file is part of Adblock Plus <https://adblockplus.org/>,
# Copyright (C) 2006-2016 Eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
setup(
name='patchconv',
version='0.1',
py_modules=['patchconv'],
entry_points={
'console_scripts': ['patchconv=patchconv:main']
}
)
|
[
"vasily@adblockplus.org"
] |
vasily@adblockplus.org
|
d9c7ee008ad673217b75a03c9bd9aa2b6d4c4bc6
|
25872e1ba4f86cbbf77d0130f341b21e5dd9e692
|
/ValidParentheses.py
|
246578df7ae6388798faaa7fda377b4bd2652d76
|
[] |
no_license
|
zongxinwu92/leetcode
|
dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de
|
e1aa45a1ee4edaf72447b771ada835ad73e7f508
|
refs/heads/master
| 2021-06-10T21:46:23.937268
| 2017-01-09T09:58:49
| 2017-01-09T09:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
'''
Created on 1.12.2017
@author: Jesse
'''
'''
Given a string containing just the characters ( , ) , { , } , [ and ] , determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
'''
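# --- Hedged sketch (added for illustration; the original file contains only the problem statement above). ---
# One standard approach uses a stack: push opening brackets and, on a closing bracket,
# check that it matches the most recently opened one.
def is_valid(s):
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack
if __name__ == '__main__':
    print(is_valid("()[]{}"))  # True
    print(is_valid("([)]"))    # False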
|
[
"darrencheng0817@gmail.com"
] |
darrencheng0817@gmail.com
|
95893bf0b00c2ce101c951b6965603548445099c
|
2475e641635d626764ee8759c9c03840a7ea8d83
|
/migrations/versions/1658bdd37b17_.py
|
c78e326dac4674c74ba3dce4d4ee3e2343d8f4b2
|
[] |
no_license
|
donggiLee1012/flask_pybo
|
f11b89d0e5d40bf5ea233b83f8af602fac62a425
|
f346940d6f188d2022e0be9c1d6a0c03bc158feb
|
refs/heads/master
| 2023-01-04T16:30:57.320725
| 2020-10-30T04:58:24
| 2020-10-30T04:58:24
| 296,055,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
"""empty message
Revision ID: 1658bdd37b17
Revises: ff8aed8fa5b3
Create Date: 2020-10-20 19:36:49.072582
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1658bdd37b17'
down_revision = 'ff8aed8fa5b3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('structureprice', schema=None) as batch_op:
batch_op.add_column(sa.Column('size', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('structureprice', schema=None) as batch_op:
batch_op.drop_column('size')
# ### end Alembic commands ###
|
[
"multidong1017@gmail.com"
] |
multidong1017@gmail.com
|
483b866833396c462eb05f924fd41b4139c7f05f
|
0432f80e0a058262af8c0b560dca268f52001644
|
/pygcam/built_ins/query_plugin.py
|
f08af4cf608c66c4341233d6350d8e9a730d59d2
|
[
"MIT"
] |
permissive
|
heishanmao/pygcam
|
095387fdf9dd6e049f26b6074cc1dea2f6f5bf21
|
20e48133ffadbb8db8e84c26b54e3db59b4a7b68
|
refs/heads/master
| 2021-01-08T08:36:02.928185
| 2020-02-14T20:00:30
| 2020-02-14T20:00:30
| 241,973,039
| 1
| 0
|
NOASSERTION
| 2020-02-20T19:37:12
| 2020-02-20T19:37:11
| null |
UTF-8
|
Python
| false
| false
| 5,670
|
py
|
"""
.. Support for querying GCAM's XML database and processing results.
.. codeauthor:: Rich Plevin <rich@plevin.com>
.. Copyright (c) 2016 Richard Plevin
See the https://opensource.org/licenses/MIT for license details.
"""
from ..subcommand import SubcommandABC, clean_help
class QueryCommand(SubcommandABC):
def __init__(self, subparsers):
kwargs = {'fromfile_prefix_chars' : '@', # use "@" before value to substitute contents of file as arguments
'help' : '''Run one or more GCAM database queries by generating and running the named XML queries.'''}
super(QueryCommand, self).__init__('query', subparsers, kwargs, group='project')
def addArgs(self, parser):
parser.add_argument('queryName', nargs='*',
help=clean_help('''A file or files, each holding an XML query to run. (The ".xml" suffix will be
added if needed.) If an argument is preceded by the "@" sign, it is read and its
contents substituted as the values for this argument. That means you can store queries
to run in a file (one per line) and just reference the file by preceding the filename
argument with "@".'''))
parser.add_argument('-b', '--batchFile',
help=clean_help('''An XML batch file to run. The file will typically contain
multiple queries. By default, output is written to
{outputDir}/{batchFile basename}.csv. Use '-B' to change this.'''))
parser.add_argument('-B', '--batchOutput', default='',
help=clean_help('''Where to write the output of the XML batch file given by
the '-b' flag. Non-absolute paths are treated as relative to the given outputDir.'''))
parser.add_argument('-d', '--xmldb',
help=clean_help('''The XML database to query (default is computed as
{GCAM.SandboxDir}/output/{GCAM.DbFile}. Overrides the -w flag.'''))
parser.add_argument('-D', '--noDelete', action="store_true",
help=clean_help('''Don't delete any temporary file created by extracting a query from a query file. Used
mainly for debugging.'''))
parser.add_argument('-g', '--groupDir', default='',
                            help=clean_help('''The scenario group directory name, if any. Used to compute the default
for --workspace argument.'''))
parser.add_argument('-n', '--noRun', action="store_true",
help=clean_help("Show the command to be run, but don't run it"))
parser.add_argument('-o', '--outputDir',
help=clean_help('Where to output the result (default taken from config parameter "GCAM.OutputDir")'))
parser.add_argument('-p', '--prequery', action="store_true",
help=clean_help('''Generate the XMLDBDriver.properties file and associated batch file to be
run by GCAM when GCAM.BatchMultipleQueries or GCAM.InMemoryDatabase are True.'''))
parser.add_argument('-q', '--queryXmlFile',
help=clean_help('''An XML file holding a list of queries to run, with optional mappings specified to
rewrite output. This file has the same structure as the <queries> element in project.xml.'''))
parser.add_argument('-Q', '--queryPath',
help=clean_help('''A semicolon-delimited list of directories or filenames to look in to find query files.
Defaults to value of config parameter GCAM.QueryPath'''))
parser.add_argument('-r', '--regions',
help=clean_help('''A comma-separated list of regions on which to run queries found in query files structured
like Main_Queries.xml. If not specified, defaults to querying all 32 regions.'''))
parser.add_argument('-R', '--regionMap',
help=clean_help('''A file containing tab-separated pairs of names, the first being a GCAM region
and the second being the name to map this region to. Lines starting with "#" are
treated as comments. Lines without a tab character are also ignored. This arg
overrides the value of config variable GCAM.RegionMapFile.'''))
parser.add_argument('-s', '--scenario', default='Reference',
help=clean_help('''The scenario to run the query/queries for (default is "Reference")
                            Note that this must refer to a scenario in the XML database.'''))
parser.add_argument('-S', '--rewriteSetsFile',
help=clean_help('''An XML file defining query maps by name (default taken from
config parameter "GCAM.RewriteSetsFile")'''))
parser.add_argument('-w', '--workspace', default='',
help=clean_help('''The workspace directory in which to find the XML database.
Defaults computed as {GCAM.SandboxDir}/{groupDir}/{scenario}.
Overridden by the -d flag.'''))
return parser
def run(self, args, tool):
from ..query import queryMain
queryMain(args)
|
[
"rich@plevin.com"
] |
rich@plevin.com
|
b80cee7cc5858b41cedec45e66dcf36ac75cf6b6
|
b67ce4473d51a0273f6ef6c2bc30d531c31cae7d
|
/pset6/readability/readability.py
|
3d2d8342d325cc40b537537f43d0094b889f1ddf
|
[] |
no_license
|
lucas404x/cs50-exercises
|
4681eb9b52395e926426e20cf693eeabb2644414
|
00eff92aefb0422176d96d90fc495fdfd1c8eb7f
|
refs/heads/master
| 2022-11-18T11:16:38.468585
| 2020-07-19T20:52:21
| 2020-07-19T20:52:21
| 280,948,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
from cs50 import get_string
from sys import exit
import re
def main():
text = get_string("Text: ")
grade = round(coleman_liau_index(L(text), S(text)))
if grade < 1:
print("Before Grade 1")
elif grade < 16:
print(f"Grade {grade}")
else:
print("Grade 16+")
exit(0)
def coleman_liau_index(L, S):
return 0.0588 * L - 0.296 * S - 15.8
def L(text):
    # average number of letters per 100 words; a word is a run of letters, apostrophes or hyphens
    words = len([
        word for word in re.split(r"[^A-Za-z'-]", text) if word != ''
    ])
    letters = sum([
        len(chunk) for chunk in re.split(r"[^A-Za-z]", text)
    ])
    return (letters / words) * 100
def S(text):
    # average number of sentences per 100 words; a sentence ends with ., ! or ?
    words = len([
        word for word in re.split(r"[^A-Za-z'-]", text) if word != ''
    ])
    sentences = len(re.findall(r"[.!?]$|[.!?]\s", text))
    return (sentences / words) * 100
main()
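# A hedged worked check of the formula above (added for illustration only), using a
# sample sentence from the pset walkthrough:
#   "Congratulations! Today is your day. You're off to Great Places! You're off and away!"
#   letters = 65, words = 14, sentences = 4
#   L = 65 / 14 * 100 ~= 464.29, S = 4 / 14 * 100 ~= 28.57
#   0.0588 * 464.29 - 0.296 * 28.57 - 15.8 ~= 3.04 -> rounds to "Grade 3"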
|
[
"joselucas0303@gmail.com"
] |
joselucas0303@gmail.com
|
06fa91e1b41f2ce0d895152caf7f8d1828297a61
|
7414e0795e47436587b7ad76a6f36e319fe47695
|
/day4_homework/homeworkquestion1.py
|
5c7c667c584fe4a392e54bf1647c9843c88628c6
|
[] |
no_license
|
jhaversat/qbb2016-answers
|
04a34b1d54f4cbb4c3bb2eaee4d4f23c1f9c8c3b
|
757bd118057dd2ce93286db9ce11254746d5ab01
|
refs/heads/master
| 2020-09-26T23:16:17.724758
| 2016-12-19T23:03:01
| 2016-12-19T23:03:01
| 66,853,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
#!/usr/bin/env Python
"""
How to run:
./homeworkquestion1.py <samples.csv> <replicates.csv> <ctab_dir>
Generates:
Line plot of Sxl transcript abundance (FPKM) across developmental stages for females and males, saved as Day4Homework1.png
"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
Sample_file = pd.read_csv(sys.argv[1])
replicate_file = pd.read_csv(sys.argv[2])
ctab_dir = sys.argv[3]
female_Sxl = []
male_Sxl = []
female_reps = []
male_reps = []
female_samples2 = replicate_file["sex"] == "female"
for sample in replicate_file[ female_samples2 ]["sample"] :
filename = ctab_dir + "/" + sample + "/t_data.ctab"
df = pd.read_table(filename)
Sxl_samples = df[ "t_name"] == "FBtr0331261"
#print type(df[df_roi2]["FPKM"].values)
female_reps.append(df[Sxl_samples]["FPKM"].values)
#.values returns just a number without all the fancy wrapping
female_samples = Sample_file["sex"] == "female"
for sample in Sample_file[ female_samples ]["sample"] :
filename = ctab_dir + "/" + sample + "/t_data.ctab"
df = pd.read_table(filename)
Sxl_samples = df[ "t_name"] == "FBtr0331261"
#print type(df[df_roi2]["FPKM"].values)
female_Sxl.append(df[Sxl_samples]["FPKM"].values)
#.values returns just a number without all the fancy wrapping
dev_stage = (Sample_file[female_samples]["stage"].values)
male_samples = Sample_file["sex"] == "male"
for sample in Sample_file[ male_samples ]["sample"] :
filename = ctab_dir + "/" + sample + "/t_data.ctab"
df = pd.read_table(filename)
Sxl_samples = df[ "t_name"] == "FBtr0331261"
#print type(df[df_roi2]["FPKM"].values)
male_Sxl.append(df[Sxl_samples]["FPKM"].values)
#.values returns just a number without all the fancy wrapping
male_samples2 = replicate_file["sex"] == "male"
for sample in replicate_file[ male_samples2 ]["sample"] :
filename = ctab_dir + "/" + sample + "/t_data.ctab"
df = pd.read_table(filename)
Sxl_samples = df[ "t_name"] == "FBtr0331261"
#print type(df[df_roi2]["FPKM"].values)
male_reps.append(df[Sxl_samples]["FPKM"].values)
#.values returns just a number without all the fancy wrapping
Replica = [4, 5, 6, 7]
plt.figure()
plt.plot(female_Sxl, color = 'r')
plt.plot(male_Sxl)
plt.scatter(Replica, female_reps, color = 'r')
plt.scatter(Replica, male_reps)
plt.title("Sxl abundance by developmental stage")
plt.xticks( range(len(dev_stage)), dev_stage)
plt.xlabel("developmental stage (days)")
plt.ylabel("FPKM (abundance)")
#plt.show()
plt.savefig("Day4Homework1.png")
plt.close()
|
[
"Jhaversat@gmail.com"
] |
Jhaversat@gmail.com
|
d3fd5a70f236f2a0ae3b837807f273fc78db2d66
|
54c3981ad7dfbeeabf79087746f143bcde4190d3
|
/bot.py
|
9a4084035f31149768e02a60b71c2bdf596c704b
|
[
"Apache-2.0"
] |
permissive
|
Sunetz/galinabot
|
c2ba0497c16ff8c5ecdc9b0e759ffc61dc30940c
|
a17d031c3838d4407fee8484cab39d5e1a0966f6
|
refs/heads/main
| 2023-04-05T11:16:48.346301
| 2021-04-13T10:49:04
| 2021-04-13T10:49:04
| 357,506,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
import config
import os
from random import choice
from time import sleep
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits
import telebot
from telebot import types
from selenium import webdriver
from imageai.Detection import ObjectDetection
import password
chromedriver_path = os.path.join("c:/", "Users", "ilyas", "Desktop", "web-projects", "utilities", "chromedriver.exe")
bot = telebot.TeleBot(config.TOKEN)
option = webdriver.ChromeOptions()
option.add_argument('headless')
driver = webdriver.Chrome(options=option, executable_path=chromedriver_path)
@bot.message_handler(commands=['start'])  # add buttons
def button(message):
markup = types.InlineKeyboardMarkup(row_width=2)
item = types.InlineKeyboardButton('Подорваться на поиски', callback_data='to find')
item2 = types.InlineKeyboardButton('Поговорить', callback_data='to talk')
item3 = types.InlineKeyboardButton('Угадать', callback_data='to guess')
item4 = types.InlineKeyboardButton('Нашаманить пароль', callback_data='password')
markup.add(item, item2, item3, item4)
bot.send_message(message.chat.id, 'Ну, допустим, здрасте!', reply_markup=markup)
@bot.callback_query_handler(func=lambda call: True)
def callback(call):
if call.message:
if call.data == 'to find':
msg = bot.send_message(call.message.chat.id, 'Ну, спроси, поищем')
bot.register_next_step_handler(msg, search)
elif call.data == 'to talk':
sti = open('sticker.webp', 'rb')
bot.send_sticker(call.message.chat.id, sti)
bot.send_message(call.message.chat.id, 'Ага, щазз')
elif call.data == 'to guess':
bot.send_message(call.message.chat.id, 'Валяй, загадывай')
prediction = ImageClassification()
elif call.data == 'password':
how_many_chars = bot.send_message(call.message.chat.id, 'Сколько букав?')
bot.register_next_step_handler(how_many_chars, new_password)
def new_password(message):
n = int(message.text)
chars = ascii_letters + digits
word = ''.join(choice(chars) for _ in range(n))
bot.send_message(message.chat.id, 'Держи, я стралась: ' + word)
def search(message):
bot.send_message(message.chat.id, "Зииин, есть у нас такое?")
image_href = 'https://yandex.ru/images/search?text=' + message.text
driver.get(image_href)
sleep(1)
images = driver.find_elements_by_class_name("serp-item__link")
for i in range(len(images)):
bot.send_message(message.chat.id, images[i].get_attribute('href'))
if i == 2:
break
bot.polling()
|
[
"asuntcova@yandex.ru"
] |
asuntcova@yandex.ru
|
6f42a4420ad54741cba50d807238607340e07c03
|
7d62096ef268cc25be7a2d4c2c2d6d490a7dff24
|
/examples/prms_streamflow_ex.py
|
d17d4416d19ad4d5291f57fdb8251ee6fd803217
|
[
"MIT"
] |
permissive
|
pymt-lab/pymt_prms_streamflow
|
becf1e5c6d0d30205408a7bd0e9cb5ca6b97c499
|
a25fe20da317b49ff12e0a6df546d2ba0e0dfe6f
|
refs/heads/master
| 2022-12-31T20:08:18.099769
| 2020-10-13T17:31:17
| 2020-10-13T17:31:17
| 264,988,300
| 0
| 0
|
MIT
| 2020-10-13T17:25:56
| 2020-05-18T15:53:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
"""Interact with the PRMSStreamflow BMI through Python."""
import os
import numpy as np
from pymt_prms_streamflow import PRMSStreamflow
run_dir = '../meta/PRMSStreamflow'
config_file = 'control.default'
# Instantiate a model and get its name.
m = PRMSStreamflow()
print(m.get_component_name())
# Initialize the model.
os.chdir(run_dir)
m.initialize(config_file)
print(config_file)
# List the model's exchange items.
print('Number of input vars:', m.get_input_item_count())
for var in m.get_input_var_names():
print(' - {}'.format(var))
print('Number of output vars:', m.get_output_item_count())
for var in m.get_output_var_names():
print(' - {}'.format(var))
# Get variable info.
# var_name = 'seg_outflow'
# var_name = 'flow_out'
var_name = 'hru_outflow'
print('Variable {}'.format(var_name))
print(' - variable type:', m.get_var_type(var_name))
print(' - units:', m.get_var_units(var_name))
print(' - itemsize:', m.get_var_itemsize(var_name))
print(' - nbytes:', m.get_var_nbytes(var_name))
print(' - location:', m.get_var_location(var_name))
# Get grid info for variable.
grid_id = m.get_var_grid(var_name)
print(' - grid id:', grid_id)
print(' - grid type:', m.get_grid_type(grid_id))
grid_rank = m.get_grid_rank(grid_id)
print(' - rank:', grid_rank)
grid_size = m.get_grid_size(grid_id)
print(' - size:', grid_size)
grid_shape = np.empty(grid_rank, dtype=np.int32)
try:
m.get_grid_shape(grid_id, grid_shape)
except RuntimeError:
print(' - shape: n/a')
else:
print(' - shape:', grid_shape)
grid_spacing = np.empty(grid_rank, dtype=np.float64)
try:
m.get_grid_spacing(grid_id, grid_spacing)
except RuntimeError:
print(' - spacing: n/a')
else:
print(' - spacing:', grid_spacing)
grid_origin = np.empty(grid_rank, dtype=np.float64)
try:
m.get_grid_origin(grid_id, grid_origin)
except RuntimeError:
print(' - origin: n/a')
else:
print(' - origin:', grid_origin)
grid_x = np.empty(grid_size, dtype=np.float64)
m.get_grid_x(grid_id, grid_x)
print(' - x:', grid_x)
grid_y = np.empty(grid_size, dtype=np.float64)
m.get_grid_y(grid_id, grid_y)
print(' - y:', grid_y)
grid_z = np.empty(grid_size, dtype=np.float64)
m.get_grid_z(grid_id, grid_z)
print(' - z:', grid_z)
# Get time information from the model.
print('Start time:', m.get_start_time())
print('End time:', m.get_end_time())
print('Current time:', m.get_current_time())
print('Time step:', m.get_time_step())
print('Time units:', m.get_time_units())
# Advance the model by one time step.
print('Advance model by a single time step...')
m.update()
print(' - new time:', m.get_current_time())
# Advance the model until a later time.
print('Advance model to a later time...')
m.update_until(5.0)
print(' - new time:', m.get_current_time())
# Get the variable values.
print('Get values of {}...'.format(var_name))
val = np.empty(grid_size, dtype=m.get_var_type(var_name))
m.get_value(var_name, val)
print(' - values at time {}:'.format(m.get_current_time()))
print(val)
# Get a reference to the variable and check that it updates.
if m.get_grid_type(grid_id) != 'scalar':
ref = m.get_value_ptr(var_name)
for _ in range(3):
print(' - values (by ref) at time {}:'.format(m.get_current_time()))
print(ref)
m.update()
# Set new variable values.
if var_name not in m.get_output_var_names():
print('Set values of {}...'.format(var_name))
new = np.arange(grid_size, dtype=m.get_var_type(var_name))
print(' - values to set:', new)
m.set_value(var_name, new)
print(' - check that values were set:', ref)
# Finalize the model.
m.finalize()
print('Done.')
|
[
"mark.piper@colorado.edu"
] |
mark.piper@colorado.edu
|
edbcbecc6f0f8d884487ac9870caa7a739c31558
|
00843d7d1744e25eecae29a95754294fab79986c
|
/train-model.py
|
d1ed9851036371c26642abb7abcf2624cda3d5d1
|
[] |
no_license
|
Aditya148/Face-Recognition
|
2970648277ef819d457658b3f1bb405b2885f13c
|
514c00d56fa2da7fd76e5e764132a76dcf9fe83c
|
refs/heads/main
| 2023-01-04T10:32:38.909947
| 2020-10-29T10:56:58
| 2020-10-29T10:56:58
| 307,947,791
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,256
|
py
|
# -*- coding: utf-8 -*-
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# re-size all the images to this
IMAGE_SIZE = [224, 224]
#Path for the dataset folders containing train and test/validation images
train_path = 'Datasets/Train'
valid_path = 'Datasets/Test'
# add preprocessing layer to the front of VGG
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# don't train existing weights
for layer in vgg.layers:
layer.trainable = False
# useful for getting number of classes
folders = glob('Datasets/Train/*')
# our layers - you can add more if you want
x = Flatten()(vgg.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='softmax')(x)
# create a model object
model = Model(inputs=vgg.input, outputs=prediction)
# view the structure of the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('Datasets/Train',
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('Datasets/Test',
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
'''r=model.fit_generator(training_set,
samples_per_epoch = 8000,
nb_epoch = 5,
validation_data = test_set,
nb_val_samples = 2000)'''
# fit the model
r = model.fit_generator(
training_set,
validation_data=test_set,
epochs=5,
steps_per_epoch=len(training_set),
validation_steps=len(test_set)
)
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')  # save before show(), otherwise the written figure is blank
plt.show()
# accuracies
plt.figure()
plt.plot(r.history['acc'], label='train acc')
plt.plot(r.history['val_acc'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()
import tensorflow as tf
from keras.models import load_model
#Save the model at your desired location
model.save('Models/facefeatures_new_model.h5')
print('Model saved successfully')
|
[
"noreply@github.com"
] |
Aditya148.noreply@github.com
|
3bba89a25e36b1f137a155594ea9efacddbf0a07
|
5c6857c12e4abfeb51e5c4426a6f86e4fae86ede
|
/jactorch/data/dataset.py
|
dd275c4051e8e5942601be97ceae205960430408
|
[
"MIT"
] |
permissive
|
lzhbrian/Jacinle
|
93bbb123bca52d5ca1ce5dbded3acb9b827b9061
|
4e6ea4902079d8d59d9eda0849714bd544f281a2
|
refs/heads/master
| 2023-04-01T03:07:07.208186
| 2021-04-07T17:58:38
| 2021-04-07T17:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : dataset.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 03/08/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import random
import itertools
from jacinle.logging import get_logger
logger = get_logger(__file__)
__all__ = ['IterableDatasetMixin', 'ProxyDataset', 'ListDataset', 'FilterableDatasetUnwrapped', 'FilterableDatasetView']
class Dataset(object):
"""An abstract class representing a Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __add__(self, other):
from torch.utils.data.dataset import ConcatDataset
return ConcatDataset([self, other])
class IterableDatasetMixin(object):
def __iter__(self):
for i in range(len(self)):
yield i, self[i]
class ProxyDataset(Dataset):
"""
A proxy dataset base class for wrapping a base dataset.
"""
def __init__(self, base_dataset):
"""
Args:
base_dataset (Dataset): the base dataset.
"""
self._base_dataset = base_dataset
@property
def base_dataset(self):
return self._base_dataset
def __getitem__(self, item):
return self.base_dataset[item]
def __len__(self):
return len(self.base_dataset)
class ListDataset(Dataset):
"""
Wraps a list into a pytorch Dataset.
"""
def __init__(self, list):
"""
Args:
list (list[Any]): the list of data.
"""
self.list = list
def __getitem__(self, item):
return self.list[item]
def __len__(self):
return len(self.list)
class FilterableDatasetUnwrapped(Dataset, IterableDatasetMixin):
"""
A filterable dataset. User can call various `filter_*` operations to obtain a subset of the dataset.
"""
def __init__(self):
super().__init__()
self.metainfo_cache = dict()
def get_metainfo(self, index):
if index not in self.metainfo_cache:
self.metainfo_cache[index] = self._get_metainfo(index)
return self.metainfo_cache[index]
def _get_metainfo(self, index):
raise NotImplementedError()
class FilterableDatasetView(FilterableDatasetUnwrapped):
def __init__(self, owner_dataset, indices=None, filter_name=None, filter_func=None):
"""
Args:
owner_dataset (Dataset): the original dataset.
indices (List[int]): a list of indices that was filterred out.
filter_name (str): human-friendly name for the filter.
filter_func (Callable): just for tracking.
"""
super().__init__()
self.owner_dataset = owner_dataset
self.indices = indices
self._filter_name = filter_name
self._filter_func = filter_func
@property
def unwrapped(self):
if self.indices is not None:
return self.owner_dataset.unwrapped
return self.owner_dataset
@property
def filter_name(self):
return self._filter_name if self._filter_name is not None else '<anonymous>'
@property
def full_filter_name(self):
if self.indices is not None:
return self.owner_dataset.full_filter_name + '/' + self.filter_name
return '<original>'
@property
def filter_func(self):
return self._filter_func
def collect(self, key_func):
return {key_func(self.get_metainfo(i)) for i in range(len(self))}
def filter(self, filter_func, filter_name=None):
indices = []
for i in range(len(self)):
metainfo = self.get_metainfo(i)
if filter_func(metainfo):
indices.append(i)
if len(indices) == 0:
raise ValueError('Filter results in an empty dataset.')
logger.critical('Filter dataset {}: #before={}, #after={}.'.format(filter_name, len(self), len(indices)))
return type(self)(self, indices, filter_name, filter_func)
def random_trim_length(self, length):
assert length < len(self)
logger.info('Randomly trim the dataset: #samples = {}.'.format(length))
        indices = random.sample(range(len(self)), length)  # sample indices without replacement
return type(self)(self, indices=indices, filter_name='randomtrim[{}]'.format(length))
def trim_length(self, length):
if type(length) is float and 0 < length <= 1:
length = int(len(self) * length)
assert length < len(self)
logger.info('Trim the dataset: #samples = {}.'.format(length))
return type(self)(self, indices=list(range(0, length)), filter_name='trim[{}]'.format(length))
def trim_range(self, begin, end=None):
if end is None:
end = len(self)
assert end <= len(self)
logger.info('Trim the dataset: #samples = {}.'.format(end - begin))
return type(self)(self, indices=list(range(begin, end)), filter_name='trimrange[{}:{}]'.format(begin, end))
def split_trainval(self, split):
if isinstance(split, float) and 0 < split < 1:
split = int(len(self) * split)
split = int(split)
assert 0 < split < len(self)
nr_train = split
nr_val = len(self) - nr_train
logger.info('Split the dataset: #training samples = {}, #validation samples = {}.'.format(nr_train, nr_val))
return (
type(self)(self, indices=list(range(0, split)), filter_name='train'),
type(self)(self, indices=list(range(split, len(self))), filter_name='val')
)
def split_kfold(self, k):
assert len(self) % k == 0
block = len(self) // k
for i in range(k):
yield (
type(self)(self, indices=list(range(0, i * block)) + list(range((i + 1) * block, len(self))), filter_name='fold{}[train]'.format(i + 1)),
type(self)(self, indices=list(range(i * block, (i + 1) * block)), filter_name='fold{}[val]'.format(i + 1))
)
def repeat(self, nr_repeats):
indices = list(itertools.chain(*[range(len(self)) for _ in range(nr_repeats)]))
logger.critical('Repeat the dataset: #before={}, #after={}.'.format(len(self), len(indices)))
return type(self)(self, indices=indices, filter_name='repeat[{}]'.format(nr_repeats))
def __getitem__(self, index):
if self.indices is None:
return self.owner_dataset[index]
return self.owner_dataset[self.indices[index]]
def __len__(self):
if self.indices is None:
return len(self.owner_dataset)
return len(self.indices)
def get_metainfo(self, index):
if self.indices is None:
return self.owner_dataset.get_metainfo(index)
return self.owner_dataset.get_metainfo(self.indices[index])
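# --- Hedged usage sketch (added for illustration; _ToyDataset and _sketch_filterable_dataset are hypothetical). ---
# FilterableDatasetView wraps a FilterableDatasetUnwrapped and exposes the filter/trim/split
# helpers above without copying the underlying data.
class _ToyDataset(FilterableDatasetUnwrapped):
    def __init__(self, values):
        super().__init__()
        self.values = values
    def __getitem__(self, index):
        return self.values[index]
    def __len__(self):
        return len(self.values)
    def _get_metainfo(self, index):
        return {'value': self.values[index]}
def _sketch_filterable_dataset():
    view = FilterableDatasetView(_ToyDataset(list(range(10))))
    evens = view.filter(lambda info: info['value'] % 2 == 0, filter_name='even')
    train, val = evens.split_trainval(0.6)
    return len(evens), len(train), len(val)  # (5, 3, 2)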
|
[
"maojiayuan@gmail.com"
] |
maojiayuan@gmail.com
|
47669b7666a1946891fd5bb9f52e54802d2f537e
|
9efe294abec2371bc938bad3115bc5cf9aa304be
|
/test_green_mask.py
|
6f34d96088b1fd7d06cd51275e1ae151275ec23e
|
[] |
no_license
|
Happyflytoyou/rss_xman
|
ea7f3d4064905aca79e574ec85858ffa4cfb95e6
|
9d52356e2c8b05dc9586fcc7fb5a3d1fae0f2ee0
|
refs/heads/master
| 2020-06-12T10:43:15.068768
| 2019-07-07T14:12:40
| 2019-07-07T14:12:40
| 194,274,610
| 4
| 3
| null | 2019-07-07T14:12:41
| 2019-06-28T13:00:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,832
|
py
|
import torch
from models.Siam_unet import SiamUNet
from models.final_Siam_unet import finalSiamUNet
from torch.autograd import Variable
import utils.dataset as my_dataset
import cv2
import numpy as np
import config.rssia_config as cfg
import preprocessing.transforms as trans
from torch.utils.data import DataLoader
from utils.eval import eval_cal
import gdal
from preprocessing.crop_img import splitimage
from PIL import Image
def prediction(weight):
print("weight")
best_metric = 0
train_transform_det = trans.Compose([
trans.Scale(cfg.TRANSFROM_SCALES),
])
val_transform_det = trans.Compose([
trans.Scale(cfg.TRANSFROM_SCALES),
])
test_transform_det = trans.Compose([
trans.Scale(cfg.TEST_TRANSFROM_SCALES),
])
model = SiamUNet()
# model=torch.nn.DataParallel(model)
if torch.cuda.is_available():
model.cuda()
print('gpu')
# model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
# model.load_state_dict(torch.load(weight))
checkpoint = torch.load(weight)
model.load_state_dict(checkpoint['state_dict'])
test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH, cfg.TEST_LABEL_PATH,cfg.TEST_TXT_PATH, 'val', transform=True, transform_med=test_transform_det)
test_dataloader = DataLoader(test_data, batch_size=cfg.TEST_BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True)
crop = 0
rows = 12
cols = 12
i = 0
for batch_idx, val_batch in enumerate(test_dataloader):
model.eval()
batch_x1, batch_x2, mask, im_name, h, w = val_batch
print('mask_type{}'.format(mask.type))
with torch.no_grad():
batch_x1,batch_x2=Variable((batch_x1)).cuda(),Variable(((batch_x2))).cuda()
try:
print('try')
output = model(batch_x1, batch_x2)
del batch_x1, batch_x2
except RuntimeError as exception:
if 'out of memory' in str(exception):
print('WARNING: out of memory')
if hasattr(torch.cuda,'empty_cache'):
torch.cuda.empty_cache()
else:
print('exception')
raise exception
# print(output)
output_w, output_h = output.shape[-2:]
output = torch.sigmoid(output).view(output_w, output_h, -1)
# print(output)
output = output.data.cpu().numpy() # .resize([80, 80, 1])
output = np.where(output > cfg.THRESH, 255, 0)
# print(output)
# have no mask so can not eval_cal
# precision,recall,F1=eval_cal(output,mask)
# print('precision:{}\nrecall:{}\nF1:{}'.format(precision,recall,F1))
print(im_name)
im_n=im_name[0].split('/')[1].split('.')[0].split('_')
im__path='final_result/weight50_dmc/mask_2017_2018_960_960_'+im_n[4]+'.tif'
# im__path = 'weitht50_tif.tif'
im_data=np.squeeze(output)
print(im_data.shape)
im_data=np.array([im_data])
print(im_data.shape)
im_geotrans=(0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
im_proj=''
im_width=960
im_height=960
im_bands=1
datatype = gdal.GDT_Byte
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(im__path,im_width, im_height, im_bands, datatype)
        if dataset is not None:
print("----{}".format(im__path))
dataset.SetGeoTransform(im_geotrans)
dataset.SetProjection(im_proj)
for i in range(im_bands):
dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
del dataset
if __name__ == "__main__":
# weight="model_tif_50.pth"
# weight="weights/model50.pth"
weight="weights/model50.pth"
prediction(weight)
|
[
"1848185889@qq.com"
] |
1848185889@qq.com
|
72406593eb4ad8bee4de31d6537425c61a039f83
|
b6316322bc74bb47c7108051f7a1b5b6343f92b6
|
/ground.py
|
0feabc558f6f181d5d1234dd4c8a93f976aeb5ab
|
[] |
no_license
|
HananeKheirandish/Assignment-13
|
9408a965338e6c201a6b4699e64013b8ee2e4345
|
9ada0212a091ccc2e9eb2199ba4bbee1293ac08e
|
refs/heads/master
| 2023-07-31T21:33:38.937656
| 2021-09-21T21:32:48
| 2021-09-21T21:32:48
| 408,975,958
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
import arcade
class Ground(arcade.Sprite):
def __init__(self, x, y):
super().__init__()
self.texture = arcade.load_texture(':resources:images/tiles/grassMid.png')
self.width = 120
self.height = 135
self.center_x = x
self.center_y = y
class Box(arcade.Sprite):
def __init__(self, x, y):
super().__init__()
self.texture = arcade.load_texture(':resources:images/tiles/grassHalf_mid.png')
self.width = 120
self.height = 135
self.center_x = x
self.center_y = y
|
[
"hananekheirandish1378@gmail.com"
] |
hananekheirandish1378@gmail.com
|
8e5fd91e5309a62076c60a7df6232e48b539fc7a
|
4eb7d655586db7e60e2d54be7dff4a199bb9005c
|
/shopping list.py
|
079d296e5e462d7ef4665143810df0b65fb06882
|
[] |
no_license
|
chaNcharge/Coding
|
c0a5f2b1bf5a38f14763aa5908204572d0ae5853
|
0bb94968efc3a3e75418a6ab6795911e64a83ee3
|
refs/heads/master
| 2021-01-22T07:31:46.490512
| 2018-06-06T01:28:32
| 2018-06-06T01:28:32
| 102,305,539
| 7
| 1
| null | 2017-09-13T01:50:08
| 2017-09-04T01:18:07
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
#!/usr/bin/python
shopping_list = ["banana", "orange", "meme", "meme", "pear", "meme", "apple"]
stock = {
"banana": 6,
"apple": 0,
"orange": 32,
"pear": 15,
"meme": 21
}
prices = {
"banana": 4.99,
"apple": 2.99,
"orange": 1.49,
"pear": 3.99,
"meme": 5.99
}
def compute_bill(food):
total = 0
for i in food:
if stock[i] > 0:
total += prices[i]
stock[i] -= 1
return total
bill = compute_bill(shopping_list)
print bill
|
[
"chancharge@gmail.com"
] |
chancharge@gmail.com
|
f6a2cee4962eb4e0522e2c6b922c4fab40e4ebc2
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tests/layer_tests/common/utils/tf_utils.py
|
fb02c3f0a1b298370faf288e2f248e5dad1da3b0
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import re
import numpy as np
import tensorflow as tf
from openvino.tools.mo.ops.op import PermuteAttrs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def mix_array_with_value(input_array, value):
input_shape = input_array.shape
mask = np.random.randint(0, 2, input_shape).astype(bool)
return np.where(mask, input_array, value)
def load_graph(model_file, output_nodes_for_freeze=None):
is_meta = os.path.splitext(model_file)[-1] == ".meta"
tf.compat.v1.reset_default_graph()
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef() if not is_meta else tf.compat.v1.MetaGraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node
for node in nodes_to_clear_device:
node.device = ""
if is_meta:
with tf.compat.v1.Session() as sess:
restorer = tf.compat.v1.train.import_meta_graph(graph_def)
restorer.restore(sess, re.sub('\.meta$', '', model_file))
graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph_def.graph_def,
output_nodes_for_freeze)
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
def collect_tf_references(model_path, feed_dict, out_layer, output_nodes_for_freeze=None):
_feed_dict = dict()
graph = load_graph(model_path, output_nodes_for_freeze)
output_tensors_list = list()
outputs_list = list()
for input in feed_dict:
input_node = [node for node in graph.as_graph_def().node if node.name == input][0]
if input_node.op == "Placeholder":
tensor = graph.get_tensor_by_name(input + ":0")
_feed_dict[tensor] = feed_dict[input]
else:
for parrent_input in input_node.input:
in_node = [node for node in graph.as_graph_def().node if node.name == parrent_input][0]
if in_node.op in ['Const', 'Assign', 'NoOp', 'Assert']:
continue
else:
tensor = graph.get_tensor_by_name(parrent_input + ":0")
_feed_dict[tensor] = feed_dict[input]
for output in out_layer:
tensor = graph.get_tensor_by_name(output + ":0")
output_tensors_list.append(tensor)
outputs_list.append(output)
with graph.as_default():
with tf.compat.v1.Session(graph=graph) as sess:
outputs = sess.run(output_tensors_list, feed_dict=_feed_dict)
out_dict = dict(zip(outputs_list, outputs))
return out_dict
def children(op, graph):
op = graph.get_operation_by_name(op)
return set(op for out in op.outputs for op in out.consumers())
def collect_control_dependencies(graph):
control_dependents_map = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input.name not in control_dependents_map:
control_dependents_map[control_input.name] = [op]
else:
control_dependents_map[control_input.name].append(op)
return control_dependents_map
def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
placeholders = dict()
variables = list()
outputs = list()
graph = load_graph(model_path, output_nodes_for_freeze)
unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']
control_dependents_map = collect_control_dependencies(graph)
for node in graph.as_graph_def().node:
if node.op == 'Placeholder':
node_dict = dict()
node_dict['type'] = tf.DType(node.attr['dtype'].type).name
node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\n', '').replace(' ', '').replace(
'size:', '').replace('[', '').replace(']', '')
node_dict['shape'] = tuple(map(lambda x: int(x) if x else 0, node_dict['shape'].split(',')))
placeholders[node.name] = node_dict
if node.op == "Variable" or node.op == "VariableV2":
variables.append(node.name)
if len(children(node.name, graph)) == 0 and node.name not in control_dependents_map:
if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:
outputs.append(node.name)
result = dict()
result['inputs'] = placeholders
result['outputs'] = outputs
if reshape_net:
out_layer = list(result['inputs'].keys()) + result['outputs']
feed_dict = {}
for inputl in reshape_net:
feed_dict.update({inputl: np.ones(shape=reshape_net[inputl])})
scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)
for layer in scoring_res:
if layer in result['inputs']:
result['inputs'][layer]['shape'] = scoring_res[layer].shape
return result
def permute_nhwc_to_nchw(shape, use_new_frontend=False):
if use_new_frontend:
return shape
perm = PermuteAttrs.get_nhwc_to_nchw_permutation(len(shape)).perm
new_shape = np.array(shape)[perm]
return new_shape
def permute_nchw_to_nhwc(shape, use_new_frontend=False):
if use_new_frontend:
return shape
perm = PermuteAttrs.get_nchw_to_nhwc_permutation(len(shape)).perm
new_shape = np.array(shape)[perm]
return new_shape
def permute_axis(axis, permutation_inv):
return permutation_inv[axis]
def transpose_nchw_to_nhwc(data, use_new_frontend, use_old_api):
if use_new_frontend or not use_old_api:
return data
if len(data.shape) == 4: # reshaping for 4D tensors
return data.transpose(0, 2, 3, 1)
elif len(data.shape) == 5: # reshaping for 5D tensors
return data.transpose(0, 2, 3, 4, 1)
else:
return data
def transpose_nhwc_to_nchw(data, use_new_frontend, use_old_api):
if use_new_frontend or not use_old_api:
return data
if len(data.shape) == 4: # reshaping for 4D tensors
return data.transpose(0, 3, 1, 2) # 2, 0, 1
elif len(data.shape) == 5: # reshaping for 5D tensors
return data.transpose(0, 4, 1, 2, 3) # 3, 0, 1, 2
else:
return data
def save_to_pb(tf_model, path_to_saved_tf_model, model_name = 'model.pb'):
tf.io.write_graph(tf_model, path_to_saved_tf_model, model_name, False)
assert os.path.isfile(os.path.join(path_to_saved_tf_model, model_name)), "model.pb haven't been saved " \
"here: {}".format(path_to_saved_tf_model)
return os.path.join(path_to_saved_tf_model, model_name)
|
[
"noreply@github.com"
] |
openvinotoolkit.noreply@github.com
|
98fc54b6771a94ea39ab4d85d2ce2a00aac5a6fe
|
29061d1ac5ee01d9e2dd73c691bc03d795e0ce7a
|
/2020_21_p2.py
|
5c757a4d3bcfd6ef9b8341a38c01b8186770ebe0
|
[
"MIT"
] |
permissive
|
Dementophobia/advent-of-code-2020
|
af074c8f847095f3d111d656f34aa8c46cb4e18a
|
ee1fb67d4ec55ed082aa7723c79759310925a85a
|
refs/heads/main
| 2023-02-05T19:12:54.844842
| 2020-12-25T10:17:38
| 2020-12-25T10:17:38
| 317,787,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
from aoc import read_file, timer
from re import match, sub
from collections import defaultdict
def analyse_input(raw_input):
allergens_dict = defaultdict(list)
all_ingredients = []
for line in raw_input:
ingredients, allergens = [words.split() for words in match(r"((?:(?:\w+) )+)\(contains ((?:(?:\w+) *)+)\)", sub(",", "", line)).group(1, 2)]
for allergen in allergens:
allergens_dict[allergen].append(set(ingredients))
all_ingredients.extend(ingredients)
return allergens_dict, all_ingredients
def identify_allergen(allergens_in_food):
for allergen, ingredients in allergens_in_food.items():
candidates = ingredients[0].intersection(*ingredients)
if len(candidates) == 1:
return allergen, list(candidates)[0]
def eliminate_combo(allergens_in_food, allergen, ingredient):
del allergens_in_food[allergen]
for allergen in allergens_in_food.keys():
for ingredients in allergens_in_food[allergen]:
ingredients.discard(ingredient)
@timer
def solve():
allergens_in_food, all_ingredients = analyse_input(read_file("21"))
identified_ingredients = []
while len(allergens_in_food):
allergen, ingredient = identify_allergen(allergens_in_food)
eliminate_combo(allergens_in_food, allergen, ingredient)
identified_ingredients.append((allergen, ingredient))
identified_ingredients.sort(key = lambda combo: combo[0])
return ",".join([ingredient[1] for ingredient in identified_ingredients])
result = solve()
print(f"Solution: {result}")
|
[
"34186142+Dementophobia@users.noreply.github.com"
] |
34186142+Dementophobia@users.noreply.github.com
|
c812cf4e30c6d1d51291e9295e325acdf22fc658
|
c705b2620119df0d60e925e55228bfbb5de3f568
|
/archives/learning/security/otp.py
|
f2cd3498bc6d6c0e3c11c542444a6afe0249e1a0
|
[
"Apache-2.0"
] |
permissive
|
mcxiaoke/python-labs
|
5aa63ce90de5da56d59ca2954f6b3aeae7833559
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
refs/heads/master
| 2021-08-05T03:47:51.844979
| 2021-07-24T11:06:13
| 2021-07-24T11:06:13
| 21,690,171
| 7
| 7
|
Apache-2.0
| 2020-08-07T01:52:32
| 2014-07-10T10:20:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,777
|
py
|
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
otpauth
~~~~~~~
Implements two-step verification of HOTP/TOTP.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
:license: BSD, see LICENSE for more details.
"""
import base64
import hashlib
import hmac
import struct
import sys
import time
import warnings
if sys.version_info[0] == 3:
python_version = 3
string_type = str
else:
python_version = 2
string_type = unicode
range = xrange
class OTPAuth(object):
"""One Time Password Authentication.
:param secret: A secret token for the authentication.
"""
def __init__(self, secret):
self.secret = secret
def hotp(self, counter=4):
"""Generate a HOTP code.
:param counter: HOTP is a counter based algorithm.
"""
return generate_hotp(self.secret, counter)
def totp(self, period=30):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param period: A period that a TOTP code is valid in seconds
"""
return generate_totp(self.secret, period)
def valid_hotp(self, code, last=0, trials=100):
"""Valid a HOTP code.
:param code: A number that is less than 6 characters.
:param last: Guess HOTP code from last + 1 range.
:param trials: Guest HOTP code end at last + trials + 1.
"""
if not valid_code(code):
return False
code = int(code)
for i in range(last + 1, last + trials + 1):
if self.hotp(counter=i) == code:
return i
return False
def valid_totp(self, code, period=30):
"""Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
"""
return valid_code(code) and self.totp(period) == int(code)
def to_uri(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm.
"""
type = type.lower()
if type not in ('hotp', 'totp'):
raise ValueError('type must be hotp or totp')
if type == 'hotp' and not counter:
raise ValueError('HOTP type authentication need counter')
secret = base64.b32encode(to_bytes(self.secret))
# bytes to string
secret = secret.decode('utf-8')
# remove pad string
secret = secret.strip('=')
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
'&issuer=%(issuer)s')
dct = dict(
type=type, label=label, issuer=issuer,
secret=secret, counter=counter
)
ret = url % dct
if type == 'hotp':
ret = '%s&counter=%s' % (ret, counter)
return ret
def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter)
def generate_hotp(secret, counter=4):
"""Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm.
"""
# https://tools.ietf.org/html/rfc4226
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
ob = digest[19]
if python_version == 2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token
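# Illustrative usage (not part of the original module): the token returned by
# generate_hotp() is an integer below 1,000,000, so it may have fewer than six
# digits; zero-pad it when displaying, e.g.
#     code = generate_hotp('12345678901234567890', counter=1)
#     print('%06d' % code)
# The secret shown here is just an example value.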
def generate_totp(secret, period=30):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
"""
counter = int(time.time()) // period
return generate_hotp(secret, counter)
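# Illustrative note (not part of the original module): the TOTP counter is the
# number of whole periods since the Unix epoch, e.g. with period=30 and
# time.time() == 1000000000 the counter is 1000000000 // 30 == 33333333, so any
# two calls within the same 30-second window produce the same code.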
def to_bytes(text):
if isinstance(text, string_type):
# Python3 str -> bytes
# Python2 unicode -> str
text = text.encode('utf-8')
return text
def valid_code(code):
code = string_type(code)
return code.isdigit() and len(code) <= 6
if __name__ == '__main__':
    gotp = OTPAuth('xjom6zpducm4mltk5stxcogv3wcvq7do')
    print(gotp.totp())
    dotp = OTPAuth('PBFCKI5CSTEGFKDV4RHCLFZSCU')
    print(dotp.totp())
|
[
"mcxiaoke@gmail.com"
] |
mcxiaoke@gmail.com
|
acb759430a9f83b7215bb9aef54d72993c9d46b7
|
3ef8175776af868486c9f70f2846f32d36d21fe1
|
/PalindromePartitioning.py
|
6c5a0c324609b7dd866a89bb85a0de1f44a3e5e3
|
[] |
no_license
|
youngyuan/Leetcodes
|
495d3c355c24294024cc4a6caaf7bf2a4f11649f
|
4464e40dc78b3529226e98996d4da838828fb3a5
|
refs/heads/master
| 2021-01-22T23:58:19.887689
| 2015-09-18T15:07:51
| 2015-09-18T15:07:51
| 41,552,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
class Solution(object):
def isPalindrome(self, s, start, end):
i = start
j = end
while i < j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
def dfs(self, s, start, end, res, path):
if start > end:
res.append(path)
return
        # i is the end index (exclusive) of the first substring s[start:i]
for i in range(start + 1, end + 2):
if self.isPalindrome(s, start, i - 1):
self.dfs(s, i, end, res, path + [s[start:i]])
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
res = []
self.dfs(s, 0, len(s) - 1, res, [])
return res
s = Solution()
print(s.partition("aab"))
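# Illustrative note (not part of the original solution): for the input "aab"
# the printed result is [['a', 'a', 'b'], ['aa', 'b']], since those are the
# only ways to split "aab" into palindromic substrings.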
|
[
"youngyuan00@gmail.com"
] |
youngyuan00@gmail.com
|
1e91ab4820bab344b781fa5ffd2176a03f1b30c7
|
889ef6cd7328a08d3b12518b18c6f41c40702d32
|
/main.py
|
5bc7067e0d61456139ec529229f634e85df2253f
|
[] |
no_license
|
Yetinator/BicycleRaceSplits
|
ad6c8e91f024a3dca247d56ae667b4aeed2f4d39
|
f26f563ca44a4a471cf1c4592f2d1ec0c52bde7e
|
refs/heads/master
| 2020-04-09T00:44:38.392479
| 2018-12-19T21:43:03
| 2018-12-19T21:43:03
| 159,879,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from tkinter import *
import tkinter.filedialog
import time
from array import *
from writeClass import *
from windowClass import AbstractWindow
from windowClass import MyWindow
from stopWatchClass import SwissWatch
tempWindow = MyWindow()
mainloop()
|
[
"Helmethair@zoho.com"
] |
Helmethair@zoho.com
|
4210d0e4b8cd675a18ae6f39d33ccab6ad71f1fe
|
213f71cd752b20327e175df8f9ad6fdb7fcf596b
|
/blog/migrations/0003_auto_20190509_1119.py
|
23995561e18aa63ebffeb36b10162e77a7080a7f
|
[] |
no_license
|
jnsun/mysite
|
94fd5c7591996d5789427b6b43f8fc546050f6da
|
3ca839d4cd4701f8f2dffc7b161e4a311d53ee31
|
refs/heads/master
| 2020-05-20T03:27:56.666352
| 2019-05-09T06:52:48
| 2019-05-09T06:52:48
| 177,090,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.2.1 on 2019-05-09 03:19
from django.db import migrations
import mdeditor.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20190507_1044'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=mdeditor.fields.MDTextField(),
),
]
|
[
"jnsun@qq.com"
] |
jnsun@qq.com
|
fcea36acdb8c99a405600822c08ef350ce5e9ff8
|
3427605c7877d1e9899a629160355f8b653847a9
|
/migrations/0001_initial.py
|
3743e4cbbfc0bdaf00ada9ff3596db4d67f1fce9
|
[] |
no_license
|
Pranathi-Paruchuri/E-Commerce
|
d40d97fa6ac37a8f9c7b1066a918aa736dea1045
|
565ff974023678d1b08e07d9c14ddf076aeef921
|
refs/heads/master
| 2022-11-29T12:52:46.991256
| 2020-07-27T18:31:23
| 2020-07-27T18:31:23
| 282,981,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,415
|
py
|
# Generated by Django 3.0.8 on 2020-07-20 11:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_ordered', models.DateTimeField(auto_now_add=True)),
('complete', models.BooleanField(default=False)),
('transaction_id', models.CharField(max_length=100, null=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Customer')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('price', models.FloatField()),
('digital', models.BooleanField(blank=True, default=False, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
],
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('zipcode', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Customer')),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Order')),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(blank=True, default=0, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Order')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.Product')),
],
),
]
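    # Illustrative sketch (not part of the generated migration): these operations
    # correspond roughly to models like the following in the app's models.py,
    # assuming the app label is 'store':
    #
    #     class Customer(models.Model):
    #         user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True,
    #                                     blank=True, on_delete=models.CASCADE)
    #         name = models.CharField(max_length=200, null=True)
    #         email = models.CharField(max_length=200)
    #
    #     class Order(models.Model):
    #         customer = models.ForeignKey(Customer, null=True, blank=True,
    #                                      on_delete=models.SET_NULL)
    #         date_ordered = models.DateTimeField(auto_now_add=True)
    #         complete = models.BooleanField(default=False)
    #         transaction_id = models.CharField(max_length=100, null=True)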
|
[
"noreply@github.com"
] |
Pranathi-Paruchuri.noreply@github.com
|
bb65ecfb10019fd65397574241138df4aa66a8fb
|
9789aaa94e4a321fed2a1f624ef180d938f1fe56
|
/src/entry.py
|
a751c34c3bcfab03051a7dce97a1e000465541e1
|
[] |
no_license
|
fantascy/snsanalytics
|
61ff6b8f384f0bd4be8f89a2a19101ad2cf1bc77
|
927f186c7f5a1d534e0ff7ce7aff46a0c1a36c51
|
refs/heads/master
| 2021-01-13T14:18:05.684839
| 2016-11-06T07:43:35
| 2016-11-06T07:43:35
| 72,827,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
from django.views.generic.simple import direct_to_template, redirect_to
from django.views.defaults import page_not_found as django_page_not_found
import deploysoup
import context
import settings
from sns.dashboard.views import home as sns_home
from sns.chan.views import twitter_callback as new_twitter_callback
from sns.chan.views import facebook_callback as new_facebook_callback
from sns.url.views import redirect as new_redirect
from msb.dashboard.views import home as msb_home
from fe.dashboard.views import home as fe_home
from soup.dashboard.views import home as soup_home
from cake.dashboard.views import home as cake_home
from soup.user.views import twitter_callback as soup_twitter_callback
_DASHBOARD_MAP = {
"sns" : sns_home,
"msb" : msb_home,
"fe" : fe_home,
"soup" : soup_home,
"cake" : cake_home,
"appspot" : sns_home,
}
def home(request):
return _DASHBOARD_MAP[context.get_context().app()](request)
def twitter_callback(request):
if context.get_context().app() == deploysoup.APP :
return soup_twitter_callback(request)
else:
return new_twitter_callback(request)
def facebook_callback(request):
return new_facebook_callback(request)
def redirect(request, urlHash):
return new_redirect(request, urlHash)
def page_not_found(request):
context.get_context().set_login_required(False)
return django_page_not_found(request, template_name=("%s/404.html" % context.get_context().app()))
def favicon(request):
context.get_context().set_login_required(False)
return redirect_to(request, url=("%s%s/images/favicon.ico" % (settings.MEDIA_URL, context.get_context().app())))
def robot_txt(request):
context.get_context().set_login_required(False)
return direct_to_template(request, "%s/robots.txt" % context.get_context().app())
|
[
"cong@snsanalytics.com"
] |
cong@snsanalytics.com
|
49e2dcdaf2dd5f03916376f133577cc6a1c46e39
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/units/quantity.py
|
b315485d32611402a5243f5f34a2aac4c8397ccb
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 69,779
|
py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat import NUMPY_LT_1_17
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
from .quantity_helper.function_helpers import (
SUBCLASS_SAFE_FUNCTIONS, FUNCTION_HELPERS, DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f'{val.value}'
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Quantity (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: http://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if value.dtype.kind in 'iu':
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
                # the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{}" as a {}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (isiterable(value) and len(value) > 0 and
all(isinstance(v, Quantity) for v in value)):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=False, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(0), numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and value.dtype.kind in 'iuO':
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more, since we require '
'numpy >=1.16. Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, 'value', input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : Quantity subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # double the time of ``obj.__class__ is np.ndarray``, so it is not
        # worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
        initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{} instances require {} units, not {} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[]):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
See also
--------
to_value : get the numerical value in a given unit.
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
return self._new_view(self._to_value(unit, equivalencies), unit)
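    # Illustrative usage (not part of the astropy source): ``to`` always returns
    # a new, converted copy, e.g.
    #     q = Quantity(1., 'm')
    #     q.to('cm')      # <Quantity 100. cm>
    # whereas ``to_value`` below returns just the number (or array).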
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If not provided or
``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
else:
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
# Index with empty tuple to decay array scalars in to numpy scalars.
return value[()]
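    # Illustrative usage (not part of the astropy source):
    #     q = Quantity([1., 2.], 'm')
    #     q.to_value('cm')    # array([100., 200.])
    #     q.to_value()        # a view of the underlying data, in metres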
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
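    # Illustrative usage (not part of the astropy source):
    #     Quantity(1., 'km').si    # 1000.0 m
    #     Quantity(1., 'm').cgs    # 100.0 cm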
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
    # not turned on for Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
"'{}' object has no '{}' member".format(
self.__class__.__name__,
attr))
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
"{} instance has no attribute '{}'".format(
self.__class__.__name__, attr))
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting. On the other
# hand, for structured arrays, the ufunc does not work, so we do use
# __eq__ and live with the warnings.
def __eq__(self, other):
try:
if self.dtype.kind == 'V':
return super().__eq__(other)
else:
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
if self.dtype.kind == 'V':
return super().__ne__(other)
else:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
try:
factor = self.unit._to(other)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] *= factor
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
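    # Illustrative usage (not part of the astropy source): ``<<`` converts to a
    # new unit, and ``<<=`` converts in place, e.g.
    #     q = Quantity([1., 2.], 'm')
    #     q << u.km      # new Quantity in km (values 0.001, 0.002)
    #     q <<= u.cm     # q itself is now in cm (values 100., 200.)
    # where ``u`` is assumed to be the ``astropy.units`` module.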
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __div__(self, other):
""" Division between `Quantity` objects. """
return self.__truediv__(other)
def __idiv__(self, other):
""" Division between `Quantity` objects. """
return self.__itruediv__(other)
def __rdiv__(self, other):
""" Division between `Quantity` objects. """
return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : numeric, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
subfmt : str, optional
Subformat of the result. For the moment,
only used for format="latex". Supported values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
lstr
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
return f'{self.value}{self._unitstr:s}'
# else, for the moment we assume format="latex"
# need to do try/finally because "threshold" cannot be overridden
# with array2string
pops = np.get_printoptions()
format_spec = '.{}g'.format(
precision if precision is not None else pops['precision'])
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({}{}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
try:
formatter = {'float_kind': float_formatter,
'complex_kind': complex_formatter}
if conf.latex_array_threshold > -1:
np.set_printoptions(threshold=conf.latex_array_threshold,
formatter=formatter)
# the view is needed for the scalar case - value might be float
latex_value = np.array2string(
self.view(np.ndarray),
max_line_width=np.inf, separator=',~')
latex_value = latex_value.replace('...', r'\dots')
finally:
np.set_printoptions(**pops)
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
delimiter_left, delimiter_right = formats[format][subfmt]
return r'{left}{0} \; {1}{right}'.format(latex_value, latex_unit,
left=delimiter_left,
right=delimiter_right)
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=prefixstr)
return f'{prefixstr}{arrstr}{self._unitstr:s}>'
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format(f"{value}{self._unitstr:s}",
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
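    # Illustrative usage (not part of the astropy source):
    #     Quantity(1., 'km/h').decompose()   # roughly 0.2778 m / s
    # i.e. the value is rescaled so that only irreducible units remain.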
# These functions need to be overridden to take into account the units
# Array conversion
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.list()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity, so let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(self.unit)
except TypeError:
# Could not make a Quantity. Maybe masked printing?
# Note: masked quantities do not work very well, but no reason
# to break even repr and str.
if (value is np.ma.masked_print_option and
self.dtype.kind == 'O'):
return value
else:
raise
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(as_quantity.value)):
_value = as_quantity.value
else:
raise
if check_precision:
# If, e.g., we are casting double to float, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype)
if not np.all(np.logical_or(self_dtype_array == _value,
np.isnan(_value))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn("function '{}' is not known to astropy's Quantity. "
"Will run it anyway, hoping it will treat ndarray "
"subclasses correctly. Please raise an issue at "
"https://github.com/astropy/astropy/issues. "
.format(function.__name__), AstropyWarning)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
for t in types):
raise TypeError("the Quantity implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
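    # Editorial sketch (not part of the original class): other numpy reductions
    # could be exposed the same way through _wrap_function, e.g. a hypothetical
    #
    #     def ptp(self, axis=None, out=None):
    #         return self._wrap_function(np.ptp, axis, out=out)
    #
    # which strips the unit, calls np.ptp on the raw values, and re-attaches
    # self.unit to the result via _result_as_quantity.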
if NUMPY_LT_1_17:
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
def mean(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.mean, axis, dtype, out=out)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
            The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
f", so cannot set it to '{unit}'."))
super()._set_unit(unit)
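# Editorial sketch (not part of the original module): a minimal subclass along the
# lines described in the SpecificTypeQuantity docstring above; astropy's Angle and
# Distance follow the same pattern.  The name and unit below are illustrative only.
#
#     import astropy.units as u
#
#     class Length(SpecificTypeQuantity):
#         _equivalent_unit = u.m      # only length-equivalent units are accepted
#         _default_unit = u.m         # used when the constructor gets no unit
#
#     Length(3., u.km)                # fine
#     Length(3., u.s)                 # raises UnitTypeError via _set_unit above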
def isclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`.
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`.
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'desired' ({}) and 'actual' ({}) "
"are not convertible"
.format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'atol' ({}) and 'actual' ({}) "
"are not convertible"
.format(atol.unit, actual.unit))
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
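def _editorial_units_demo():
    # Editorial sketch (not part of the original module): how the unit-aware
    # isclose/allclose defined above behave.  Never called; kept only as an example.
    import astropy.units as u
    assert allclose([100., 200.] * u.cm, [1., 2.] * u.m)
    assert isclose(1. * u.km, 999. * u.m, atol=2. * u.m)
    try:
        allclose(1. * u.m, 1. * u.s)
    except UnitsError:
        pass  # incompatible units raise UnitsError, as _unquantify_allclose_arguments shows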
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
ff4f676fa354d855a5e6ad5e934dd37f8761e8d9
|
20c71b8e74506e569426c29645d708015fecca4b
|
/main/api.py
|
352a92724e2bea2a9deb0889d8be6f8a9c7bc568
|
[] |
no_license
|
ruchej/guestbook
|
5468e5b57a9732956253340efe13ffac76965f4d
|
e1cd1d358aa4ee01fb36519aee81ad50ffdf031c
|
refs/heads/master
| 2022-12-12T06:39:40.997569
| 2020-09-21T06:43:43
| 2020-09-21T06:43:43
| 296,591,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
from rest_framework import generics
from main.models import GuestResponse
from .serializers import GuestResponseSerializer
class ListGR(generics.ListAPIView):
queryset = GuestResponse.objects.all()
serializer_class = GuestResponseSerializer
class CreateGR(generics.CreateAPIView):
serializer_class = GuestResponseSerializer
|
[
"lvar-8@ya.ru"
] |
lvar-8@ya.ru
|
1082a5fed48e307852e68329f09867d9e725f342
|
af35bc4b716b99cc2995a224c163ac36c438792c
|
/for/prime_numbers.py
|
24a685f272009500a070a1ae008e4d29b6c531fa
|
[] |
no_license
|
Kris2209/python_practice
|
795cca381132751a3ecf2ff49069043dae7fdb32
|
de3a73b1f88dcce2ad51a6a172e92652350b2842
|
refs/heads/main
| 2023-08-11T07:31:33.961991
| 2021-09-14T14:34:07
| 2021-09-14T14:34:07
| 398,629,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# Write a program that counts the prime numbers in a given sequence and prints the answer.
def is_prime(n):
    if n < 2:
        return False
    stop = int(n ** 0.5)  # trial division only needs to go up to sqrt(n)
    for k in range(2, stop + 1):
        if n % k == 0:
            return False
    return True
n = int(input('How many numbers should be checked? '))
count = 0
for k in range(n):
    number = int(input(f'Enter number {k + 1}: '))
    if is_prime(number):
        count += 1
print()
print('Your sequence contains', count, 'prime numbers')
|
[
"yakovenko.k1997@gmail.com"
] |
yakovenko.k1997@gmail.com
|
a1c9d9f338a1b479bd1b42a0f0397121e24c9b17
|
30b2eb381ec8f3225671274e77a55b63206dfb60
|
/leetcode/p0912/merge_sort.py
|
4e7fa636f095e20a1cd252558c4dd90128788689
|
[] |
no_license
|
b1ueskydragon/PythonGround
|
52888f32336e5e20be8490454beb67e676be7057
|
5a401250e88926235f581e6c004d1a4acb44230d
|
refs/heads/master
| 2021-07-10T03:00:38.340959
| 2021-04-02T03:34:29
| 2021-04-02T03:34:29
| 98,208,402
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
class Solution:
def sortArray(self, nums: [int]) -> [int]:
def divide(xs):
if len(xs) < 2:
return xs
mid = len(xs) // 2
left = divide(xs[:mid])
right = divide(xs[mid:])
return conquer(left, right)
def conquer(l1, l2):
# l1, l2 = sorted list
merged = []
while l1 and l2:
if l1[0] < l2[0]:
merged.append(l1.pop(0))
else:
merged.append(l2.pop(0))
merged += l1
merged += l2
return merged
return divide(nums)
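# Editorial note (not part of the original file): example use of the class above.
#
#     Solution().sortArray([5, 2, 3, 1])   # -> [1, 2, 3, 5]
#
# Note that conquer() pops from the front of Python lists, which is O(n) per pop;
# an index-based merge would keep the overall merge sort at O(n log n).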
|
[
"dragoalie@gmail.com"
] |
dragoalie@gmail.com
|
6148052c0e616349dbab22d39e1b93729ce4070f
|
c79b32f270cf5051ab0488528eb1b1b05b674b06
|
/lab8/api/models.py
|
eef069428d4c023849a152ac9cbe1f4016df31e7
|
[] |
no_license
|
AzhrAkhmtkn/WebDevelopment
|
a977b1b63a6ee521818a6e79f88574ce2d7a1d6b
|
028c75c78416547d475c8f7da7d8bcb2ed2d9d12
|
refs/heads/main
| 2023-04-16T23:57:56.853946
| 2021-04-14T16:42:48
| 2021-04-14T16:42:48
| 337,195,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=300)
def to_json(self):
return {
'id': self.id,
'name': self.name
}
class Product(models.Model):
name = models.CharField(max_length=300)
price = models.FloatField(default= 0)
description = models.TextField(max_length=300)
count = models.IntegerField(default= 0)
is_active = models.BooleanField()
def to_json(self):
return {
'id': self.id,
'name': self.name,
'price': self.price,
'description': self.description,
'count': self.count,
'is_active': self.is_active
}
|
[
"a.bolatovna2001@gmail.com"
] |
a.bolatovna2001@gmail.com
|
1d2877dd8825d0b1fb80d7ab2ab259963689f282
|
11060ca244940baef96a51d794d73aab44fc31c6
|
/src/brainstorming/tornado/restmongo/rest/handler.py
|
326a05674ace5ea711685b23c4d878747376ff37
|
[] |
no_license
|
D3f0/txscada
|
eb54072b7311068a181c05a03076a0b835bb0fe1
|
f8e1fd067a1d001006163e8c3316029f37af139c
|
refs/heads/master
| 2020-12-24T06:27:17.042056
| 2016-07-27T17:17:56
| 2016-07-27T17:17:56
| 3,565,335
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
from tornado.web import RequestHandler, HTTPError
from rest.emitters import Emitter
import httplib as http
class RESTHandler(RequestHandler):
MAPPED_METHODS = { 'GET': 'retrieve', 'POST': 'create', 'PUT': 'update', 'DELETE': 'delete' }
def __init__(self, *args, **kwargs):
emitter_format = kwargs.pop('emitter_format', 'json')
super(RESTHandler, self).__init__(*args, **kwargs)
self.emitter_class, ct = Emitter.get(emitter_format)
self.set_header('Content-Type', ct)
def retrieve(self, *args, **kwargs):
raise HTTPError(http.METHOD_NOT_ALLOWED)
def create(self, *args, **kwargs):
raise HTTPError(http.METHOD_NOT_ALLOWED)
def update(self, *args, **kwargs):
raise HTTPError(http.METHOD_NOT_ALLOWED)
def delete(self, *args, **kwargs):
raise HTTPError(http.METHOD_NOT_ALLOWED)
def write(self, chunk):
emitter = self.emitter_class(chunk)
super(RESTHandler, self).write(emitter.render(self.request))
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
method = self.request.method
try:
if method not in self.MAPPED_METHODS.keys():
raise HTTPError(http.METHOD_NOT_ALLOWED)
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if method == "POST" and self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
self.prepare()
if not self._finished:
function = getattr(self, self.MAPPED_METHODS[method])
function(*args, **kwargs)
if self._auto_finish and not self._finished:
self.finish()
except Exception, e:
self._handle_request_exception(e)
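# Editorial sketch (not part of the original file): a minimal handler built on
# RESTHandler above.  BookHandler and its data are hypothetical; only 'retrieve'
# is overridden, so POST/PUT/DELETE still answer 405 via the default methods.
class BookHandler(RESTHandler):
    BOOKS = {'1': {'title': 'Example'}}  # placeholder data
    def retrieve(self, book_id):
        book = self.BOOKS.get(book_id)
        if book is None:
            raise HTTPError(http.NOT_FOUND)
        self.write(book)  # rendered by the emitter selected in __init__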
|
[
"devnull@localhost"
] |
devnull@localhost
|
04db73cf6465fb41c21a66c052f385d4b408274e
|
8e39a4f4ae1e8e88d3b2d731059689ad5b201a56
|
/x11-libs/gdk-pixbuf/gdk-pixbuf-2.31.7.py
|
675fbd31c023b5284965677d53ef86aacdbfd165
|
[] |
no_license
|
wdysln/new
|
d5f5193f81a1827769085932ab7327bb10ef648e
|
b643824b26148e71859a1afe4518fe05a79d333c
|
refs/heads/master
| 2020-05-31T00:12:05.114056
| 2016-01-04T11:38:40
| 2016-01-04T11:38:40
| 37,287,357
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
metadata = """
summary @ An image loading library for GTK+ V2
homepage @ http://www.gtk.org/
license @ GPL2
src_url @ ftp://ftp.gnome.org/pub/gnome/sources/gdk-pixbuf/2.31/gdk-pixbuf-$version.tar.xz
arch @ ~x86_64
options @ debug introspection X jpeg tiff
"""
depends = """
runtime @ sys-libs/glib media-libs/libpng x11-libs/libX11 media-libs/tiff media-libs/jpeg
build @ dev-util/pkg-config sys-devel/gettext media-libs/jasper
"""
def configure():
export("HOME", build_dir)
conf("--disable-static \
--disable-silent-rules \
--with-libjasper \
--with-x11 \
--with-included-loaders=png")
def build():
export("HOME", build_dir)
make()
def install():
export("HOME", build_dir)
raw_install("DESTDIR=%s" % install_dir)
insdoc("COPYING", "AUTHORS")
def post_install():
if not system("/usr/bin/gdk-pixbuf-query-loaders --update-cache"):
raise BuildError
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
a9d8b7ed7b8a9422898cab5220c4a7fcf726652a
|
74f6c5b2fbc1c868c7fabda7486eabfdb4466f1b
|
/5.py
|
dd03195d440cc3d05f2979a902950e0f68dc5b20
|
[] |
no_license
|
jcstoltzfus/project-euler
|
0eec3e3275f1d4d0a2cca2cc1df7426fcac0a89d
|
50439a22d65eb029794c25346dc62702558728fb
|
refs/heads/master
| 2020-03-30T11:19:52.795796
| 2018-10-01T22:33:56
| 2018-10-01T22:33:56
| 151,167,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
bol = False
num = 1
while(not bol):
isTrue = True
for i in range(1,21):
if(not (num % i == 0)):
isTrue = False
break
if(isTrue):
bol = True
else: num += 1
print num
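# Editorial note (not part of the original file): the loop above searches for the
# smallest number divisible by 1..20, i.e. lcm(1, ..., 20) = 232792560, which can
# be computed directly, e.g. in Python 3:
#
#     from functools import reduce
#     from math import gcd
#     print(reduce(lambda a, b: a * b // gcd(a, b), range(1, 21)))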
|
[
"stoltzfus.eelman@gmail.com"
] |
stoltzfus.eelman@gmail.com
|
b6275b3b63ff9b7896792ace7da39bcb430ca2a8
|
0060b1d9ea546854e9f5d40aa39ec8e4ec4b774f
|
/mobility/parsing.py
|
111fb27da9461c99e5573019398e9c6160c45a8b
|
[] |
no_license
|
VinayVPai/covid19-analysis
|
fe766683ced91ac20d99ce2a484b4ba2086e628f
|
ba73cf2766057a3480ab17308a4481a68395d2be
|
refs/heads/master
| 2022-04-15T08:37:09.496150
| 2020-04-13T05:04:05
| 2020-04-13T05:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,316
|
py
|
from collections import defaultdict
import glob
import os
import sys
import logging
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from geoIds import GEO_IDS
# PyMuPDF
import fitz
def parse_stream(stream):
data_raw = []
data_transformed = []
rotparams = None
npatches = 0
for line in stream.splitlines():
if line.endswith(" cm"):
# page 146 of https://www.adobe.com/content/dam/acom/en/devnet/pdf/pdfs/pdf_reference_archives/PDFReference.pdf
rotparams = list(map(float, line.split()[:-1]))
elif line.endswith(" l"):
x,y = list(map(float, line.split()[:2]))
a,b,c,d,e,f = rotparams
xp = a*x+c*y+e
yp = b*x+d*y+f
data_transformed.append([xp,yp])
data_raw.append([x,y])
elif line.endswith(" m"):
npatches += 1
else:
pass
data_raw = np.array(data_raw)
basex, basey = data_raw[-1]
good = False
if basex == 0.:
data_raw[:,1] = basey - data_raw[:,1]
data_raw[:,1] *= 100/60.
data_raw = data_raw[data_raw[:,1]!=0.]
if npatches == 1: good = True
return dict(data=np.array(data_raw), npatches=npatches, good=good)
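# Editorial sketch (not part of the original file): the stream parsed above is a
# PDF content stream; operands precede the operator, so a fragment might read
# (numbers made up):
#
#     1 0 0 1 36.0 500.0 cm    <- set the transformation matrix a b c d e f
#     0 0 m                    <- moveto, start of a path ("patch")
#     10.0 12.5 l              <- lineto, one (x, y) sample of the plotted curve
#     20.0 11.0 l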
def parse_page(doc, ipage, verbose=False):
categories = [
"Retail & recreation",
"Grocery & pharmacy",
"Parks",
"Transit stations",
"Workplace",
"Residential",
]
counties = []
curr_county = None
curr_category = None
data = defaultdict(lambda: defaultdict(list))
pagetext = doc.getPageText(ipage)
lines = pagetext.splitlines()
tickdates = list(filter(lambda x:len(x.split())==3, set(lines[-10:])))
for line in lines:
# don't need these lines at all
if ("* Not enough data") in line: continue
if ("needs a significant volume of data") in line: continue
# if we encountered a category, add to dict, otherwise
# push all seen lines into the existing dict entry
if any(line.startswith(c) for c in categories):
curr_category = line
elif curr_category:
data[curr_county][curr_category].append(line)
# If it doesn't match anything, then it's a county name
if (all(c not in line for c in categories)
and ("compared to baseline" not in line)
and ("Not enough data" not in line)
and ('Mobility trends ' not in line)
):
# saw both counties already
if len(data.keys()) == 2: break
counties.append(line)
curr_county = line
newdata = {}
for county in data:
newdata[county] = {}
for category in data[county]:
# if the category text ends with a space, then there was a star/asterisk there
# indicating lack of data. we skip these.
if category.endswith(" "): continue
temp = [x for x in data[county][category] if "compared to baseline" in x]
if not temp: continue
percent = int(temp[0].split()[0].replace("%",""))
newdata[county][category.strip()] = percent
data = newdata
tomatch = []
for county in counties:
for category in categories:
if category in data[county]:
tomatch.append([county,category,data[county][category]])
if verbose:
logging.debug(len(tomatch))
logging.debug(data)
goodplots = []
xrefs = sorted(doc.getPageXObjectList(ipage), key=lambda x:int(x[1].replace("X","")))
for _, xref in enumerate(xrefs):
stream = doc.xrefStream(xref[0]).decode()
info = parse_stream(stream)
if not info["good"]: continue
goodplots.append(info)
if verbose:
logging.debug(len(goodplots))
ret = []
if len(tomatch) != len(goodplots):
return ret
for m,g in zip(tomatch,goodplots):
xs = g["data"][:,0]
ys = g["data"][:,1]
maxys = ys[np.where(xs==xs.max())[0]]
maxy = maxys[np.argmax(np.abs(maxys))]
# parsed the tick date labels as text. find the min/max (first/last)
# and make evenly spaced dates, one per day, to assign to x values between
# 0 and 200 (the width of the plots).
ts = list(map(lambda x: pd.Timestamp(x.split(None,1)[-1] + ", 2020"), tickdates))
low, high = min(ts), max(ts)
dr = list(map(lambda x:str(x).split()[0], pd.date_range(low, high, freq="D")))
lutpairs = list(zip(np.linspace(0,200,len(dr)),dr))
dates = []
values = []
asort = xs.argsort()
xs = xs[asort]
ys = ys[asort]
for x,y in zip(xs,ys):
date = min(lutpairs, key=lambda v:abs(v[0]-x))[1]
dates.append(date)
values.append(round(y,3))
ret.append(dict(
county=m[0],category=m[1],change=m[2],
values=values,
dates=dates,
changecalc=maxy,
))
return ret
def parse_page_total(doc, ipage, verbose=False):
"""
First two pages
"""
categories = [
"Retail & recreation",
"Grocery & pharmacy",
"Parks",
"Transit stations",
"Workplaces", # note the s at the end
"Residential",
]
curr_category = None
data = defaultdict(lambda: defaultdict(list))
pagetext = doc.getPageText(ipage)
lines = pagetext.splitlines()
# tickdates = list(filter(lambda x:len(x.split())==3, set(lines[-10:])))
tickdates = []
for line in lines:
# don't need these lines at all
if ("* Not enough data") in line: continue
if ("needs a significant volume of data") in line: continue
if 'Mobility trends ' in line or 'hubs' in line: continue
# if pred_is_county_name and
# if we encountered a category, add to dict, otherwise
# push all seen lines into the existing dict entry
if any(line.startswith(c) for c in categories):
curr_category = line
elif line[:3] in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'):
tickdates.append(line)
elif line[0] not in ('+', '-'):
continue
elif curr_category:
data[curr_category] = data.get(curr_category, []) + [line]
newdata = {}
for category in data:
# if the category text ends with a space, then there was a star/asterisk there
# indicating lack of data. we skip these.
if category.endswith(" "): continue
temp = data[category][0]
percent = int(temp.split()[0].replace("%",""))
newdata[category.strip()] = percent
data = newdata
tomatch = []
for category in categories:
if category in data:
tomatch.append([category,data[category]])
if verbose:
logging.debug(len(tomatch))
logging.debug(data)
goodplots = []
xrefs = sorted(doc.getPageXObjectList(ipage), key=lambda x:int(x[1].replace("X","")))
for _, xref in enumerate(xrefs):
stream = doc.xrefStream(xref[0]).decode()
info = parse_stream(stream)
if not info["good"]:
logging.warning('Bad info, skipping')
continue
goodplots.append(info)
if verbose:
logging.debug(len(goodplots))
ret = []
if len(tomatch) != len(goodplots):
return ret
for m,g in zip(tomatch,goodplots):
xs = g["data"][:,0]
ys = g["data"][:,1]
maxys = ys[np.where(xs==xs.max())[0]]
maxy = maxys[np.argmax(np.abs(maxys))]
# parsed the tick date labels as text. find the min/max (first/last)
# and make evenly spaced dates, one per day, to assign to x values between
# 0 and 200 (the width of the plots).
ts = list(map(lambda x: pd.Timestamp(x.split(None,1)[-1] + ", 2020"), tickdates))
low, high = min(ts), max(ts)
dr = list(map(lambda x:str(x).split()[0], pd.date_range(low, high, freq="D")))
lutpairs = list(zip(np.linspace(0,200,len(dr)),dr))
dates = []
values = []
asort = xs.argsort()
xs = xs[asort]
ys = ys[asort]
for x,y in zip(xs,ys):
date = min(lutpairs, key=lambda v:abs(v[0]-x))[1]
dates.append(date)
values.append(round(y,3))
ret.append(dict(
category=m[0],change=m[1],
values=values,
dates=dates,
changecalc=maxy,
))
return ret
def build_pdf_path(state, us, date):
if us:
return f"us_pdfs/{date}/{date}_US_{state}_Mobility_Report_en.pdf"
else:
return f"pdfs/{date}/{date}_{state}_Mobility_Report_en.pdf"
def parse_state(state, us, date):
pdfpath = build_pdf_path(state, us, date)
logging.info(f"Parsing pages 2+ for state {state} : ", pdfpath)
doc = fitz.Document(pdfpath)
data = []
for i in range(2, doc.pageCount-1):
for entry in parse_page(doc, i):
entry["state"] = state
entry["page"] = i
data.append(entry)
df = pd.DataFrame(data)
try:
ncounties = df['county'].nunique()
except KeyError:
ncounties = 0
logging.info(f"Parsed {len(df)} plots for {ncounties} counties in {state}")
# try:
# return df[["state","county","category","change","changecalc","dates", "values","page"]]
# except KeyError:
# # in this case, df is empty
# return df[["state", "category", "change", "changecalc", "dates", "values", "page"]]
return df
def parse_state_total(state, us, date):
"""
First two pages
"""
pdfpath = build_pdf_path(state, us, date)
logging.info(f"Parsing two first pages of state {state}: ", pdfpath)
doc = fitz.Document(pdfpath)
data = []
for i in range(2):
for entry in parse_page_total(doc, i):
entry['state'] = state
entry['page'] = i
entry['county'] = 'total'
data.append(entry)
df = pd.DataFrame(data)
return df
def parse_all(date, us=False):
pdfglob = glob.glob(f"us_pdfs/{date}/*.pdf") if us else glob.glob(f"pdfs/{date}/*.pdf")
if us:
states = [x.split("_US_",1)[1].split("_Mobility",1)[0] for x in pdfglob]
else:
states = [x.split("_")[1] for x in pdfglob]
dfs = []
for state in tqdm(states):
try:
state_counties = parse_state(state, us=us, date=date)
except (KeyError, IndexError) as e:
logging.warning(str(e))
state_counties = pd.DataFrame()
state = parse_state_total(state, us=us, date=date)
dfs += [state, state_counties]
df = pd.concat(dfs).reset_index(drop=True)
data = []
for _, row in tqdm(df.iterrows(), total=df.shape[0]):
# do a little clean up and unstack the dates/values as separate rows
dorig = dict()
dorig["state"] = row["state"].replace("_"," ")
dorig["county"] = row["county"]
dorig["category"] = row["category"].replace(" & ","/").replace(" ","_").lower()
dorig["page"] = row["page"]
dorig["change"] = row["change"]
dorig["changecalc"] = row["changecalc"]
for x,y in zip(row["dates"], row["values"]):
d = dorig.copy()
d["date"] = x
d["value"] = y
data.append(d)
df = pd.DataFrame(data)
df = (df.assign(value=lambda f: f['value'] * (f['change'] / f['changecalc']))
.replace("workplaces", 'workplace')
.drop('changecalc', axis=1))
if not us:
df = (df.rename({'state': 'country_geoid',
'county': 'region'}, axis=1)
.assign(country=lambda f: f['country_geoid'].map(GEO_IDS)))
return df
if __name__ == "__main__":
dates = ['2020-03-29', '2020-04-05']
us = len(sys.argv) > 1 and sys.argv[1].lower() == 'us'
for date in dates:
filename = f'{date}_us' if us else f'{date}_world'
df = parse_all(date, us=us)
df.to_json(f'../dist/static/mobility/{filename}.json.gz', orient='records', indent=2)
df.to_csv(f'../dist/static/mobility/{filename}.csv.gz', index=False)
|
[
"horace.guy.k@gmail.com"
] |
horace.guy.k@gmail.com
|
6bf29746917b0fca38a29cf1bca79320d73ad8f1
|
d7a4701e18be0f38820f5c15d80099fda6385f9f
|
/ABC185/B.py
|
369adbcaf93345af9b9239f966033d645742dde9
|
[] |
no_license
|
shiki7/Atcoder
|
979a6f0eeb65f3704ea20a949940a0d5e3434579
|
c215c02d3bfe1e9d68846095b1bd706bd4557dd0
|
refs/heads/master
| 2022-05-21T16:59:01.529489
| 2022-04-29T11:26:42
| 2022-04-29T11:26:42
| 201,536,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
N, M, T = map(int, input().split())
AB = list(list(map(int, input().split())) for _ in range(M))
total = N
for i in range(M):
a = AB[i][0]
b = AB[i][1]
if i == 0:
total -= AB[i][0]
else:
total -= AB[i][0] - AB[i-1][1]
if total <= 0:
print('No')
exit()
total += AB[i][1] - AB[i][0]
total = min(total, N)
total -= T - AB[-1][1]
if total <= 0:
print('No')
exit()
print('Yes')
|
[
"noreply@github.com"
] |
shiki7.noreply@github.com
|
f96a638a4cb5da10a1c6fd376a557e05cb7893a0
|
dadbed9984cdc9d9a1e84a3c9929ac4c9a58c370
|
/src/datasets/adult.py
|
90a374fcd2c2e68a40c05c2377bb8c4ebffc1ab4
|
[
"MIT"
] |
permissive
|
GRSEB9S/deepSVDD
|
74efac2d3c997aff07c85d30587883ef55fd1030
|
caf44c93914414ca26525fec69b780e920b9d061
|
refs/heads/master
| 2021-07-08T21:25:50.738316
| 2017-10-06T08:30:49
| 2017-10-06T08:30:49
| 109,243,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
from datasets.base import DataLoader
from config import Configuration as Cfg
import numpy as np
import pandas as pd
class Adult_DataLoader(DataLoader):
def __init__(self):
DataLoader.__init__(self)
self.dataset_name = "adult"
self.n_train = 32561
self.n_val = 16281
self.n_test = 16281
self.seed = Cfg.seed
if Cfg.ad_experiment:
self.n_classes = 2
else:
self.n_classes = 10
Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size))
self.data_path = "../data/adult.data"
self.on_memory = True
Cfg.store_on_gpu = True
# load data from disk
self.load_data()
def load_data(self):
print("Loading data...")
names = ["age", "workclass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship", "race", "sex",
"capital-gain", "capital-loss", "hours-per-week",
"native-country", "label"]
# load data
df = pd.read_csv(self.data_path, sep=',\s', header=None, names=names,
na_values=['?'], engine='python')
# remove NAs
df = df.dropna()
        # convert categorical variables / one-hot encode categorical features
        # (completion sketch: pd.get_dummies is one straightforward option here)
        X = pd.get_dummies(df.drop(columns=["label"]))
        # extract X and y: "label" is the target column
        y = df["label"]
        # NOTE: the original line concatenated self.data_path + 'adult.test', which
        # yields "../data/adult.dataadult.test"; the test split presumably sits next
        # to the training file.
        data_test = np.genfromtxt(self.data_path.replace("adult.data", "adult.test"),
                                  dtype=str, delimiter=",")
|
[
"lukas.ruff@gmail.com"
] |
lukas.ruff@gmail.com
|
f88e046174b76a32b75f5c6a78c8e3b5ce4518a2
|
d381b1834c6ff5102f66f2710d1738500e108519
|
/字节跳动/t1.py
|
b0dedefbfd8eb31d951738b9dbfe16b68352618c
|
[] |
no_license
|
hshrimp/test_school
|
d930c8eaafba450cc57e4b03d1240c3661a54878
|
dde5033c944af294abf5876ceb1da7f6f3b3c0e4
|
refs/heads/master
| 2020-03-28T13:31:25.210726
| 2018-10-08T10:30:23
| 2018-10-08T10:30:23
| 148,402,081
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : wushaohong
# abc3
def find(string):
    # length of the longest substring without repeated characters
    max_len = 0
    for i in range(len(string)):
        temp = []
        for j in range(i, len(string)):
            if string[j] in temp:
                break
            temp.append(string[j])
        if len(temp) > max_len:
            max_len = len(temp)
    print(max_len)
if __name__ == '__main__':
s = input()
find(s)
|
[
"850566163@qq.com"
] |
850566163@qq.com
|
c76ada545351b735242bd62fc9916b53aad7ebfe
|
e882573ccbfe73a325f38a0ae4550259e9591332
|
/mrcnn/config.py
|
18b36c1e1ddb39bd5d3aa6f8db8113ec8faf284c
|
[
"MIT"
] |
permissive
|
liruiqi0515/mask-rcnn-point
|
f4f4a3ef513c133c068c7506bd78dc7f8965527e
|
8a4da7067b7eb0c638199a7af988a7573a9fffbc
|
refs/heads/main
| 2023-07-15T23:44:06.591872
| 2021-08-31T05:11:50
| 2021-08-31T05:11:50
| 401,557,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,393
|
py
|
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet50"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 128
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
    # Minimum scaling ratio. Checked after IMAGE_MIN_DIM and can force further
    # up scaling. For example, if set to 2 then images are scaled up to double
    # the width and height, or more, even if IMAGE_MIN_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 50
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
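        # i.e. image_id (1) + original_image_shape (3) + image_shape (3)
        # + window (y1, x1, y2, x2) + scale (1) + one active-class flag per class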
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
|
[
"liruiqi.0515@byteance.com"
] |
liruiqi.0515@byteance.com
|
520f3cc8fe34f78313a662372111448912232de3
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2221/60738/311747.py
|
a5dbf23c47d0228b74c67eef7c30cc21c88d4a22
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
n=input()
if n=="300 699":
print(3)
elif n=="3 3":
print(1)
elif n=="4 4":
print(0)
elif n=="3 2":
print(0)
elif n=="20 19":
print(1)
elif n=="12 17":
print(2)
else:
print(n)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2049cf24b24885439770e7d337d061794c73e4fc
|
e44ca0dbee6e598230b634ffc1667c890986ff98
|
/scripts/dir_crawler.py
|
58997ad45b1dc2ad158f0d13a7a21dcf8730d18f
|
[
"Apache-2.0"
] |
permissive
|
ibest/APER
|
393e5d3c8f9bdae6a704ab084188abfa5d25724e
|
ac39ae2506672b256d6215e3331fe97f5ed59ad5
|
refs/heads/master
| 2021-01-01T05:42:20.505854
| 2015-02-25T19:00:27
| 2015-02-25T19:00:27
| 22,858,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
#!/usr/bin/python
bowtie_build = "/mnt/home/grcuser/GRC_projects/GRC_miseq-quality_UI/Phi-X"
fasta_file = "/mnt/home/grcuser/GRC_projects/GRC_miseq-quality_UI/Phi-X.fa"
def directory_crawler(pathname):
import sys
import os
lst_R1_R2= []
i = 0
if pathname[-1] != '/':
pathname += '/'
if os.path.isdir(pathname):
for flds in os.listdir(pathname):
if os.path.isdir(pathname + flds):
run_dir = pathname + flds + '/'
lst_R1_R2 = Parse_Files_To_Reads(run_dir)
if lst_R1_R2[0] != '' and lst_R1_R2[1] != '':
Command_Call(lst_R1_R2, "/mnt/home/stre3949/Bowtie2_Global/" + flds)
#sys.exit(0)
def Parse_Files_To_Reads_BWA(run_dir):
import os
R1_string = ""
R2_string = ""
bwa_string = ""
for dir_path, dir_name, file_names in os.walk(run_dir):
for file in file_names:
if "_R1_" in file and "fastq" in file:
if os.path.isfile(dir_path + '/' + file.replace("_R1_", "_R4_")):
R1_string = (dir_path + '/' + file)
R2_string = (dir_path + '/' + file.replace("_R1_", "_R4_"))
elif os.path.isfile(dir_path + '/' + file.replace("_R1_", "_R2_")):
R1_string = (dir_path + '/' + file)
R2_string = (dir_path + '/' + file.replace("_R1_", "_R2_"))
bwa_string += "$(bwa mem /mnt/home/grcuser/miseq_quality/Phi-X.fa " + R1_string + " " + R2_string + ") "
return [bwa_string, "nothing"]
def Parse_Files_To_Reads(run_dir):
import os
R1_string = ""
R2_string = ""
for dir_path, dir_name, file_names in os.walk(run_dir):
for file in file_names:
if "_R1_" in file and "fastq" in file:
if os.path.isfile(dir_path + '/' + file.replace("_R1_", "_R4_")):
R1_string += (dir_path + '/' + file + ",")
R2_string += (dir_path + '/' + file.replace("_R1_", "_R4_") + ",")
elif os.path.isfile(dir_path + '/' + file.replace("_R1_", "_R2_")):
R1_string += (dir_path + '/' + file + ",")
R2_string += (dir_path + '/' + file.replace("_R1_", "_R2_") + ",")
return [R1_string[:-1], R2_string[:-1]]
def Command_Call(lst_R1_R2_reads, name):
import os
command_string = "bowtie2 -I 0 -X 1500 -p 38 -x " + bowtie_build + " -1 " + lst_R1_R2_reads[0] + " -2 " + lst_R1_R2_reads[1] + " 2> bowtielog.out | samtools calmd -S -u - " + fasta_file + " | samtools view - | tee >(~/APER/source/qual_metric -r -o " + name + ".b2_global_r_ate) >(~/Quality_Scores/qual_metric -o " + name + ".qual_all) | ~/APER/source/qual_metric -o " + name + ".b2_global_ate"
command_string = "bowtie2 --local -I 0 -X 1500 -p 38 -x " + bowtie_build + " -1 " + lst_R1_R2_reads[0] + " -2 " + lst_R1_R2_reads[1] + " 2> bowtielog.out | samtools calmd -S -u - " + fasta_file + " | samtools view - | tee >(~/APER/source/qual_metric -r -o " + name + ".b2_local_r_ate) | ~/APER/source/qual_metric -o " + name + ".b2_local_ate"
print command_string
#os.system(command_string)
def Command_Call_BWA(lst_R1_R2_reads, name):
import os
command_string = "echo " + lst_R1_R2_reads[0] + " | samtools calmd -S -u - /mnt/home/grcuser/miseq_quality/Phi-X.fa | samtools view - | /mnt/home/stre3949/SAM_Metric/Quality_Metric/qual_metric -o " + name + ".ate"
print command_string
#os.system(command_string)
def main():
import sys
import os
for directories in range(1, len(sys.argv)):
if os.path.isdir(sys.argv[directories]):
directory_crawler(sys.argv[directories])
main()
|
[
"stre3949@petunia.ibest.uidaho.edu"
] |
stre3949@petunia.ibest.uidaho.edu
|
d4dc078e7a322abe14646c024db13482d5c1dec0
|
ab025e9fe3e7af6ebb34df6ac8ed4bf9d5222c24
|
/utils/data_generator.py
|
a88b0051dc007d0226de0b1bc38d4d44e3073c32
|
[
"MIT"
] |
permissive
|
PHOENIX26012000/dcase2018_task4
|
7f6b1e536f1cc3379f235c6251ac62c070ec9cbf
|
d165ad27b9e1990256d5b2e73d4fb74826978301
|
refs/heads/master
| 2022-04-25T01:39:47.137051
| 2018-08-26T21:59:46
| 2018-08-26T21:59:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,131
|
py
|
import os
import sys
import numpy as np
import h5py
import time
import logging
from utilities import calculate_scalar, scale
import config
class DataGenerator(object):
def __init__(self, train_hdf5_path, validate_hdf5_path, batch_size,
validate, seed=1234):
"""Data generator.
Args:
train_hdf5_path: str, path of train hdf5 file
          validate_hdf5_path: str, path of validate hdf5 file
batch_size: int
validate: bool
seed: int
"""
self.random_state = np.random.RandomState(seed)
self.validate_random_state = np.random.RandomState(0)
lb_to_ix = config.lb_to_ix
self.batch_size = batch_size
self.validate = validate
# Load data
load_time = time.time()
hf = h5py.File(train_hdf5_path, 'r')
self.train_audio_names = np.array([s.decode() for s in hf['audio_name'][:]])
self.train_x = hf['feature'][:]
self.train_y = hf['target'][:]
hf.close()
hf = h5py.File(validate_hdf5_path, 'r')
self.validate_audio_names = np.array([s.decode() for s in hf['audio_name']])
self.validate_x = hf['feature'][:]
self.validate_y = hf['target'][:]
hf.close()
logging.info('Loading data time: {:.3f} s'
''.format(time.time() - load_time))
# Get train & validate audio indexes
self.audio_names = np.concatenate(
(self.train_audio_names, self.validate_audio_names), axis=0)
self.x = np.concatenate((self.train_x, self.validate_x), axis=0)
self.y = np.concatenate((self.train_y, self.validate_y), axis=0)
if validate:
self.train_audio_indexes = np.arange(len(self.train_audio_names))
self.validate_audio_indexes = np.arange(
len(self.train_audio_names),
len(self.train_audio_names) + len(self.validate_audio_names))
else:
self.train_audio_indexes = np.arange(len(self.audio_names))
self.validate_audio_indexes = np.array([])
logging.info("Training audios: {}".format(
len(self.train_audio_indexes)))
logging.info("Validation audios: {}".format(
len(self.validate_audio_indexes)))
# Calculate scalar
(self.mean, self.std) = calculate_scalar(
self.x[self.train_audio_indexes])
def generate_train(self):
"""Generate mini-batch data for training.
"""
batch_size = self.batch_size
indexes = np.array(self.train_audio_indexes)
samples = len(indexes)
self.random_state.shuffle(indexes)
iteration = 0
pointer = 0
while True:
# Reset pointer
if pointer >= samples:
pointer = 0
self.random_state.shuffle(indexes)
# Get batch indexes
batch_indexes = indexes[pointer : pointer + batch_size]
pointer += batch_size
iteration += 1
batch_x = self.x[batch_indexes]
batch_y = self.y[batch_indexes]
# Transform data
batch_x = self.transform(batch_x)
batch_y = batch_y.astype(np.float32)
yield batch_x, batch_y
def generate_validate(self, data_type, shuffle=False, max_iteration=None):
"""Generate mini-batch data for validation.
Args:
data_type: 'train' | 'validate'
shuffle: bool
          max_iteration: int, maximum number of iterations (to speed up validation)
"""
batch_size = self.batch_size
if data_type == 'train':
indexes = np.array(self.train_audio_indexes)
elif data_type == 'validate':
indexes = np.array(self.validate_audio_indexes)
else:
raise Exception("Invalid data_type!")
audios_num = len(indexes)
if shuffle:
self.validate_random_state.shuffle(indexes)
iteration = 0
pointer = 0
while True:
if iteration == max_iteration:
break
if pointer >= audios_num:
break
# Get batch indexes
batch_indexes = indexes[pointer : pointer + batch_size]
pointer += batch_size
iteration += 1
batch_x = self.x[batch_indexes]
batch_y = self.y[batch_indexes]
batch_audio_names = self.audio_names[batch_indexes]
# Transform data
batch_x = self.transform(batch_x)
batch_y = batch_y.astype(np.float32)
yield batch_x, batch_y, batch_audio_names
def transform(self, x):
"""Transform data.
Args:
x: (batch_x, seq_len, freq_bins) | (seq_len, freq_bins)
Returns:
Transformed data.
"""
return scale(x, self.mean, self.std)
class TestDataGenerator(DataGenerator):
def __init__(self, train_hdf5_path, validate_hdf5_path, eval_hdf5_path,
batch_size):
"""Test data generator.
Args:
train_hdf5_path: str, path of training hdf5 file
          validate_hdf5_path: str, path of validation hdf5 file
eval_hdf5_path: str, path of evaluation hdf5 file
batch_size: int
"""
super(TestDataGenerator, self).__init__(
train_hdf5_path=train_hdf5_path,
validate_hdf5_path=validate_hdf5_path,
batch_size=batch_size,
validate=False)
# Load data
load_time = time.time()
hf = h5py.File(eval_hdf5_path, 'r')
self.eval_audio_names = np.array(
[name.decode() for name in hf['audio_name'][:]])
self.eval_x = hf['feature'][:]
logging.info("Load data time: {}".format(time.time() - load_time))
def generate_eval(self):
audios_num = len(self.eval_audio_names)
audio_indexes = np.arange(audios_num)
batch_size = self.batch_size
pointer = 0
while True:
# Reset pointer
if pointer >= audios_num:
break
# Get batch indexes
batch_audio_indexes = audio_indexes[pointer: pointer + batch_size]
pointer += batch_size
batch_x = self.eval_x[batch_audio_indexes]
batch_audio_names = self.eval_audio_names[batch_audio_indexes]
# Transform data
batch_x = self.transform(batch_x)
yield batch_x, batch_audio_names
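# Editorial sketch (not part of the original file): typical use of the generators
# above; the hdf5 paths are hypothetical.
#
#     gen = DataGenerator(train_hdf5_path='train.h5',
#                         validate_hdf5_path='validate.h5',
#                         batch_size=32, validate=True)
#     for batch_x, batch_y in gen.generate_train():
#         ...  # one training step; break out when the epoch budget is reached
#     for batch_x, batch_y, names in gen.generate_validate('validate', max_iteration=10):
#         ...  # capped validation pass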
|
[
"qiuqiangkong@gmail.com"
] |
qiuqiangkong@gmail.com
|