Dataset columns:

| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |

repo_name: extsui/7SegFinger | path: test_8digit_and_latch.py | copies: 1 | size: 4734
# -*- coding: utf-8 -*-
import spidev
import math
def reverse_bit_order(x):
x_reversed = 0x00
if (x & 0x80):
x_reversed |= 0x01
if (x & 0x40):
x_reversed |= 0x02
if (x & 0x20):
x_reversed |= 0x04
if (x & 0x10):
x_reversed |= 0x08
if (x & 0x08):
x_reversed |= 0x10
if (x & 0x04):
x_reversed |= 0x20
if (x & 0x02):
x_reversed |= 0x40
if (x & 0x01):
x_reversed |= 0x80
return x_reversed
"""
x = (((x & 0x55) << 1) | ((x & 0xAA) >> 1)) & 0xFF
x = (((x & 0x33) << 2) | ((x & 0xCC) >> 2)) & 0xFF
return ((x << 4) | (x >> 4)) & 0xFF
"""
def calc_checksum(array):
sum = 0
for item in array:
sum += item
sum %= 256
return (0xFF - sum) % 256
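# Minimal sanity check (hypothetical helper, not used by this script): because
# calc_checksum() returns (0xFF - sum) % 256, every complete frame -- type byte,
# payload and checksum -- sums to 0xFF modulo 256. A receiver could validate a
# frame along these lines:
def frame_is_valid(frame):
    """Return True if a complete frame (payload + checksum) sums to 0xFF mod 256."""
    return sum(frame) % 256 == 0xFF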
spi = spidev.SpiDev()
spi.open(0, 0)
#
# CS+ data send/receive timing setting, type 1
#
# SCK:  ̄ ̄|_| ̄|_| ̄|_| ̄...
# SOp: last bit -> <D7> <D6> <D5> ... -> first bit
#
spi.mode = 0
#
# SPI clock frequency
#  - 500 kHz: default
#  - 1 MHz: OK
#  - 2 MHz: NG (data corruption occurs)
# => use 1 MHz.
#
spi.max_speed_hz = 1000000
#
#
#
data = [ 0x60, 0xDA, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01, ]
brightness = [ 100, 100, 100, 100, 100, 100, 100, 100, ]
# Build one frame
xfer_data = [ 0x01 ] # type = display data
xfer_data.extend(brightness) # display data payload
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
print xfer_data
#
# The Raspberry Pi can only send data MSB-first.
# (There is an spi.lsbfirst member, but it is read-only.)
# => The bits must be reversed before transmission.
# [Reference] http://tightdev.net/SpiDev_Doc.pdf
#
xfer_data = map(reverse_bit_order, xfer_data)
print xfer_data
# Send the frame
spi.writebytes(xfer_data)
import os
os.system('sleep 1')
num_to_pattern = [
0xfc, # 0
0x60, # 1
0xda, # 2
0xf2, # 3
0x66, # 4
0xb6, # 5
0xbe, # 6
0xe4, # 7
0xfe, # 8
0xf6, # 9
]
# LATCH pin assignment
# Use GPIO21 (physical pin 40).
import RPi.GPIO as GPIO
# Refer to pins by function name (e.g. GPIO21) instead of physical pin number.
GPIO.setmode(GPIO.BCM)
# Configure as an output
GPIO.setup(21, GPIO.OUT)
rad = 0.0
while (True):
xfer_data = [ 0x01 ]
brightness[0] = int(math.sin(rad + math.pi/8*0) * 50 + 50)
brightness[1] = int(math.sin(rad + math.pi/8*1) * 50 + 50)
brightness[2] = int(math.sin(rad + math.pi/8*2) * 50 + 50)
brightness[3] = int(math.sin(rad + math.pi/8*3) * 50 + 50)
brightness[4] = int(math.sin(rad + math.pi/8*4) * 50 + 50)
brightness[5] = int(math.sin(rad + math.pi/8*5) * 50 + 50)
brightness[6] = int(math.sin(rad + math.pi/8*6) * 50 + 50)
brightness[7] = int(math.sin(rad + math.pi/8*7) * 50 + 50)
xfer_data.extend(brightness)
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
xfer_data = map(reverse_bit_order, xfer_data)
spi.writebytes(xfer_data)
rad += 0.05 * 2
import datetime as dt
now = dt.datetime.now()
xfer_data = [ 0x00 ]
# data[0] = num_to_pattern[now.year / 1000 % 10]
# data[1] = num_to_pattern[now.year / 100 % 10]
# data[2] = num_to_pattern[now.year / 10 % 10]
# data[3] = num_to_pattern[now.year / 1 % 10]
# data[4] = num_to_pattern[now.month / 10 % 10]
# data[5] = num_to_pattern[now.month / 1 % 10]
# data[6] = num_to_pattern[now.day / 10 % 10]
# data[7] = num_to_pattern[now.day / 1 % 10]
data[0] = num_to_pattern[now.hour / 10 % 10]
data[1] = num_to_pattern[now.hour / 1 % 10]
if (now.microsecond < 500*1000):
data[1] |= 0x01;
data[2] = num_to_pattern[now.minute / 10 % 10]
data[3] = num_to_pattern[now.minute / 1 % 10]
if (now.microsecond < 500*1000):
data[3] |= 0x01;
data[4] = num_to_pattern[now.second / 10 % 10]
data[5] = num_to_pattern[now.second / 1 % 10]
if (now.microsecond < 500*1000):
data[5] |= 0x01;
data[6] = num_to_pattern[now.microsecond / 100000 % 10]
data[7] = num_to_pattern[now.microsecond / 10000 % 10]
# data[4] = num_to_pattern[now.microsecond / 1000 % 10]
# data[5] = num_to_pattern[now.microsecond / 100 % 10]
# data[6] = num_to_pattern[now.microsecond / 10 % 10]
# data[7] = num_to_pattern[now.microsecond / 1 % 10]
xfer_data.extend(data)
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
xfer_data = map(reverse_bit_order, xfer_data)
spi.writebytes(xfer_data)
# Update the display (pulse LATCH high, then low)
GPIO.output(21, GPIO.HIGH)
GPIO.output(21, GPIO.LOW)
# Updating too quickly causes problems, so insert a small delay.
os.system('sleep 0.001')
license: mit | hash: -5,917,152,520,773,421,000 | line_mean: 21.32973 | line_max: 60 | alpha_frac: 0.575996 | autogenerated: false | ratio: 1.960927 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: examachine/bitdrill | path: scripts/merge-itemsets.py | copies: 1 | size: 2635
#! /usr/bin/python
import sys
import string
import os
import copy
import transaction
import pattern
from transaction import decode_items, encode_items
if len(sys.argv)!=5:
print 'usage: prog <db> <freq itemset1> <freq itemset2> <support>'
sys.exit(-1)
else:
dbname = sys.argv[1]
freq1name = sys.argv[2]
freq2name = sys.argv[3]
support = int(sys.argv[4])
db = transaction.read_db(dbname)
freq1 = pattern.read_freq_patterns(freq1name)
freq2 = pattern.read_freq_patterns(freq2name)
# 1st step F1,1 X F2,1
if (not 1 in freq1) or (not 1 in freq2):
print "there are no frequent itemsets to merge"
sys.exit(0)
k = 2 # starting level
freq_previous = set() # used for support
out = file('%s.out.merge.%d' % (dbname, support), 'w')
while 1:
print 'merging frequent itemsets in level %d \n' % k
pruned = 0
candidates = {}
if k == 2:
for xs in freq1[1].iterkeys():
x = decode_items(xs)
for ys in freq2[1].iterkeys():
y = decode_items(ys)
z = x + y
candidates[encode_items(x+y)] = 0 # initial count
else:
for xlen in range(1, k):
ylen = k - xlen
if not xlen in freq1:
continue
for xs in freq1[xlen].iterkeys():
x = decode_items(xs)
if not ylen in freq2:
continue
for ys in freq2[ylen].iterkeys():
y = decode_items(ys)
z = x + y # candidate
prune = False
if xlen > 1:
for item in x:
zprev = copy.copy(z)
zprev.remove(item)
if not encode_items(zprev) in freq_previous:
pruned +=1
prune = True # not supported by subsets
break
if not prune and ylen > 1:
for item in y:
zprev = copy.copy(z)
zprev.remove(item)
if not encode_items(zprev) in freq_previous:
pruned +=1
prune = True # not supported by subsets
break
if not prune:
candidates[encode_items(x+y)] = 0 # initial count
print 'pruned %d candidates, remaining %d candidates \n' % (pruned, len(candidates))
tid = 0
for txn in db.transactions:
t = set(txn)
if tid % 100 == 0:
sys.stdout.write('.')
sys.stdout.flush()
for zs in candidates.keys():
z = decode_items(zs)
if set(z).issubset(t):
candidates[zs] = candidates[zs] + 1
tid += 1
print
freq_previous = set()
for (zs, count) in candidates.iteritems():
print 'candidate', zs
if count >= support:
out.write('%s (%d)\n' % (zs, count))
freq_previous.add(zs)
if len(freq_previous)==0:
print 'end run'
break
print 'number of frequent itemsets is %d\n' % len(freq_previous)
k += 1
out.close()
license: agpl-3.0 | hash: -6,056,729,359,778,043,000 | line_mean: 23.174312 | line_max: 88 | alpha_frac: 0.604554 | autogenerated: false | ratio: 3.092723 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: cheery/textended-edit | path: compositor/__init__.py | copies: 1 | size: 3711
# Used to display minitex boxes on screen.
from minitex import boxmodel
import renderers
class Compositor(object):
def __init__(self, images, debug=False):
self.debug = debug
self.images = images
self.imglayer = renderers.ImageLayer(images)
self.fontlayers = {}
def get_fontlayer(self, font):
if font not in self.fontlayers:
self.fontlayers[font] = renderers.FontLayer(self.images, font)
return self.fontlayers[font]
def close(self):
for fontlayer in self.fontlayers.values():
fontlayer.close()
self.imglayer.close()
def clear(self):
for fontlayer in self.fontlayers.values():
fontlayer.clear()
self.imglayer.clear()
def decor(self, quad, source, color):
if source is None and color is None:
return
if color is None:
color = 1, 1, 1, 1
if isinstance(source, boxmodel.Patch9):
self.imglayer.patch9(quad, self.imglayer.patch9_texcoords(source.source), color)
else:
self.imglayer.quad(quad, self.imglayer.texcoords(source), color)
def compose(self, subj, x, y):
subj.quad = x, y-subj.height, x+subj.width, y+subj.depth
if self.debug:
# Should resolve the path correctly here.
#self.imglayer.patch9(subj.quad, self.imglayer.patch9_texcoords("assets/border-1px.png"), (1.0, 1.0, 1.0, 0.1))
# If the layout needs to be debugged later on, I'll consider rewriting this.
if subj.subj is not None:
self.imglayer.quad((x-1, y-1, x+1, y+1), self.imglayer.texcoords(None), (1.0, 0.0, 0.0, 1.0))
if isinstance(subj, boxmodel.HBox):
for node in subj.contents:
if isinstance(node, boxmodel.Glue):
node.quad = x+node.offset, subj.quad[1], x+node.offset+node.computed, subj.quad[3]
if self.debug:
self.imglayer.quad(node.quad, self.imglayer.texcoords(None), (0.0, 1.0, 0.0, 0.2))
else:
self.compose(node, x+node.offset, y+node.shift)
elif isinstance(subj, boxmodel.VBox):
y = y - subj.height
for node in subj.contents:
if isinstance(node, boxmodel.Glue):
node.quad = subj.quad[0], y+node.offset, subj.quad[2], y+node.offset+node.computed
if self.debug:
self.imglayer.quad(node.quad, self.imglayer.texcoords(None), (1.0, 1.0, 0.0, 0.2))
else:
self.compose(node, x + node.shift, y + node.offset)
elif isinstance(subj, boxmodel.Padding):
left, top, right, bottom = subj.padding
if subj.background is not None or subj.color is not None:
self.decor(subj.quad, subj.background, subj.color)
for node in subj.contents:
if isinstance(node, (boxmodel.HBox, boxmodel.VBox)):
self.compose(node, x + node.offset, y + node.shift)
else:
assert False
elif isinstance(subj, boxmodel.ImageBox):
self.decor(subj.quad, subj.source, subj.color)
elif isinstance(subj, boxmodel.LetterBox):
x0, y0, x1, y1 = subj.quad
p0, p1, p2, p3 = subj.padding
fontlayer = self.get_fontlayer(subj.font)
fontlayer.quad((x0-p0, y0-p1, x1+p2, y1+p3), subj.texcoords, subj.color)
def render(self, x, y, width, height):
self.imglayer.render(x, y, width, height)
for fontlayer in self.fontlayers.values():
fontlayer.render(x, y, width, height)
license: mit | hash: -7,913,254,701,952,263,000 | line_mean: 44.256098 | line_max: 123 | alpha_frac: 0.575856 | autogenerated: false | ratio: 3.445682 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: adamewing/tebreak | path: scripts/picklereads.py | copies: 1 | size: 2261
#!/usr/bin/env python
import os
import pickle
import argparse
import logging
from uuid import uuid4
from collections import defaultdict as dd
logger = logging.getLogger(__name__)
def output_fastq(ins, pickle, uuid):
out_sr_fn = '.'.join(pickle.strip().split('.')[:-1]) + '.' + uuid + '.SR.fastq'
out_dr_fn = '.'.join(pickle.strip().split('.')[:-1]) + '.' + uuid + '.DR.fastq'
sr_count = 0
dr_count = 0
out_sr = open(out_sr_fn, 'w')
out_dr = open(out_dr_fn, 'w')
for read in ins['READSTORE']:
if read.find('.SR/') > 0:
out_sr.write(read)
sr_count += 1
if read.find('.DR/') > 0:
out_dr.write(read)
dr_count += 1
out_sr.close()
out_dr.close()
return out_sr_fn, out_dr_fn, sr_count, dr_count
def main(args):
logger.debug('loading pickle: %s' % args.pickle)
with open(args.pickle, 'rb') as pickin:
insertions = pickle.load(pickin)
logger.debug('finished loading %s' % args.pickle)
logger.debug('raw candidate count: %d' % len(insertions))
uuids = {}
with open(args.uuids) as _:
for line in _:
if not line.startswith('UUID') and not line.startswith ('#'):
uuids[line.strip().split()[0]] = True
for ins in insertions:
if ins['INFO']['ins_uuid'] in uuids:
if len(ins['READSTORE']) == 0:
logger.warning('no reads for insertion: %s' % ins['INFO']['ins_uuid'])
continue
sr_fq, dr_fq, sr_count, dr_count = output_fastq(ins, args.pickle, ins['INFO']['ins_uuid'])
logger.info('wrote %d split reads to %s' % (sr_count, sr_fq))
logger.info('wrote %d discordant reads to %s' % (dr_count, dr_fq))
if __name__ == '__main__':
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(description='output reads supporting insertions')
parser.add_argument('-p', '--pickle', required=True, help='input filename (tebreak.py pickle)')
parser.add_argument('-u', '--uuids', required=True, help='list of UUIDS in a .txt file - can use a tebreak table')
args = parser.parse_args()
main(args)
license: mit | hash: 6,442,492,667,076,628,000 | line_mean: 29.146667 | line_max: 118 | alpha_frac: 0.580716 | autogenerated: false | ratio: 3.43617 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: webcomics/dosage | path: dosagelib/plugins/rhjunior.py | copies: 1 | size: 1471
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
class RHJunior(_ParserScraper):
stripUrl = 'https://www.rhjunior.com/%s/'
imageSearch = '//div[contains(@class, "entry-content")]//img'
multipleImagesPerStrip = True
def __init__(self, name, sub, prev, first, last=None):
super().__init__('RHJunior/' + name)
self.prevSearch = ('//a[@rel="prev"]', '//a[@title="' + prev + '"]')
self.url = self.stripUrl % ('comics/' + sub)
self.firstStripUrl = self.stripUrl % (sub + '-' + first)
if last:
self.url = self.stripUrl % (sub + '-' + last)
self.endOfLife = True
@classmethod
def getmodules(cls):
return (
cls('GoblinHollow', 'goblin-hollow',
'', '0001', last='7'),
cls('NipAndTuck', 'nip-and-tuck',
'Nip and Tuck', '0000'),
cls('QuentynQuinnSpaceRanger', 'quentyn-quinn-space-ranger',
'Quentyn Quinn, Space Ranger', '0001'),
cls('TalesOfTheQuestor', 'tales-of-the-questor',
'Tales of the Questor', 'cover'),
cls('TheJournalOfEnniasLongscript', 'the-journal-of-ennias-longscript',
'', '0001', last='0111'),
cls('TheProbabilityBomb', 'the-probability-bomb',
'the Probability Bomb', 'kickstarter'),
)
license: mit | hash: -8,525,481,723,409,454,000 | line_mean: 38.756757 | line_max: 83 | alpha_frac: 0.558804 | autogenerated: false | ratio: 3.283482 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: zingale/hydro_examples | path: advection/weno.py | copies: 1 | size: 15253
import numpy
from matplotlib import pyplot
import advection
import weno_coefficients
from scipy.integrate import ode
def weno(order, q):
"""
Do WENO reconstruction
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
def weno_M(order, q):
"""
Do WENOM reconstruction following Gerolymos equation (18)
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha_JS = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha_JS[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w_JS = alpha_JS / numpy.sum(alpha_JS)
alpha = w_JS * (C + C**2 - 3 * C * w_JS + w_JS**2) / \
(C**2 + w_JS * (1 - 2 * C))
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
class WENOSimulation(advection.Simulation):
def __init__(self, grid, u, C=0.8, weno_order=3):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.weno_order = weno_order
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "sine_sine":
self.grid.a[:] = numpy.sin(numpy.pi*self.grid.x -
numpy.sin(numpy.pi*self.grid.x) / numpy.pi)
else:
super().init_cond(type)
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve(self, num_periods=1):
""" evolve the linear advection equation using RK4 """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while self.t < tmax:
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if self.t + dt > tmax:
dt = tmax - self.t
# RK4
# Store the data at the start of the step
a_start = g.a.copy()
k1 = dt * self.rk_substep()
g.a = a_start + k1 / 2
k2 = dt * self.rk_substep()
g.a = a_start + k2 / 2
k3 = dt * self.rk_substep()
g.a = a_start + k3
k4 = dt * self.rk_substep()
g.a = a_start + (k1 + 2 * (k2 + k3) + k4) / 6
self.t += dt
def evolve_scipy(self, num_periods=1):
""" evolve the linear advection equation using RK4 """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
class WENOMSimulation(WENOSimulation):
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve_scipy(self, num_periods=1):
""" evolve the linear advection equation using scipy """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compute WENO3 case
xmin = 0.0
xmax = 1.0
nx = 64
order = 3
ng = order+1
g = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = WENOSimulation(g, u, C=0.5, weno_order=3)
s.init_cond("gaussian")
ainit = s.grid.a.copy()
s.evolve(num_periods=1)
pyplot.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", label="exact")
pyplot.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
label="WENO3")
# #-------------------------------------------------------------------------
# # convergence test
# # Note that WENO schemes with standard weights lose convergence at
# # critical points. For high degree critical points they lose more orders.
# # The suggestion in Gerolymos is that you may expect to drop down to
# # order r-1 in the limit.
# # The Gaussian has all odd derivatives vanishing at the origin, so
# # the higher order schemes will lose accuracy.
# # For the Gaussian:
# # This shows clean 5th order convergence for r=3
# # But for r=4-6 the best you get is ~6th order, and 5th order is more
# # realistic
# # For sin(x - sin(x)) type data Gerolymos expects better results
# # But the problem actually appears to be the time integrator
# # Switching to Dormand-Price 8th order from scipy (a hack) will make it
# # work for all cases. With sin(.. sin) data you get 2r - 2 thanks to
# # the one critical point.
#
# problem = "sine_sine"
#
# xmin =-1.0
# xmax = 1.0
## orders = [4]
# orders = [3, 4, 5, 6]
## N1 = [2**4*3**i//2**i for i in range(5)]
## N2 = [2**5*3**i//2**i for i in range(6)]
## N3 = [3**4*4**i//3**i for i in range(5)]
## N4 = [2**(4+i) for i in range(4)]
## N = numpy.unique(numpy.array(N1+N2+N3+N4, dtype=numpy.int))
## N.sort()
## N = [32, 64, 128, 256, 512]
## N = [32, 64, 128]
# N = [24, 32, 54, 64, 81, 108, 128]
#
# errs = []
# errsM = []
#
# u = 1.0
#
# colors="bygrc"
#
# for order in orders:
# ng = order+1
# errs.append([])
# errsM.append([])
# for nx in N:
# print(order, nx)
# gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# su = WENOSimulation(gu, u, C=0.5, weno_order=order)
## guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
## suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
#
# su.init_cond("sine_sine")
## suM.init_cond("sine_sine")
# ainit = su.grid.a.copy()
#
# su.evolve_scipy(num_periods=1)
## suM.evolve_scipy(num_periods=1)
#
# errs[-1].append(gu.norm(gu.a - ainit))
## errsM[-1].append(guM.norm(guM.a - ainit))
#
# pyplot.clf()
# N = numpy.array(N, dtype=numpy.float64)
# for n_order, order in enumerate(orders):
# pyplot.scatter(N, errs[n_order],
# color=colors[n_order],
# label=r"WENO, $r={}$".format(order))
## pyplot.scatter(N, errsM[n_order],
## color=colors[n_order],
## label=r"WENOM, $r={}$".format(order))
# pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
# linestyle="--", color=colors[n_order],
# label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
## pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
## color="k", label=r"$\mathcal{O}(\Delta x^4)$")
#
# ax = pyplot.gca()
# ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
# ax.set_xscale('log')
# ax.set_yscale('log')
#
# pyplot.xlabel("N")
# pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
# fontsize=16)
#
# pyplot.legend(frameon=False)
# pyplot.savefig("weno-converge-sine-sine.pdf")
## pyplot.show()
#-------------- RK4
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 5]
N = [54, 64, 81, 108, 128]
errs = []
u = 1.0
colors="brc"
for order in orders:
ng = order+1
errs.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve(num_periods=5)
errs[-1].append(gu.norm(gu.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
pyplot.plot(N, errs[0][-1]*(N[-1]/N)**(5),
linestyle="--", color=colors[0],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(5))
pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, RK4")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian-rk4.pdf")
# pyplot.show()
#-------------- Gaussian
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 4, 5, 6]
N = [24, 32, 54, 64, 81, 108, 128]
# N = [32, 64, 108, 128]
errs = []
errsM = []
u = 1.0
colors="bygrc"
for order in orders:
ng = order+1
errs.append([])
errsM.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
# guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
# suM.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve_scipy(num_periods=1)
# suM.evolve_scipy(num_periods=1)
errs[-1].append(gu.norm(gu.a - ainit))
# errsM[-1].append(guM.norm(guM.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
# pyplot.scatter(N, errsM[n_order],
# color=colors[n_order],
# label=r"WENOM, $r={}$".format(order))
pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
linestyle="--", color=colors[n_order],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
# pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
# color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, DOPRK8")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian.pdf")
# pyplot.show()
license: bsd-3-clause | hash: -1,032,854,415,870,718,300 | line_mean: 29.386454 | line_max: 81 | alpha_frac: 0.485872 | autogenerated: false | ratio: 2.898708 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: maikelwever/autoaurbuilder | path: autoaurbuilder/build/management/commands/schedule.py | copies: 1 | size: 1383
from django.core.management.base import BaseCommand
from package.models import Package
class Command(BaseCommand):
args = 'None'
help = 'Checks for updates on packages and schedules rebuilds if needed.'
def handle(self, *args, **kwargs):
self.stdout.write("Checking for updates on packages...")
pkg_count = Package.objects.count()
counter = 1
for package in Package.objects.all():
self.stdout.write("\r[{0: <2}/{1: <2}] Updating package {2: <30}".format(
counter, pkg_count, package.name
), ending="")
self.stdout.flush()
package.check_for_updates()
counter += 1
self.stdout.write("Done, now reschedueling builds.")
rebuild_scheduled = []
for package in Package.objects.all():
if package.needs_rebuild():
if package not in rebuild_scheduled:
for i in package.get_dependencies():
if i not in rebuild_scheduled:
rebuild_scheduled.append(i)
if i.needs_rebuild():
i.rebuild()
rebuild_scheduled.append(package)
package.rebuild()
self.stdout.write("Rescheduled: {0}".format(rebuild_scheduled))
self.stdout.write("DONE!")
license: gpl-3.0 | hash: 53,555,113,558,810,240 | line_mean: 35.394737 | line_max: 85 | alpha_frac: 0.550253 | autogenerated: false | ratio: 4.656566 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: jromang/retina-old | path: distinclude/spyderlib/widgets/externalshell/baseshell.py | copies: 1 | size: 12772
# -*- coding: utf-8 -*-
#
# Copyright © 2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
import sys
import os
import os.path as osp
from time import time, strftime, gmtime
from spyderlib.qt.QtGui import (QApplication, QWidget, QVBoxLayout,
QHBoxLayout, QMenu, QLabel, QInputDialog,
QLineEdit, QToolButton)
from spyderlib.qt.QtCore import (QProcess, SIGNAL, QByteArray, QTimer, Qt,
QTextCodec)
locale_codec = QTextCodec.codecForLocale()
# Local imports
from spyderlib.utils.qthelpers import (create_toolbutton, create_action,
add_actions)
from spyderlib.baseconfig import get_conf_path, _
from spyderlib.config import get_icon
def add_pathlist_to_PYTHONPATH(env, pathlist):
# PyQt API 1/2 compatibility-related tests:
assert isinstance(env, list)
assert all([isinstance(path, basestring) for path in env])
pypath = "PYTHONPATH"
pathstr = os.pathsep.join(pathlist)
if os.environ.get(pypath) is not None:
for index, var in enumerate(env[:]):
if var.startswith(pypath+'='):
env[index] = var.replace(pypath+'=',
pypath+'='+pathstr+os.pathsep)
env.append('OLD_PYTHONPATH='+os.environ[pypath])
else:
env.append(pypath+'='+pathstr)
#TODO: code refactoring/cleaning (together with systemshell.py and pythonshell.py)
class ExternalShellBase(QWidget):
"""External Shell widget: execute Python script in a separate process"""
SHELL_CLASS = None
def __init__(self, parent=None, wdir=None, history_filename=None,
show_icontext=True, light_background=True, menu_actions=None,
show_buttons_inside=True, show_elapsed_time=True):
QWidget.__init__(self, parent)
self.menu_actions = menu_actions
self.run_button = None
self.kill_button = None
self.options_button = None
self.icontext_action = None
self.show_elapsed_time = show_elapsed_time
if wdir is None:
wdir = osp.dirname(osp.abspath(self.fname))
self.wdir = wdir if osp.isdir(wdir) else None
self.arguments = ""
self.shell = self.SHELL_CLASS(parent, get_conf_path(history_filename))
self.shell.set_light_background(light_background)
self.connect(self.shell, SIGNAL("execute(QString)"),
self.send_to_process)
self.connect(self.shell, SIGNAL("keyboard_interrupt()"),
self.keyboard_interrupt)
# Redirecting some SIGNALs:
self.connect(self.shell, SIGNAL('redirect_stdio(bool)'),
lambda state: self.emit(SIGNAL('redirect_stdio(bool)'),
state))
self.state_label = None
self.time_label = None
vlayout = QVBoxLayout()
toolbar_buttons = self.get_toolbar_buttons()
if show_buttons_inside:
self.state_label = QLabel()
hlayout = QHBoxLayout()
hlayout.addWidget(self.state_label)
hlayout.addStretch(0)
hlayout.addWidget(self.create_time_label())
hlayout.addStretch(0)
for button in toolbar_buttons:
hlayout.addWidget(button)
vlayout.addLayout(hlayout)
else:
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.addWidget(self.get_shell_widget())
self.setLayout(vlayout)
self.resize(640, 480)
if parent is None:
self.setWindowIcon(self.get_icon())
self.setWindowTitle(_("Console"))
self.t0 = None
self.timer = QTimer(self)
self.process = None
self.is_closing = False
if show_buttons_inside:
self.update_time_label_visibility()
def set_elapsed_time_visible(self, state):
self.show_elapsed_time = state
if self.time_label is not None:
self.time_label.setVisible(state)
def create_time_label(self):
"""Create elapsed time label widget (if necessary) and return it"""
if self.time_label is None:
self.time_label = QLabel()
return self.time_label
def update_time_label_visibility(self):
self.time_label.setVisible(self.show_elapsed_time)
def is_running(self):
if self.process is not None:
return self.process.state() == QProcess.Running
def get_toolbar_buttons(self):
if self.run_button is None:
self.run_button = create_toolbutton(self, text=_("Run"),
icon=get_icon('run.png'),
tip=_("Run again this program"),
triggered=self.start_shell)
if self.kill_button is None:
self.kill_button = create_toolbutton(self, text=_("Kill"),
icon=get_icon('kill.png'),
tip=_("Kills the current process, "
"causing it to exit immediately"))
buttons = [self.run_button]
if self.options_button is None:
options = self.get_options_menu()
if options:
self.options_button = create_toolbutton(self, text=_("Options"),
icon=get_icon('tooloptions.png'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
add_actions(menu, options)
self.options_button.setMenu(menu)
if self.options_button is not None:
buttons.append(self.options_button)
buttons.append(self.kill_button)
return buttons
def set_icontext_visible(self, state):
"""Set icon text visibility"""
for widget in self.get_toolbar_buttons():
if state:
widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
else:
widget.setToolButtonStyle(Qt.ToolButtonIconOnly)
def get_options_menu(self):
self.show_time_action = create_action(self, _("Show elapsed time"),
toggled=self.set_elapsed_time_visible)
self.show_time_action.setChecked(self.show_elapsed_time)
actions = [self.show_time_action]
if self.menu_actions is not None:
actions += [None]+self.menu_actions
return actions
def get_shell_widget(self):
return self.shell
def get_icon(self):
raise NotImplementedError
def show_time(self, end=False):
if self.time_label is None:
return
elapsed_time = time()-self.t0
if elapsed_time > 24*3600: # More than a day...!
format = "%d %H:%M:%S"
else:
format = "%H:%M:%S"
if end:
color = "#AAAAAA"
else:
color = "#AA6655"
text = "<span style=\'color: %s\'><b>%s" \
"</b></span>" % (color, strftime(format, gmtime(elapsed_time)))
self.time_label.setText(text)
def closeEvent(self, event):
if self.process is not None:
self.is_closing = True
self.process.kill()
self.process.waitForFinished(100)
self.disconnect(self.timer, SIGNAL("timeout()"), self.show_time)
def set_running_state(self, state=True):
self.set_buttons_runnning_state(state)
self.shell.setReadOnly(not state)
if state:
if self.state_label is not None:
self.state_label.setText(_(
"<span style=\'color: #44AA44\'><b>Running...</b></span>"))
self.t0 = time()
self.connect(self.timer, SIGNAL("timeout()"), self.show_time)
self.timer.start(1000)
else:
if self.state_label is not None:
self.state_label.setText(_('Terminated.'))
self.disconnect(self.timer, SIGNAL("timeout()"), self.show_time)
def set_buttons_runnning_state(self, state):
self.run_button.setVisible(not state)
self.kill_button.setVisible(state)
def start_shell(self, ask_for_arguments=False):
"""Start shell"""
if ask_for_arguments and not self.get_arguments():
self.set_running_state(False)
return
self.create_process()
def get_arguments(self):
arguments, valid = QInputDialog.getText(self, _('Arguments'),
_('Command line arguments:'),
QLineEdit.Normal,
self.arguments)
if valid:
self.arguments = unicode(arguments)
return valid
def create_process(self):
raise NotImplementedError
def finished(self, exit_code, exit_status):
self.shell.flush()
self.emit(SIGNAL('finished()'))
if self.is_closing:
return
self.set_running_state(False)
self.show_time(end=True)
#===============================================================================
# Input/Output
#===============================================================================
def transcode(self, bytes):
return unicode( locale_codec.toUnicode(bytes.data()) )
def get_stdout(self):
self.process.setReadChannel(QProcess.StandardOutput)
bytes = QByteArray()
while self.process.bytesAvailable():
bytes += self.process.readAllStandardOutput()
return self.transcode(bytes)
def get_stderr(self):
self.process.setReadChannel(QProcess.StandardError)
bytes = QByteArray()
while self.process.bytesAvailable():
bytes += self.process.readAllStandardError()
return self.transcode(bytes)
def write_output(self):
self.shell.write(self.get_stdout(), flush=True)
QApplication.processEvents()
def send_to_process(self, qstr):
raise NotImplementedError
def send_ctrl_to_process(self, letter):
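# Map a letter to its ASCII control character: 'a' -> 0x01, 'c' -> 0x03 (Ctrl-C / ETX), etc.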
char = chr("abcdefghijklmnopqrstuvwxyz".index(letter) + 1)
byte_array = QByteArray()
byte_array.append(char)
self.process.write(byte_array)
self.process.waitForBytesWritten(-1)
self.shell.write(locale_codec.toUnicode(byte_array), flush=True)
def keyboard_interrupt(self):
raise NotImplementedError
def test():
from spyderlib.utils.qthelpers import qapplication
app = qapplication()
from spyderlib.widgets.externalshell.pythonshell import ExternalPythonShell
from spyderlib.widgets.externalshell.systemshell import ExternalSystemShell
import spyderlib
from spyderlib.plugins.variableexplorer import VariableExplorer
settings = VariableExplorer.get_settings()
shell = ExternalPythonShell(wdir=osp.dirname(spyderlib.__file__),
ipython_kernel=True, stand_alone=settings,
arguments="-q4thread -pylab -colors LightBG",
mpl_patch_enabled=True, light_background=False)
# shell = ExternalPythonShell(wdir=osp.dirname(spyderlib.__file__),
# interact=True, umd_enabled=True,
# stand_alone=settings,
# umd_namelist=['guidata', 'guiqwt'],
# umd_verbose=True, mpl_patch_enabled=False,
# light_background=False)
# shell = ExternalSystemShell(wdir=osp.dirname(spyderlib.__file__),
# light_background=False)
shell.shell.toggle_wrap_mode(True)
shell.start_shell(False)
from spyderlib.qt.QtGui import QFont
font = QFont("Lucida console")
font.setPointSize(10)
shell.shell.set_font(font)
shell.show()
sys.exit(app.exec_())
if __name__ == "__main__":
test()
license: gpl-3.0 | hash: 7,928,487,626,756,879,000 | line_mean: 37.791277 | line_max: 82 | alpha_frac: 0.548743 | autogenerated: false | ratio: 4.244267 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: rosenbrockc/fortpy | path: fortpy/interop/converter.py | copies: 1 | size: 19280
import os
from .. import msg
import re
import xml.etree.ElementTree as ET
from fortpy.testing.comparer import FileComparer
class TemplateLine(object):
"""Represents a single line in the template file and how to format it.
:arg element: the XML element that defines this line in the file.
:arg group: the [group] that this line belongs to.
"""
def __init__(self, element, group, commentchar):
self.identifier = element.attrib["id"]
#First look at the "mandatory" attributes and assign defaults if missing
if "type" in element.attrib:
self.dtype = re.split(",\s*", element.attrib["type"])
else:
msg.warn("no type specified for {}. Assuming string.".format(self.identifier))
self.dtype = [ "string" ]
#Values specifies how many variable values are present in the file
if "values" in element.attrib:
self.values = re.split(",\s*", element.attrib["values"])
i = 0
for i in range(len(self.values)):
if self.values[i].isdigit():
self.values[i] = int(self.values[i])
elif "from" not in element.attrib:
msg.warn("no value count specified for {}. Assuming *.".format(self.identifier))
self.values = [ "*" ]
else:
self.values = []
#Handle all the optional attributes
if "choices" in element.attrib:
self.choices = re.split(",\s*", element.attrib["choices"])
else:
self.choices = []
if "comment" in element.attrib:
self.comment = "{} {}".format(commentchar, element.attrib["comment"])
else:
self.comment = ""
if "default" in element.attrib:
self.default = eval(element.attrib["default"])
else:
self.default = None
#How from works: if an element has a from attribute, it is included in
#the plaintext file after conversion but does *not* appear in the XML
#file that is being converted. It grabs its value from another group
#or line whose id is the from attribute's value.
if "from" in element.attrib:
self.fromtag = element.attrib["from"]
else:
self.fromtag = None
#Related to from, this operator specifies how the value should be generated
#using the line/group whose id is the from attribute's value.
if "operator" in element.attrib:
self.operator = element.attrib["operator"]
else:
self.operator = "count"
self.group = group
self._nvalues = None
self._caster = {
"int": self._cast_int,
"float": self._cast_float,
#We want to use the same syntax even though we do nothing with strings
"string": lambda s: s
}
@property
def nvalues(self):
"""Returns the number of values recorded on this single line. If the
number is variable, it returns -1."""
if self._nvalues is None:
self._nvalues = 0
for val in self.values:
if type(val) == type(int):
self._nvalues += val
else:
self._nvalues = -1
break
return self._nvalues
def write(self, valuedict):
"""Returns the lines that this template line should add to the input file."""
if self.identifier in valuedict:
value = valuedict[self.identifier]
elif self.default is not None:
value = self.default
elif self.fromtag is not None and self.fromtag in valuedict:
if self.operator == "count":
value = len(valuedict[self.fromtag])
else:
msg.err("referenced 'from' attribute/operator {} not in xml dictionary.".format(self.fromtag))
exit(1)
else:
msg.err("a required line {} had no value or default specified.".format(self.identifier))
exit(1)
#Before we generate the result, validate the choices if they exist
if len(self.choices) > 0:
for single in value:
if str(single) not in self.choices:
msg.warn("failed choices validation for {} in {} (line {})".format(
single, self.choices, self.identifier))
result = []
#Get the string representation of the value
if isinstance(value, list):
sval = " ".join([ str(val) for val in value])
else:
sval = str(value)
if self.comment != "" and (self.nvalues < 0 or self.nvalues > 5):
#We will put the comments on a separate line from the actual values.
result.append(self.comment)
result.append(sval)
else:
result.append("{} {}".format(sval, self.comment))
return result
def parse(self, element):
"""Parses the contents of the specified XML element using template info.
:arg element: the XML element from the input file being converted.
"""
result = []
if element.text is not None and element.tag == self.identifier:
l, k = (0, 0)
raw = element.text.split()
while k < len(self.values):
dtype = self.dtype[k]
if isinstance(self.values[k], int):
for i in range(self.values[k]):
result.append(self._caster[dtype](raw[i + l]))
l += self.values[k]
k += 1
else:
#This is a variable argument line, just use up the rest
#of them as the type of the current line
rest = [ self._caster[dtype](val) for val in raw[l::] ]
result.extend(rest)
break
else:
msg.warn("no results for parsing {} using line {}".format(element.tag, self.identifier))
return result
def _cast_int(self, value):
"""Returns the specified value as int if possible."""
try:
return int(value)
except ValueError:
msg.err("Cannot convert {} to int for line {}.".format(value, self.identifier))
exit(1)
def _cast_float(self, value):
"""Returns the specified value as float if possible."""
try:
return float(value)
except ValueError:
msg.err("Cannot convert {} to float for line {}.".format(value, self.identifier))
exit(1)
class TemplateGroup(object):
"""Represents a logical grouping of line templates.
:arg element: the XML group element to parse.
:arg commentchar: the character(s) that specify comment lines. Used when
inserting comments beside lines in the plaintext file.
"""
def __init__(self, element, commentchar):
self.identifier = element.attrib["name"]
self.order = []
self.lines = {}
if "comment" in element.attrib:
self.comment = "{} {}".format(commentchar, element.attrib["comment"])
else:
self.comment = ""
if "repeat" in element.attrib:
self.repeat = element.attrib["repeat"]
else:
self.repeat = None
self._load(element, commentchar)
def _load(self, element, commentchar):
"""Loads all the child line elements from the XML group element."""
for child in element:
if "id" in child.attrib:
tline = TemplateLine(child, self, commentchar)
self.order.append(tline.identifier)
self.lines[tline.identifier] = tline
else:
msg.warn("no id element in {}. Ignored. (group._load)".format(child))
def parse(self, element):
"""Extracts the values from the specified XML element that is being converted."""
#All the children of this element are what we are trying to parse.
result = []
for child in element:
if child.tag in self.lines:
values = { child.tag: self.lines[child.tag].parse(child) }
result.append(values)
return result
def write(self, valuedict):
"""Generates the lines for the converted input file using the specified
value dictionary."""
result = []
if self.identifier in valuedict:
values = valuedict[self.identifier]
else:
return result
if self.comment != "":
result.append(self.comment)
if self.repeat is not None and type(values) == type([]):
if self.repeat.isdigit():
for i in range(int(self.repeat)):
result.extend(self._write_iterate(values[i]))
else:
#We are repeating for as many values as we have in the value
#entry for the group in the dictionary.
for value in values:
result.extend(self._write_iterate(value))
elif type(values) == type({}):
#This group doesn't get repeated, so the values variable must
#be a dictionary, just run it once.
result = self._write_iterate(values)
return result
def _write_iterate(self, values):
"""Generates the lines for a single pass through the group."""
result = []
for key in self.order:
result.append(self.lines[key].write(values))
if len(result) > 1:
return result
else:
return result[0]
class TemplateContents(object):
"""The contents of an XML input template.
:attr order: a list of id attributes from the lines in the template file
that preserves the order in which the lines showed up in the file.
:attr entries: a dictionary of TemplateLine and TemplateGroup instances
for the corresponding lines and groups in the template. Dict keys are
the identifiers in the order list.
:attr comment: the character(s) at the start of a line that specify it as
a comment line."""
def __init__(self):
self.order = []
self.entries = {}
self.comment = "#"
class FileTemplate(object):
"""Represents an XML template that specifies how to format an input/output
file using a dictionary of keyed values.
:arg path: the full path to the XML template file to load.
"""
def __init__(self, path, name, direction="input"):
self.name = name
self.path = os.path.expanduser(path)
self.versions = {}
self.direction = direction
self._load()
def _load(self):
"""Extracts the XML template data from the file."""
if os.path.exists(self.path):
root = ET.parse(self.path).getroot()
if (root.tag == "fortpy" and "mode" in root.attrib and
root.attrib["mode"] == "template" and "direction" in root.attrib and
root.attrib["direction"] == self.direction):
#First, we need instances of the template contents for each of the
#versions listed in the fortpy tag.
for v in _get_xml_version(root):
self.versions[v] = TemplateContents()
#Now we can update the contents objects using the XML data.
self._load_entries(root)
#See if a custom name was specified for the auto-converted
#files.
if "autoname" in root.attrib:
self.name = root.attrib["autoname"]
else:
msg.err("the specified template {} ".format(self.path) +
"is missing the mode and direction attributes.")
exit(1)
else:
msg.err("could not find the template {}.".format(self.path))
exit(1)
def parse(self, root):
"""Returns a dictionary of values extracted from the root of the
specified XML file. It is assumed that the file is an input/output
file to be converted into plaintext. As such the file should only
specify a single version number."""
#Use the first element in the versions list since there should only be one.
v = _get_xml_version(root)[0]
result = {}
for child in root:
if child.tag in self.versions[v].entries:
entry = self.versions[v].entries[child.tag]
#Entry can be either a line or a group. Both objects have a parse
#method that returns a list of values. In the line's case, the
#list is the values from that line. For the group, it is a list
#of dictionaries, a dictionary for each tag name.
result[child.tag] = entry.parse(child)
return result
def write(self, valuedict, version):
"""Generates the lines for the converted input file from the valuedict.
:arg valuedict: a dictionary of values where the keys are ids in the
template and the values obey their template rules.
:arg version: the target version of the output file.
"""
result = []
if version in self.versions:
for tag in self.versions[version].order:
entry = self.versions[version].entries[tag]
result.extend(entry.write(valuedict))
return result
def _load_entries(self, root):
"""Loads all the child entries of the input template from the
specified root element."""
mdict = {
"comments": self._comment,
"line": self._line,
"group": self._group
}
for entry in root:
mdict[entry.tag](entry)
def _comment(self, element):
"""Extracts the character to use for comments in the input file."""
for v in _get_xml_version(element):
self.versions[v].comment = element.text
def _line(self, element):
"""Parses the XML element as a single line entry in the input file."""
for v in _get_xml_version(element):
if "id" in element.attrib:
tline = TemplateLine(element, None, self.versions[v].comment)
self.versions[v].entries[tline.identifier] = tline
self.versions[v].order.append(tline.identifier)
else:
msg.warn("no id element in {}. Ignored. (_line)".format(element))
def _group(self, element):
"""Parses the XML element as a group of [unknown] number of lines."""
for v in _get_xml_version(element):
if "name" in element.attrib:
g = TemplateGroup(element, self.versions[v].comment)
self.versions[v].entries[g.identifier] = g
self.versions[v].order.append(g.identifier)
else:
msg.warn("no name element in {}. Ignored. (_group)".format(element))
def _get_xml_version(element):
"""Extracts a list of versions that an xml element references. Returns
a [ 1 ] list if there isn't a versions attribute."""
if "versions" in element.attrib:
result = [ int(v) for v in re.split(",\s*", element.attrib["versions"]) ]
else:
result = [ 1 ]
return result
class FileConverter(object):
"""Converts XML-based input/output files into non-keyword based ones.
:arg template_dir: the path to the directory containing input file templates.
"""
def __init__(self, template_dir):
self.template_dir = os.path.expanduser(template_dir)
self.templates = {}
def convert(self, path, version, target = None):
"""Converts the specified file using the relevant template.
:arg path: the full path to the file to convert.
:arg version: the new version of the file.
:arg target: the optional path to save the file under. If not
specified, the file is saved based on the template file name.
"""
#Get the template and values out of the XML input file and
#write them in the format of the keywordless file.
values, template = self.parse(path)
lines = template.write(values, version)
#Finally, write the lines to the correct path.
if target is None:
target = os.path.join(os.path.dirname(path), template.name)
with open(os.path.expanduser(target), 'w') as f:
f.write("\n".join(lines))
def parse(self, path):
"""Extracts a dictionary of values from the XML file at the specified path."""
#Load the template that will be used for parsing the values.
expath, template, root = self._load_template(path)
if expath is not None:
values = template.parse(root)
return (values, template)
class OutputConverter(object):
"""Converts plain-text output files between versions."""
def __init__(self, template_dir):
self.comparer = FileComparer(os.path.expanduser(template_dir))
def convert(self, path, version, target):
"""Converts the specified source file to a new version number."""
source = self.comparer.get_representation(path)
lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ]
for line in self.comparer.template.contents[version].preamble:
lines.append(line.write(source.preamble, source.version, source.stored) + "\n")
for line in self.comparer.template.contents[version].body:
for valueset in source.body:
lines.append(line.write(valueset, source.version, source.stored) + "\n")
with open(os.path.expanduser(target), 'w') as f:
f.writelines(lines)
class InputConverter(FileConverter):
"""Converts XML-based input files into non-keyword based ones.
:arg template_dir: the path to the directory containing input file templates.
"""
def __init__(self, template_dir):
super(InputConverter, self).__init__(template_dir)
def _load_template(self, path):
#First we extract the file name for the template or look for it
#in the root element. The naming convention is to use .xin.xml
#as the extension. If we replace the .xin.xml by .in.xml it
#should cover most cases.
expath = os.path.expanduser(path)
root = ET.parse(expath).getroot()
if root.tag == "fortpy" and "mode" in root.attrib and \
root.attrib["mode"] == "input":
#This is a valid input file.
if "template" in root.attrib:
template = root.attrib["template"]
else:
template = os.path.split(expath)[1].replace(".xin.xml", ".in.xml")
tpath = os.path.join(self.template_dir, template)
name = template.replace(".xml","")
self.templates[template] = FileTemplate(tpath, name)
return (expath, self.templates[template], root)
else:
msg.warn("the input file {} is missing the mode attribute.".format(path))
return (None, None, None)  # keep the (expath, template, root) shape that parse() unpacks
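# A minimal usage sketch (hypothetical paths, assuming the matching template exists in
# the template directory): parse an XML input file and emit the keywordless version-1 file.
#
# converter = InputConverter("~/fortpy/templates")
# converter.convert("~/runs/phonons.xin.xml", version=1)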
license: mit | hash: -2,943,328,751,953,272,000 | line_mean: 39.504202 | line_max: 110 | alpha_frac: 0.578631 | autogenerated: false | ratio: 4.421004 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: room77/py77 | path: pylib/util/singleton.py | copies: 1 | size: 2390
"""
Singleton implementation.
Usage:
class A(singleton.Singleton): pass
Please NOTE:
id(A.Instance()), id(A)
"""
__copyright__ = '2013, Room 77, Inc.'
__author__ = 'Pramod Gupta'
import threading
# with_metaclass method from Six compatibility library.
# https://github.com/benjaminp/six/blob/1.11.0/six.py#L819
def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
class SingletonException(Exception):
pass
class _SingletonMeta(type):
def __new__(cls, name, bases, dct):
if '__new__' in dct:
raise SingletonException('Can not override __new__ in a Singleton')
return super(_SingletonMeta, cls).__new__(cls, name, bases, dct)
def __call__(cls, *args, **dictArgs):
raise SingletonException('Singletons may only be instantiated through Instance()')
class Singleton(with_metaclass(_SingletonMeta, object)):
_lock = threading.RLock()
@classmethod
def Instance(cls, *args, **kw):
"""
Call this to instantiate an instance or retrieve the existing instance.
If the singleton requires args to be instantiated, include them the first
time you call Instance.
"""
if not cls.Instantiated(): Singleton._createSingletonInstance(cls, args, kw)
return cls._instance
@classmethod
def Instantiated(cls):
# Don't use hasattr(cls, '_instance'), because that screws things up if there is a singleton
# that extends another singleton.
# hasattr looks in the base class if it doesn't find in subclass.
return '_instance' in cls.__dict__
@staticmethod
def _createSingletonInstance(cls, args, kw):
with Singleton._lock:
# Check if the the class really needs to be instantiated.
if cls.Instantiated(): return
try:
# Create the new instance and init it.
instance = cls.__new__(cls)
instance.__init__(*args, **kw)
except TypeError as e:
if '__init__() takes' in str(e):
raise SingletonException('If the singleton requires __init__ args, '
'supply them on first call to Instance().')
else:
raise e
cls._instance = instance
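# A minimal usage sketch (assumed example, mirroring the docstring above): subclasses are
# created and fetched only through Instance(); constructor arguments go on the first call.
#
# class Config(Singleton):
#     def __init__(self, path):
#         self.path = path
#
# cfg = Config.Instance('/etc/app.conf')   # first call creates the instance
# assert cfg is Config.Instance()          # later calls return the same object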
license: mit | hash: 9,167,899,862,102,201,000 | line_mean: 28.875 | line_max: 98 | alpha_frac: 0.648536 | autogenerated: false | ratio: 3.930921 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: caktus/django-treenav | path: treenav/tests/test_views.py | copies: 1 | size: 17704
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.http import HttpRequest
from django.template import Template
from django.template.context import Context
from django.test import override_settings
from django.urls import reverse
from treenav.context_processors import treenav_active
from treenav.forms import MenuItemForm
from treenav.models import Item, MenuItem
from .base import TreeNavTestCase as TestCase
from .models import Team
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class TreeNavTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
**{
"label": "Primary Navigation",
"slug": "primary-nav",
"order": 0,
}
)
self.create_menu_item(
**{
"parent": self.root,
"label": "Our Blog",
"slug": "our-blog",
"order": 4,
}
)
self.create_menu_item(
**{
"parent": self.root,
"label": "Home",
"slug": "home",
"order": 0,
}
)
self.child = self.create_menu_item(
**{
"parent": self.root,
"label": "About Us",
"slug": "about-us",
"order": 9,
}
)
self.second_level = self.create_menu_item(
**{
"parent": self.child,
"label": "Second",
"slug": "second",
"order": 0,
}
)
self.third_level = self.create_menu_item(
**{
"parent": self.second_level,
"label": "Third",
"slug": "third",
"order": 0,
}
)
def test_treenav_active(self):
request = HttpRequest()
request.META["PATH_INFO"] = "/"
treenav_active(request)
def test_to_tree(self):
self.root.to_tree()
def compile_string(self, url, template_str):
return Template(template_str).render(Context())
def test_non_unique_form_save(self):
dup = MenuItemForm(
{
"label": "test nav",
"slug": "primary-nav",
"order": 0,
}
)
self.assertFalse(dup.is_valid(), "Form says a duplicate slug is valid.")
def test_single_level_menu_root(self):
template_str = """{% load treenav_tags %}
{% single_level_menu "primary-nav" 0 %}
"""
result = self.compile_string("/", template_str)
self.assertNotIn(self.second_level.label, result)
def test_single_level_menu_about_us(self):
template_str = """{% load treenav_tags %}
{% single_level_menu "about-us" 0 %}
"""
result = self.compile_string("/", template_str)
self.assertIn(self.second_level.label, result)
def test_show_treenav(self):
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" %}
"""
result = self.compile_string("/", template_str)
self.assertNotIn(self.second_level.label, result)
def test_single_level_menu_show_treenav_equality(self): # necessary?
"""Tests that the single_level_menu and show_treenav tags output the
same for the top level of the tree.
"""
template_str = """{% load treenav_tags %}
{% single_level_menu "primary-nav" 0 %}
"""
single_level_menu_result = self.compile_string("/", template_str)
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" %}
"""
show_treenav_result = self.compile_string("/", template_str)
self.assertEqual(single_level_menu_result, show_treenav_result)
def test_show_treenav_third_level(self):
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" full_tree="True" %}
"""
result = self.compile_string("/", template_str)
self.assertIn(self.third_level.label, result)
def test_show_menu_crumbs(self):
template_str = """{% load treenav_tags %}
{% show_menu_crumbs "about-us" %}
"""
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
self.create_menu_item(
**{
"parent": self.root,
"label": "Durham Bulls",
"slug": "durham-bulls",
"order": 4,
"content_type": ct,
"object_id": team.pk,
}
)
compiled = self.compile_string(team.get_absolute_url(), template_str)
# FIXME: This fixes the pep8 warning, but need to figure out what we're asserting
self.assertTrue(compiled)
def test_getabsoluteurl(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
**{
"label": "Durham Bulls",
"slug": "durham-bulls",
"order": 4,
"content_type": ct,
"object_id": team.pk,
}
)
self.assertEqual(menu.href, team.get_absolute_url())
def test_changed_getabsoluteurl(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
parent=self.root,
label="Durham Bulls",
slug="durham-bulls",
order=9,
content_type=ct,
object_id=team.pk,
href=team.get_absolute_url(),
)
# change slug and save it to fire post_save signal
team.slug = "wildcats"
team.save()
menu = MenuItem.objects.get(slug="durham-bulls")
self.assertEqual(menu.href, team.get_absolute_url())
def test_active_url(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
self.child.object_id = team.pk
self.child.content_type = ct
self.child.content_object = team
self.child.save()
item = Item(self.child)
active_item = item.set_active(team.get_absolute_url())
self.assertEqual(active_item.node, self.child)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class TreeNavViewTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
label="Primary Navigation",
slug="primary-nav",
order=0,
)
self.create_menu_item(
parent=self.root,
label="Our Blog",
slug="our-blog",
order=4,
)
self.create_menu_item(
parent=self.root,
label="Home",
slug="home",
order=0,
)
self.child = self.create_menu_item(
parent=self.root,
label="About Us",
slug="about-us",
order=9,
)
def test_tags_level(self):
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "primary-nav", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 3)
self.assertContains(response, "depth-0")
def test_tags_no_page(self):
url = reverse("test_view", args=("notthere",))
response = self.client.post(url, {"pslug": "primary-nav", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 3)
self.assertContains(response, "depth-0")
def test_tags_level2(self):
self.create_menu_item(
parent=self.child,
label="Second Level",
slug="second-level",
order=10,
)
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "about-us", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 1)
def test_tags_improper(self):
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "no-nav", "N": 10000})
self.assertNotContains(response, "<ul")
def test_hierarchy(self):
root = self.root.to_tree()
self.assertEqual(len(root.children), 3)
children = ("Home", "Our Blog", "About Us")
for item, expected_label in zip(root.children, children):
self.assertEqual(item.node.label, expected_label)
def test_undefined_url(self):
"""
Testing the undefined_url view.
"""
slug = self.child.slug
url = reverse(
"treenav_undefined_url",
args=[
slug,
],
)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class RefreshViewTestCase(TestCase):
"Admin view to trigger refresh of hrefs."
def setUp(self):
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.refresh_url = reverse("admin:treenav_refresh_hrefs")
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changelist_url = reverse("admin:%s_%s_changelist" % self.info)
self.client.login(username="test", password="test")
def test_trigger_refresh(self):
"Trigger update of menu item HREFs."
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
label="Durham Bulls",
slug="durham-bulls",
order=9,
content_type=ct,
object_id=team.pk,
href=team.get_absolute_url(),
)
# change slug and save it to fire post_save signal
team.slug = "wildcats"
team.save()
self.assertNotEqual(menu.href, team.get_absolute_url())
response = self.client.get(self.refresh_url, follow=True)
self.assertRedirects(response, self.changelist_url)
menu = MenuItem.objects.get(pk=menu.pk)
self.assertEqual(menu.href, team.get_absolute_url())
self.assertEqual(len(response.context["messages"]), 1)
def test_trigger_refresh_redirects_to_custom_admin(self):
"Trigger update of menu item HREFs for a second custom admin."
refresh_url = reverse("admin:treenav_refresh_hrefs", current_app="admin2")
response = self.client.get(refresh_url, follow=True)
changelist_url = reverse(
"admin:%s_%s_changelist" % self.info, current_app="admin2"
)
self.assertRedirects(response, changelist_url)
def test_no_permission(self):
"Non-staff cannot trigger the refresh."
self.superuser.is_staff = False
self.superuser.save()
response = self.client.get(self.refresh_url, follow=True)
# Admin displays a login page with 200 status code
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["messages"]), 0)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class ClearCacheViewTestCase(TestCase):
"Admin view to clear menu cache."
def setUp(self):
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.cache_url = reverse("admin:treenav_clean_cache")
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changelist_url = reverse("admin:%s_%s_changelist" % self.info)
self.client.login(username="test", password="test")
def test_reset_cache(self):
"Clear MenuItems from cache."
menu = self.create_menu_item(
label="Our Blog",
slug="our-blog",
order=4,
)
menu.to_tree()
valid = cache.get("menu-tree-%s" % menu.slug)
self.assertTrue(valid, "Menu should be cached")
cache.set("menu-tree-%s" % menu.slug, "INVALID!!!")
response = self.client.get(self.cache_url, follow=True)
self.assertRedirects(response, self.changelist_url)
self.assertEqual(len(response.context["messages"]), 1)
# Cache should be recycled
current = cache.get("menu-tree-%s" % menu.slug)
self.assertNotEqual(current, "INVALID!!!")
def test_reset_cache_redirects_to_custom_admin(self):
"After cleaning cache, redirects to custom admin."
cache_url = reverse("admin:treenav_clean_cache", current_app="admin2")
response = self.client.get(cache_url, follow=True)
changelist_url = reverse(
"admin:%s_%s_changelist" % self.info, current_app="admin2"
)
self.assertRedirects(response, changelist_url)
def test_no_permission(self):
"Non-staff cannot clear the cache."
self.superuser.is_staff = False
self.superuser.save()
response = self.client.get(self.cache_url, follow=True)
# Admin displays a login page with 200 status code
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["messages"]), 0)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class SimultaneousReorderTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
label="Primary Navigation",
slug="primary-nav",
order=0,
)
self.blog = self.create_menu_item(
parent=self.root,
label="Our Blog",
slug="our-blog",
order=4,
)
self.home = self.create_menu_item(
parent=self.root,
label="Home",
slug="home",
order=0,
)
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changeform_url = reverse("admin:%s_%s_change" % self.info, args=(1,))
self.client.login(username="test", password="test")
def test_reorder(self):
# Build up the post dict, starting with the top form
data = {
"parent": "",
"label": "Primary Navigation",
"slug": "primary-nav",
"order": 0,
"is_enabled": "on",
"link": "",
"content_type": "",
"object_id": "",
}
# Now update the post dict with inline form info
data.update(
{
"children-TOTAL_FORMS": 3,
"children-INITIAL_FORMS": 2,
"children-MAX_NUM_FORMS": 1000,
}
)
# Update the post dict with the children, swapping their order values
data.update(
{
"children-0-id": 3,
"children-0-parent": 1,
"children-0-label": "Home",
"children-0-slug": "home",
"children-0-order": 4,
"children-0-is_enabled": "on",
"children-0-link": "",
"children-0-content_type": "",
"children-0-object_id": "",
"children-1-id": 2,
"children-1-parent": 1,
"children-1-label": "Our Blog",
"children-1-slug": "our-blog",
"children-1-order": 0,
"children-1-is_enabled": "on",
"children-1-link": "",
"children-1-content_type": "",
"children-1-object_id": "",
}
)
# Update the post dict with the empty inline form entry
data.update(
{
"children-2-id": "",
"children-2-parent": 1,
"children-2-label": "",
"children-2-slug": "",
"children-2-order": "",
"children-2-is_enabled": "on",
"children-2-link": "",
"children-2-content_type": "",
"children-2-object_id": "",
}
)
# Update the post dict with the end of the form
data.update(
{
"children-__prefix__-id": "",
"children-__prefix__-parent": 1,
"children-__prefix__-label": "",
"children-__prefix__-slug": "",
"children-__prefix__-order": "",
"children-__prefix__-is_enabled": "on",
"children-__prefix__-link": "",
"children-__prefix__-content_type": "",
"children-__prefix__-object_id": "",
"_save": "Save",
}
)
self.client.post(self.changeform_url, data)
order = self.root.get_children()
# Check if children are in the correct order
self.assertEqual(order[0], self.blog)
self.assertEqual(order[1], self.home)
# Check if the lft and rght attributes assigned by mptt are correct
self.assertNotEqual(order[0].lft, order[1].lft)
self.assertNotEqual(order[0].rght, order[1].rght)
|
bsd-3-clause
| 514,541,756,168,908,400
| 35.278689
| 89
| 0.546091
| false
| 3.881605
| true
| false
| false
|
NarlikarLab/DIVERSITY
|
weblogoMod/weblogolib/__init__.py
|
1
|
41331
|
#!/usr/bin/env python
# -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 Gavin E. Crooks
# Copyright (c) 2006-2011, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Replicates README.txt
"""
WebLogo (http://code.google.com/p/weblogo/) is a tool for creating sequence
logos from biological sequence alignments. It can be run on the command line,
as a standalone webserver, as a CGI webapp, or as a python library.
The main WebLogo webserver is located at http://weblogo.threeplusone.com
Please consult the manual for installation instructions and more information:
(Also located in the weblogolib/htdocs subdirectory.)
http://weblogo.threeplusone.com/manual.html
For help on the command line interface run
./weblogo --help
To build a simple logo run
./weblogo < cap.fa > logo0.eps
To run as a standalone webserver at localhost:8080
./weblogo --serve
To create a logo in python code:
>>> from weblogolib import *
>>> fin = open('cap.fa')
>>> seqs = read_seq_data(fin)
>>> data = LogoData.from_seqs(seqs)
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
>>> fout = open('cap.eps', 'w')
>>> eps_formatter( data, format, fout)
-- Distribution and Modification --
This package is distributed under the new BSD Open Source License.
Please see the LICENSE.txt file for details on copyright and licensing.
The WebLogo source code can be downloaded from
http://code.google.com/p/weblogo/
WebLogo requires Python 2.5, 2.6 or 2.7, and the python
array package 'numpy' (http://www.scipy.org/Download)
Generating logos in PDF or bitmap graphics formats require that the ghostscript
program 'gs' be installed. Scalable Vector Graphics (SVG) format also requires
the program 'pdf2svg'.
"""
import sys
import copy
import os
from datetime import datetime
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from math import sqrt
from weblogoMod.corebio.data import rna_letters, dna_letters, amino_acid_letters
from string import Template
from subprocess import *
from weblogoMod.corebio.utils import resource_string, resource_filename
from math import log, sqrt, exp
# Avoid 'from numpy import *' since numpy has lots of names defined
from numpy import array, asarray, float64, ones, zeros, int32,all,any, shape
import numpy as na
from color import *
from colorscheme import *
from weblogoMod.corebio.seq import Alphabet, Seq, SeqList
from weblogoMod.corebio import seq_io
from weblogoMod.corebio.utils import isfloat, find_command, ArgumentError, stdrepr
from weblogoMod.corebio.moremath import *
from weblogoMod.corebio.data import amino_acid_composition
from weblogoMod.corebio.seq import unambiguous_rna_alphabet, unambiguous_dna_alphabet, unambiguous_protein_alphabet
import weblogoMod.corebio
from logomath import Dirichlet
# ------ META DATA ------
__all__ = [ 'LogoOptions',
'description',
'__version__',
'LogoFormat',
'LogoData',
'GhostscriptAPI',
'std_color_schemes',
'default_color_schemes',
'classic',
'std_units',
'std_sizes',
'std_alphabets',
'std_percentCG',
'pdf_formatter',
'jpeg_formatter',
'png_formatter',
'png_print_formatter',
'txt_formatter',
'eps_formatter',
'formatters',
'default_formatter',
'base_distribution',
'equiprobable_distribution',
'read_seq_data',
'color',
'colorscheme',
'logomath',
]
description = "Create sequence logos from biological sequence alignments."
__version__ = weblogoMod.corebio.__version__
# These keywords are substituted by subversion.
# The date and revision will only tell the truth after a branch or tag,
# since different files in trunk will have been changed at different times
release_date ="$Date: 2012-07-02 19:28:12 -0700 (Mon, 02 Jul 2012) $".split()[1]
release_build = "$Revision: 145 $".split()[1]
release_description = "WebLogo %s (%s)" % (__version__, release_date)
def cgi(htdocs_directory) :
import weblogolib._cgi
weblogolib._cgi.main(htdocs_directory)
class GhostscriptAPI(object) :
"""Interface to the command line program Ghostscript ('gs')"""
formats = ('png', 'pdf', 'jpeg')
def __init__(self, path=None) :
try:
command = find_command('gs', path=path)
except EnvironmentError:
try:
command = find_command('gswin32c.exe', path=path)
except EnvironmentError:
raise EnvironmentError("Could not find Ghostscript on path."
" There should be either a gs executable or a gswin32c.exe on your system's path")
self.command = command
def version(self) :
args = [self.command, '--version']
try :
p = Popen(args, stdout=PIPE)
(out,err) = p.communicate()
except OSError :
raise RuntimeError("Cannot communicate with ghostscript.")
return out.strip()
def convert(self, format, fin, fout, width, height, resolution=300) :
device_map = { 'png':'png16m', 'pdf':'pdfwrite', 'jpeg':'jpeg'}
try :
device = device_map[format]
except KeyError:
raise ValueError("Unsupported format.")
args = [self.command,
"-sDEVICE=%s" % device,
"-dPDFSETTINGS=/screen", #Modification printer changed to screen
#"-q", # Quiet: Do not dump messages to stdout.
"-sstdout=%stderr", # Redirect messages and errors to stderr
"-sOutputFile=-", # Stdout
"-dUseCIEColor", #Modification
"-dDEVICEWIDTHPOINTS=%s" % str(width),
"-dDEVICEHEIGHTPOINTS=%s" % str(height),
"-dSAFER", # For added security
"-dNOPAUSE",]
if device != 'pdf' :
args.append("-r%s" % str(resolution) )
if resolution < 300 : # Antialias if resolution is Less than 300 DPI
args.append("-dGraphicsAlphaBits=4")
args.append("-dTextAlphaBits=4")
args.append("-dAlignToPixels=0")
args.append("-") # Read from stdin. Must be last argument.
error_msg = "Unrecoverable error : Ghostscript conversion failed " \
"(Invalid postscript?). %s" % " ".join(args)
source = fin.read()
try :
p = Popen(args, stdin=PIPE, stdout = PIPE, stderr= PIPE)
(out,err) = p.communicate(source)
except OSError :
raise RuntimeError(error_msg)
if p.returncode != 0 :
print("COMMAND " + str(self.command))
print("ARGS" + str(args))
error_msg += '\nReturn code: %i\n' % p.returncode
if err is not None : error_msg += err
raise RuntimeError(error_msg)
print >>fout, out
# end class Ghostscript
aa_composition = [ amino_acid_composition[_k] for _k in
unambiguous_protein_alphabet]
# ------ DATA ------
classic = ColorScheme([
ColorGroup("G", "orange" ),
ColorGroup("TU", "red"),
ColorGroup("C", "blue"),
ColorGroup("A", "green")
] )
std_color_schemes = {"auto": None, # Depends on sequence type
"monochrome": monochrome,
"base pairing": base_pairing,
"classic": classic,
"hydrophobicity" : hydrophobicity,
"chemistry" : chemistry,
"charge" : charge,
}#
default_color_schemes = {
unambiguous_protein_alphabet: hydrophobicity,
unambiguous_rna_alphabet: base_pairing,
unambiguous_dna_alphabet: base_pairing
}
std_units = {
"bits" : 1./log(2),
"nats" : 1.,
"digits" : 1./log(10),
"kT" : 1.,
"kJ/mol" : 8.314472 *298.15 /1000.,
"kcal/mol": 1.987 *298.15 /1000.,
"probability" : None,
}
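# For reference, the energy units above are RT at 298.15 K: about 2.479 kJ/mol
# and 0.593 kcal/mol per nat of entropy.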
# The base stack width is set equal to 9pt Courier.
# (Courier has a width equal to 3/5 of the point size.)
# Check that can get 80 characters in journal page @small
# 40 characters in a journal column
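# Worked out: 9 pt Courier at 3/5 of the point size gives 9 * 3/5 = 5.4 pt,
# the "small" stack width below.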
std_sizes = {
"small" : 5.4 ,
"medium" : 5.4*2,
"large" : 5.4*3
}
std_alphabets = {
'protein': unambiguous_protein_alphabet,
'rna': unambiguous_rna_alphabet,
'dna': unambiguous_dna_alphabet}
std_percentCG = {
'H. sapiens' : 40.,
'E. coli' : 50.5,
'S. cerevisiae' : 38.,
'C. elegans' : 36.,
'D. melanogaster': 43.,
'M. musculus' : 42.,
'T. thermophilus' : 69.4,
}
# Thermus thermophilus: Henne A, Bruggemann H, Raasch C, Wiezer A, Hartsch T,
# Liesegang H, Johann A, Lienard T, Gohl O, Martinez-Arias R, Jacobi C,
# Starkuviene V, Schlenczeck S, Dencker S, Huber R, Klenk HP, Kramer W,
# Merkl R, Gottschalk G, Fritz HJ: The genome sequence of the extreme
# thermophile Thermus thermophilus.
# Nat Biotechnol 2004, 22:547-53
class LogoOptions(object) :
""" A container for all logo formatting options. Not all of these
are directly accessible through the CLI or web interfaces.
To display LogoOption defaults:
>>> from weblogolib import *
>>> LogoOptions()
All physical lengths are measured in points. (72 points per inch, 28.3 points per cm)
String attributes:
o creator_text -- Embedded as comment in figures.
o logo_title
o logo_label
o unit_name -- See std_units for options. (Default 'bits')
o yaxis_label -- Defaults to unit_name
o xaxis_label
o fineprint -- Defaults to WebLogo name and version
Boolean attributes:
o show_yaxis
o show_xaxis
o show_ends
o show_fineprint
o show_errorbars -- Draw errorbars (default: False)
o show_boxes -- Draw boxes around stack characters (default: True)
o debug -- Draw extra graphics debugging information.
o rotate_numbers -- Draw xaxis numbers with vertical orientation?
o scale_width -- boolean, scale width of characters proportional to ungaps
o pad_right -- Make a single line logo the same width as multiline logos (default: False)
Other attributes:
o stacks_per_line
o yaxis_tic_interval
o yaxis_minor_tic_ratio
o yaxis_scale
o xaxis_tic_interval
o number_interval
o shrink_fraction -- Proportional shrinkage of characters if show_boxes is true.
o errorbar_fraction
o errorbar_width_fraction
o errorbar_gray
o resolution -- Dots per inch (default: 96). Used for bitmapped output formats
o default_color
o color_scheme
o stack_width --
o stack_aspect_ratio -- Ratio of stack height to width (default: 5)
o logo_margin -- Default: 2 pts
o stroke_width -- Default: 0.5 pts
o tic_length -- Default: 5 pts
o stack_margin -- Default: 0.5 pts
o small_fontsize -- Small text font size in points
o fontsize -- Regular text font size in points
o title_fontsize -- Title text font size in points
o number_fontsize -- Font size for axis-numbers, in points.
o text_font
o logo_font
o title_font
o first_index
o logo_start
o logo_end
"""
def __init__(self, **kwargs) :
""" Create a new LogoOptions instance.
>>> L = LogoOptions(logo_title = "Some Title String")
>>> L.show_yaxis = False
>>> repr(L)
"""
self.alphabet = None
self.creator_text = release_description
self.logo_title = ""
self.logo_label = ""
self.stacks_per_line = 40
self.unit_name = "bits"
self.show_yaxis = True
# yaxis_label default depends on other settings. See LogoFormat
self.yaxis_label = None
self.yaxis_tic_interval = 1.
self.yaxis_minor_tic_ratio = 5
self.yaxis_scale = None
self.show_xaxis = True
self.xaxis_label = ""
self.xaxis_tic_interval =1
self.rotate_numbers = False
self.number_interval = 5
self.show_ends = False
self.annotate = None
self.show_fineprint = True
self.fineprint = "Based on WebLogo "+__version__
self.show_boxes = False
self.shrink_fraction = 0.5
self.show_errorbars = True
self.errorbar_fraction = 0.90
self.errorbar_width_fraction = 0.25
self.errorbar_gray = 0.75
self.resolution = 96. # Dots per inch
self.default_color = Color.by_name("black")
self.color_scheme = None
#self.show_color_key = False # NOT yet implemented
self.debug = False
self.logo_margin = 2
self.stroke_width = 0.5
self.tic_length = 5
self.stack_width = std_sizes["large"]
self.stack_aspect_ratio = 5
self.stack_margin = 0.5
self.pad_right = False
self.small_fontsize = 6
self.fontsize = 10
self.title_fontsize = 12
self.number_fontsize = 8
self.text_font = "ArialMT"
self.logo_font = "Arial-BoldMT"
self.title_font = "ArialMT"
self.first_index = 1
self.logo_start = None
self.logo_end=None
self.scale_width = True
self.reverse_stacks = True # If true, draw stacks with largest letters on top.
from weblogoMod.corebio.utils import update
update(self, **kwargs)
def __repr__(self) :
attributes = vars(self).keys()
attributes.sort()
return stdrepr(self, attributes )
# End class LogoOptions
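# Usage sketch (illustrative only, not from the original source; attribute
# names are those documented in the LogoOptions docstring above):
#
# opts = LogoOptions(logo_title="CAP binding sites")
# opts.unit_name = "probability"
# opts.show_errorbars = False
# opts.color_scheme = std_color_schemes["classic"]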
class LogoFormat(LogoOptions) :
""" Specifies the format of the logo. Requires LogoData and LogoOptions
objects.
>>> data = LogoData.from_seqs(seqs )
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
Raises an ArgumentError if arguments are invalid.
"""
def __init__(self, data, options= None) :
""" Create a new LogoFormat instance.
"""
LogoOptions.__init__(self)
if options is not None :
self.__dict__.update(options.__dict__)
self.alphabet = data.alphabet
self.seqlen = data.length
# Derived parameters.
self.show_title = False
self.show_xaxis_label = False
self.yaxis_minor_tic_interval = None
self.lines_per_logo = None
self.char_width = None # Maximum character width. Stack width minus margins.
self.line_margin_left = None
self.line_margin_right = None
self.line_margin_bottom = None
self.line_margin_top = None
self.title_height = None
self.xaxis_label_height = None
self.line_height = None
self.line_width = None
self.logo_height = None
self.logo_width = None
self.creation_date = None
self.end_type = None
self.stack_height = self.stack_width * self.stack_aspect_ratio
# Attribute to test, test, error message
arg_conditions = (
("stacks_per_line", lambda x: x>0 , "Stacks per line must be positive."),
("stack_width", lambda x: x>0.0, "Stack width must be greater than zero."),
("stack_aspect_ratio" , lambda x: x>0, "Stack aspect ratio must be greater than zero."),
("fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("small_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("title_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("errorbar_fraction" , lambda x: x>=0.0 and x<=1.0,
"The visible fraction of the error bar must be between zero and one."),
("yaxis_tic_interval" , lambda x: x>=0.0 , "The yaxis tic interval cannot be negative."),
("yaxis_minor_tic_interval" , lambda x: not (x and x<0.0) , "Distances cannot be negative."),
("xaxis_tic_interval" , lambda x: x>0.0 , "Tic interval must be greater than zero."),
("number_interval" , lambda x: x>0.0 , "Invalid interval between numbers."),
("shrink_fraction" , lambda x: x>=0.0 and x<=1.0 , "Invalid shrink fraction."),
("stack_margin" , lambda x: x>0.0 , "Invalid stack margin."),
("logo_margin" , lambda x: x>0.0 , "Invalid logo margin."),
("stroke_width", lambda x: x>0.0 , "Invalid stroke width."),
("tic_length" , lambda x: x>0.0 , "Invalid tic length."),
)
# Run arguments tests. The second, attribute argument to the ArgumentError is
# used by the UI to provide user feedback.
# FIXME: More validation
for test in arg_conditions :
if not test[1]( getattr(self,test[0]) ) : raise ArgumentError(test[2], test[0])
# Inclusive upper and lower bounds
# FIXME: Validate here. Move from eps_formatter
if self.logo_start is None: self.logo_start = self.first_index
if self.logo_end is None :
self.logo_end = self.seqlen + self.first_index -1
self.total_stacks = self.logo_end - self.logo_start +1
if self.logo_start - self.first_index <0 :
raise ArgumentError(
"Logo range extends before start of available sequence.",
'logo_range')
if self.logo_end - self.first_index >= self.seqlen :
raise ArgumentError(
"Logo range extends beyond end of available sequence.",
'logo_range')
if self.logo_title : self.show_title = True
if not self.fineprint : self.show_fineprint = False
if self.xaxis_label : self.show_xaxis_label = True
if self.yaxis_label is None :
self.yaxis_label = self.unit_name
if self.yaxis_label :
self.show_yaxis_label = True
else :
self.show_yaxis_label = False
self.show_ends = False
if not self.yaxis_scale :
conversion_factor = std_units[self.unit_name]
if conversion_factor :
self.yaxis_scale=log(len(self.alphabet))*conversion_factor
else :
self.yaxis_scale=1.0 # probability units
if self.yaxis_scale<=0.0 :
raise ArgumentError("Invalid yaxis scale", 'yaxis_scale',)
if self.yaxis_tic_interval >= self.yaxis_scale:
self.yaxis_tic_interval /= 2.
self.yaxis_minor_tic_interval \
= float(self.yaxis_tic_interval)/self.yaxis_minor_tic_ratio
if self.color_scheme is None :
if self.alphabet in default_color_schemes :
self.color_scheme = default_color_schemes[self.alphabet]
else :
self.color_scheme = monochrome
self.lines_per_logo = 1+ ( (self.total_stacks-1) / self.stacks_per_line)
if self.lines_per_logo==1 and not self.pad_right:
self.stacks_per_line = min(self.stacks_per_line, self.total_stacks)
self.char_width = self.stack_width - 2* self.stack_margin
if self.show_yaxis :
self.line_margin_left = self.fontsize * 3.0
else :
self.line_margin_left = 0
if self.show_ends :
self.line_margin_right = self.fontsize *1.5
else :
self.line_margin_right = self.fontsize
if self.show_xaxis :
if self.rotate_numbers :
self.line_margin_bottom = self.number_fontsize *2.5
else:
self.line_margin_bottom = self.number_fontsize *1.5
else :
self.line_margin_bottom = 4
self.line_margin_top = 4
if self.show_title :
self.title_height = self.title_fontsize
else :
self.title_height = 0
self.xaxis_label_height =0.
if self.show_xaxis_label :
self.xaxis_label_height += self.fontsize
if self.show_fineprint :
self.xaxis_label_height += self.small_fontsize
self.line_height = (self.stack_height + self.line_margin_top +
self.line_margin_bottom )
self.line_width = (self.stack_width*self.stacks_per_line +
self.line_margin_left + self.line_margin_right )
self.logo_height = int(2*self.logo_margin + self.title_height \
+ self.xaxis_label_height + self.line_height*self.lines_per_logo)
self.logo_width = int(2*self.logo_margin + self.line_width )
self.creation_date = datetime.now().isoformat(' ')
end_type = '-'
end_types = {
unambiguous_protein_alphabet: 'p',
unambiguous_rna_alphabet: '-',
unambiguous_dna_alphabet: 'd'
}
if self.show_ends and self.alphabet in end_types:
end_type = end_types[self.alphabet]
self.end_type = end_type
if self.annotate is None :
self.annotate = []
for i in range(self.seqlen):
index = i + self.first_index
if index % self.number_interval == 0 :
self.annotate.append( "%d"%index)
else :
self.annotate.append("")
if len(self.annotate)!=self.seqlen :
raise ArgumentError(
"Annotations must be same length as sequences.",
'annotate')
# End __init__
# End class LogoFormat
# ------ Logo Formatters ------
# Each formatter is a function f(LogoData, LogoFormat, output file)
# that draws a representation of the logo into the given file.
# The main graphical formatter is eps_formatter. A mapping 'formatters'
# containing all available formatters is located after the formatter
# definitions.
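# Because every formatter shares this signature they are interchangeable.
# A minimal sketch (assumes 'seqs' was loaded with read_seq_data):
#
# data = LogoData.from_seqs(seqs)
# format = LogoFormat(data, LogoOptions())
# fout = open('logo.png', 'wb')
# formatters['png'](data, format, fout)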
def pdf_formatter(data, format, fout) :
""" Generate a logo in PDF format."""
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert('pdf', feps, fout, format.logo_width, format.logo_height)
def _bitmap_formatter(data, format, fout, device) :
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert(device, feps, fout,
format.logo_width, format.logo_height, format.resolution)
def jpeg_formatter(data, format, fout) :
""" Generate a logo in JPEG format."""
_bitmap_formatter(data, format, fout, device="jpeg")
def svg_formatter(data, format, fout) :
""" Generate a logo in Scalable Vector Graphics (SVG) format.
Requires the program 'pdf2svg' be installed.
"""
fpdf = StringIO()
pdf_formatter(data, format, fpdf)
fpdf.seek(0)
try:
command = find_command('pdf2svg')
except EnvironmentError:
raise EnvironmentError("Scalable Vector Graphics (SVG) format requires the program 'pdf2svg'. "
"Cannot find 'pdf2svg' on search path.")
import tempfile, os
fpdfi, fname_pdf = tempfile.mkstemp(suffix=".pdf")
fsvgi, fname_svg = tempfile.mkstemp(suffix=".svg")
try:
fpdf2 = open(fname_pdf, 'w')
fpdf2.write(fpdf.getvalue() )
fpdf2.seek(0)
args = [command, fname_pdf, fname_svg]
p = Popen(args)
(out,err) = p.communicate()
fsvg = open(fname_svg)
fout.write(fsvg.read())
finally:
os.remove(fname_svg)
os.remove(fname_pdf)
def png_formatter(data, format, fout) :
""" Generate a logo in PNG format."""
_bitmap_formatter(data, format, fout, device="png")
def png_print_formatter(data, format, fout) :
""" Generate a logo in PNG format with print quality (600 DPI) resolution."""
format.resolution = 600
_bitmap_formatter(data, format, fout, device="png")
def txt_formatter( logodata, format, fout) :
""" Create a text representation of the logo data.
"""
print >>fout, str(logodata)
def eps_formatter( logodata, format, fout) :
""" Generate a logo in Encapsulated Postscript (EPS)"""
substitutions = {}
from_format =[
"creation_date", "logo_width", "logo_height",
"lines_per_logo", "line_width", "line_height",
"line_margin_right","line_margin_left", "line_margin_bottom",
"line_margin_top", "title_height", "xaxis_label_height",
"creator_text", "logo_title", "logo_margin",
"stroke_width", "tic_length",
"stacks_per_line", "stack_margin",
"yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
"xaxis_label", "xaxis_tic_interval", "number_interval",
"fineprint", "shrink_fraction", "errorbar_fraction",
"errorbar_width_fraction",
"errorbar_gray", "small_fontsize", "fontsize",
"title_fontsize", "number_fontsize", "text_font",
"logo_font", "title_font",
"logo_label", "yaxis_scale", "end_type",
"debug", "show_title", "show_xaxis",
"show_xaxis_label", "show_yaxis", "show_yaxis_label",
"show_boxes", "show_errorbars", "show_fineprint",
"rotate_numbers", "show_ends", "stack_height",
"stack_width"
]
for s in from_format :
substitutions[s] = getattr(format,s)
substitutions["shrink"] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return " ".join( ("[",str(color.red) , str(color.green),
str(color.blue), "]"))
substitutions["default_color"] = format_color(format.default_color)
colors = []
for group in format.color_scheme.groups :
cf = format_color(group.color)
for s in group.symbols :
colors.append( " ("+s+") " + cf )
substitutions["color_dict"] = "\n".join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = std_units[format.unit_name]
data.append("StartLine")
seq_from = format.logo_start- format.first_index
seq_to = format.logo_end - format.first_index +1
# seq_index : zero based index into sequence data
# logo_index : User visible coordinate, first_index based
# stack_index : zero based index of visible stacks
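# Worked example (illustration): with first_index=1 and logo_start=3,
# seq_from is 2, so seq_index 2 is drawn as logo_index 3 and stack_index 0.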
for seq_index in range(seq_from, seq_to) :
logo_index = seq_index + format.first_index
stack_index = seq_index - seq_from
if stack_index!=0 and (stack_index % format.stacks_per_line) ==0 :
data.append("")
data.append("EndLine")
data.append("StartLine")
data.append("")
data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# if format.annotate[seq_index][-1] == "*":
# data.append("0 0 1 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# else:
# data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
if conv_factor:
stack_height = logodata.entropy[seq_index] * std_units[format.unit_name]
else :
stack_height = 1.0 # Probability
s = zip(logodata.counts[seq_index], logodata.alphabet)
def mycmp( c1, c2 ) :
# Sort by frequency. If equal frequency then reverse alphabetic
if c1[0] == c2[0] : return cmp(c2[1], c1[1])
return cmp(c1[0], c2[0])
s.sort(mycmp)
if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0 :
fraction_width = 1.0
if format.scale_width :
fraction_width = logodata.weight[seq_index]
# print >>sys.stderr, fraction_width
for c in s:
data.append(" %f %f (%s) ShowSymbol" % (fraction_width, c[0]*stack_height/C, c[1]) )
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and C>0.0:
low, high = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *=conv_factor
if high> format.yaxis_scale : high = format.yaxis_scale
down = (center - low)
up = (high - center)
data.append(" %f %f DrawErrorbar" % (down, up) )
data.append("EndStack")
data.append("")
data.append("EndLine")
substitutions["logo_data"] = "\n".join(data)
# Create and output logo
template = resource_string( __name__, 'template.eps', __file__)
logo = Template(template).substitute(substitutions)
print >>fout, logo
# map between output format names and logo
formatters = {
'eps': eps_formatter,
'pdf': pdf_formatter,
'png': png_formatter,
'png_print' : png_print_formatter,
'jpeg' : jpeg_formatter,
'svg' : svg_formatter,
'logodata' : txt_formatter,
}
default_formatter = eps_formatter
def parse_prior(composition, alphabet, weight=None) :
""" Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
- None or 'none' : No composition specified
- 'auto' or 'automatic': Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
- 'equiprobable' : All monomers have the same probability.
- a percentage, e.g. '45%' or a fraction '0.45':
The fraction of CG bases for nucleotide alphabets
- a species name, e.g. 'E. coli', 'H. sapiens' :
Use the average CG percentage for the species'
genome.
- An explicit distribution, e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None: return None
comp = composition.strip()
if comp.lower() == 'none': return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight<0 : raise ValueError("Weight cannot be negative.")
if comp.lower() == 'equiprobable' :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == 'auto' or comp.lower() == 'automatic':
if alphabet == unambiguous_protein_alphabet :
prior = weight * asarray(aa_composition, float64)
else :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG :
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == '%' :
prior = weight * base_distribution( float(comp[:-1]))
elif isfloat(comp) :
prior = weight * base_distribution( float(comp)*100. )
elif composition[0] == '{' and composition[-1] == '}' :
explicit = composition[1: -1]
explicit = explicit.replace(',',' ').replace("'", ' ').replace('"',' ').replace(':', ' ').split()
if len(explicit) != len(alphabet)*2 :
#print explicit
raise ValueError("Explicit prior does not match length of alphabet")
prior = - ones(len(alphabet), float64)
try :
for r in range(len(explicit)/2) :
letter = explicit[r*2]
index = alphabet.ord(letter)
value = float(explicit[r*2 +1])
prior[index] = value
except ValueError :
raise ValueError("Cannot parse explicit composition")
if any(prior==-1.) :
raise ValueError("Explicit prior does not match alphabet")
prior/= sum(prior)
prior *= weight
else :
raise ValueError("Unknown or malformed composition: %s"%composition)
if len(prior) != len(alphabet) :
raise ValueError(
"The sequence alphabet and composition are incompatible.")
return prior
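# Illustrative calls (a sketch; the default weight is sqrt(len(alphabet))):
#
# parse_prior('equiprobable', std_alphabets['dna']) # flat prior over A,C,G,T
# parse_prior('40%', std_alphabets['dna']) # 40% CG composition
# parse_prior("{'A':10,'C':40,'G':40,'T':10}", std_alphabets['dna'])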
def base_distribution(percentCG) :
A = (1. - (percentCG/100.))/2.
C = (percentCG/100.)/2.
G = (percentCG/100.)/2.
T = (1. - (percentCG/100.))/2.
return asarray((A,C,G,T), float64)
def equiprobable_distribution( length) :
return ones( (length), float64) /length
def read_seq_data(lines,
input_parser=seq_io.read,
alphabet=None,
ignore_lower_case=False,
max_file_size=0):
""" Read sequence data from the input stream and return a seqs object.
The environment variable WEBLOGO_MAX_FILE_SIZE overrides the max_file_size argument.
Used to limit the load on the WebLogo webserver.
"""
seqs = input_parser(lines)
if seqs is None or len(seqs) ==0 :
raise ValueError("Please provide a multiple sequence alignment")
if ignore_lower_case :
# Case is significant. Do not count lower case letters.
for i,s in enumerate(seqs) :
seqs[i] = s.mask()
# Add alphabet to seqs.
if alphabet :
seqs.alphabet = alphabet
else :
seqs.alphabet = Alphabet.which(seqs)
return seqs
class LogoData(object) :
"""The data needed to generate a sequence logo.
- alphabet
- length
- counts -- An array of character counts
- entropy -- The relative entropy of each column
- entropy_interval -- entropy confidence interval
"""
def __init__(self, length=None, alphabet = None, counts =None,
entropy =None, entropy_interval = None, weight=None) :
"""Creates a new LogoData object"""
self.length = length
self.alphabet = alphabet
self.counts = counts
self.entropy = entropy
self.entropy_interval = entropy_interval
self.weight = weight
@classmethod
def from_counts(cls, alphabet, counts, prior= None):
"""Build a LogoData object from counts."""
# Counts is a Motif object?
#counts = counts.array
seq_length, A = counts.shape
if prior is not None: prior = array(prior, float64)
if prior is None or sum(prior)==0.0:
R = log(A)
ent = zeros( seq_length, float64)
entropy_interval = None
for i in range (0, seq_length) :
C = sum(counts[i])
#FIXME: fixup corebio.moremath.entropy()?
if C == 0 :
ent[i] = 0.0
else :
ent[i] = R - entropy(counts[i])
else :
ent = zeros( seq_length, float64)
entropy_interval = zeros( (seq_length,2) , float64)
R = log(A)
for i in range (0, seq_length) :
alpha = array(counts[i] , float64)
alpha += prior
posterior = Dirichlet(alpha)
ent[i] = posterior.mean_relative_entropy(prior/sum(prior))
entropy_interval[i][0], entropy_interval[i][1] = \
posterior.interval_relative_entropy(prior/sum(prior), 0.95)
weight = array( na.sum(counts,axis=1) , float)
weight /= max(weight)
return cls(seq_length, alphabet, counts, ent, entropy_interval, weight)
@classmethod
def from_seqs(cls, seqs, prior= None):
"""Build a LogoData object from a SeqList, a list of sequences."""
# --- VALIDATE DATA ---
# check that at least one sequence of length at least 1 long
if len(seqs)==0 or len(seqs[0]) ==0:
raise ValueError("No sequence data found.")
# Check sequence lengths
seq_length = len(seqs[0])
for i,s in enumerate(seqs) :
#print i,s, len(s)
#TODO: Redundant? Should be checked in SeqList?
if seq_length != len(s) :
raise ArgumentError(
"Sequence number %d differs in length from the previous sequences" % (i+1) ,
'sequences')
# FIXME: Check seqs.alphabet?
counts = seqs.profile()
return cls.from_counts(seqs.alphabet, counts, prior)
def __str__(self) :
out = StringIO()
print >>out, '## LogoData'
print >>out, '# First column is position number, counting from zero'
print >>out, '# Subsequent columns are raw symbol counts'
print >>out, '# Entropy is mean entropy measured in nats.'
print >>out, '# Low and High are the 95% confidence limits.'
print >>out, '# Weight is the fraction of non-gap symbols in the column.'
print >>out, '#\t'
print >>out, '#\t',
for a in self.alphabet :
print >>out, a, '\t',
print >>out, 'Entropy\tLow\tHigh\tWeight'
for i in range(self.length) :
print >>out, i+1, '\t',
for c in self.counts[i] : print >>out, c, '\t',
print >>out, "%6.4f" % self.entropy[i], '\t',
if self.entropy_interval is not None:
print >>out, "%6.4f" % self.entropy_interval[i][0], '\t',
print >>out, "%6.4f" % self.entropy_interval[i][1], '\t',
else :
print >>out, '\t','\t',
if self.weight is not None :
print >>out, "%6.4f" % self.weight[i],
print >>out, ''
print >>out, '# End LogoData'
return out.getvalue()
|
gpl-3.0
| 6,209,968,925,322,337,000
| 34.265358
| 137
| 0.570637
| false
| 3.89768
| false
| false
| false
|
mredar/ucldc_oai_harvest
|
oai_harvester/oai_harvester.py
|
1
|
6784
|
#! /usr/bin/env python
'''UCLDC OAI Harvester: Collects records from OAI interfaces and inputs to
basic solr schema. Driven off the collection registry'''
'''
imagining right now that this will be woken by a crontab. It then looks at the
incoming queue and processes any "READY" msgs (maybe only ready ones there)
Should we have a number of queues: ready, processing, errors?
Actually, SQS uses a visibility timeout to make msgs "invisible" while being processed. The client process can extend the timeout if necessary. We may need similar behavior here.
while a msg in queue:
get msg and set timeout?
harvest from msg
delete message from queue
'''
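# Outline of the loop described above (illustration only; the real version,
# with the SQS visibility-timeout handling, is process_oai_queue() below):
#
# while the incoming queue has a message:
# copy it to the "harvesting" queue and delete it from the incoming queue
# harvest_to_solr_oai_set(json.loads(message_body))
# on failure, write the message plus traceback to the error queue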
import sys
import csv
import os
import codecs
import datetime
import time
import logging
logging.basicConfig(level=logging.INFO)
import json
import traceback
import hashlib
from sickle import Sickle
from sickle.models import Record
import solr
from lxml import etree
import boto.sqs as sqs
import dateutil.parser
QUEUE_OAI_HARVEST = os.environ.get('QUEUE_OAI_HARVEST', 'OAI_harvest')
QUEUE_OAI_HARVEST_ERR = os.environ.get('QUEUE_OAI_HARVEST_ERR', 'OAI_harvest_error')
QUEUE_OAI_HARVEST_HARVESTING = os.environ.get('QUEUE_OAI_HARVEST_HARVESTING', 'OAI_harvest_harvesting')
#INTIAL dev machine (nutch-dev) URL_SOLR = os.environ.get('URL_SOLR', 'http://54.243.192.165:8080/solr/dc-collection/')
URL_SOLR = os.environ.get('URL_SOLR', 'http://107.21.228.130:8080/solr/dc-collection/')
SQS_CONNECTION = sqs.connect_to_region('us-east-1')
def harvest_to_solr_oai_set(oai_set):
'''Harvest the oai set and return a list of records?
The oai_set is the message dict from SQS'''
client=Sickle(oai_set['url'])
records = client.ListRecords(set=oai_set['set_spec'], metadataPrefix='oai_dc')
n = 0
dt_start = datetime.datetime.now()
for rec in records:
n += 1
dt_iter = datetime.datetime.now()
elapsed_time = (dt_iter -dt_start).seconds
if (n % 100) == 0:
logging.info("Set has taken :" + str(elapsed_time) + " seconds.")
logging.info("OAI REC NUM: " + str(n) + " SET:" + str(oai_set))
solr_index_record(rec, extra_metadata=oai_set)
def datetime_to_solr_date(dt):
'''Return the UTC solr style date string for the given datetime object
'''
#need to get zulu time for obj?
return dt.isoformat() + 'Z'
# doesn't work for dates < 1900 return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
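# Example (illustration): datetime(2014, 7, 1, 12, 30, 5) -> '2014-07-01T12:30:05Z'.
# Naive datetimes are assumed to already be in UTC here.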
def get_md5_id_from_oai_identifiers(ids):
'''From a list of oai identifier fields, pick a URL and convert to md5
to use as solr id
'''
for i in ids:
if i[:5] == 'http:':
md5= hashlib.md5()
md5.update(i)
return md5.hexdigest()
raise Exception("NO URL found in identifiers")
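# Example (hypothetical identifiers): ['oai:foo:1', 'http://example.org/item/1']
# hashes the http URL, so the solr id stays stable across re-harvests.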
def solr_index_record(sickle_rec, extra_metadata=None):
'''Index the sickle record object in solr'''
#TODO: make this global for efficiency?
s = solr.Solr(URL_SOLR)
sdoc = sickle_rec.metadata
#use URL identifier md5 hash as id
#should probably move to solr, to help with other inputs
sdoc['id'] = get_md5_id_from_oai_identifiers(sdoc['identifier'])
oai_dt = datetime_to_solr_date(dateutil.parser.parse(sickle_rec.header.datestamp))
#collisions here?
#sdoc['title_exact'] = sdoc['title'][0]
# how to make created write once, then read only - update processor in
# solr
sdoc['created'] = sdoc['last_modified'] = oai_dt
if 'campus' in extra_metadata:
sdoc['campus'] = []
for campus in extra_metadata['campus']:
if 'publisher' in sdoc:
sdoc['publisher'].append(campus['name'])
else:
sdoc['publisher'] = [campus['name'],]
sdoc['campus'].append(campus['name'])
if 'repository' in extra_metadata:
sdoc['repository'] = []
for repository in extra_metadata['repository']:
if 'publisher' in sdoc:
sdoc['publisher'].append(repository['name'])
else:
sdoc['publisher'] = [repository['name'],]
sdoc['repository'].append(repository['name'])
sdoc['collection_name'] = extra_metadata['collection_name']
#convert various dc dates into solr date fields
#need date_facet, date_facet_start, date_facet_end?
#for each dc date value parse into one or more values.
#if exists, update, so later values not deleted
if s.select('id:'+sdoc['id']):
logging.info( 'Updating:'+sdoc['id'])
s.update(sdoc, commit=True)
else:
logging.info( 'Adding:'+sdoc['id'])
s.add(sdoc, commit=True)
def delete_msg_by_content_from_queue(q, msg):
'''Can't just hold an added message object, must retrieve from
queue and then delete. Just delete the first matching body
'''
m = q.read()
while m:
if m.get_body() == msg.get_body():
m.delete()
return
m = q.read()
def process_oai_queue():
'''Run on any messages in the OAI_harvest queue'''
q_oai = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST)
q_harvesting = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST_HARVESTING)
n = 0
m = q_oai.read()
while m:
m_harvesting = q_harvesting.write(m)
q_oai.delete_message(m) #delete, will pass result to another queue
n += 1
dt_start = datetime.datetime.now()
logging.info("\n" + str(dt_start) + " START MESSAGE " + str(n) + "\n\n")
msg_dict = json.loads(m.get_body())
#msg_dict is {url:XX, set_spec:YY, campus:[{resource_uri:ZZ, slug:TT, name: QQ},]}
logging.info(msg_dict)
try:
harvest_to_solr_oai_set(msg_dict)
dt_end = datetime.datetime.now()
logging.info("\n\n\n============== " + str((dt_end-dt_start).seconds) + " seconds Done with Message:" + str(n) + " : " + m.get_body() + "\n\n\n\n")
except Exception, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
# add message to error q
q_err = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST_ERR)
msg_dict['exceptinfo'] = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
logging.error(str(msg_dict))
msg = json.dumps(msg_dict)
q_msg = sqs.message.Message()
q_msg.set_body(msg)
status = q_err.write(q_msg)
time.sleep(10) #make sure harvesting message back on queue
# this doesn't work, need to "read" the message from queue to
# get a receipt handle that can be used to delete
delete_msg_by_content_from_queue(q_harvesting, m_harvesting)
m = q_oai.read()
def main(args):
process_oai_queue()
if __name__=='__main__':
#TODO: test here?
main(sys.argv)
|
bsd-3-clause
| -3,734,049,030,187,922,000
| 38.672515
| 167
| 0.630159
| false
| 3.343519
| false
| false
| false
|
egitto/parchment-and-copper
|
scratch/cryptopals/ctr.py
|
1
|
1530
|
from cbc import CBC_encrypt
from ecb import ECB_encrypt
from bytestring_tools import xor, data
from math import ceil
def counter_function(n):
return int.to_bytes(n,9,'big')+b'\x00'*7
def CTR_keystream(key,counter_function,length,start):
# start = nonce = first byte we haven't generated keystream for yet
# I have a feeling this isn't how I should use nonces, but
# this matches the problem specification. Should have a more flexible way, though
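# Worked example (illustration): start=20 -> block_n=1, n=4, so the keystream
# begins at byte 4 of encrypted counter block 1.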
block_n = start//16
n = start%16
accum = ECB_encrypt(counter_function(block_n),key)[n:]
for _ in range(ceil((length)/16)):
block_n += 1
accum += ECB_encrypt(counter_function(block_n),key)
return accum[:length]
def CTR_encrypt(_bytes,key,counter_function=counter_function,nonce=0):
return xor(_bytes,CTR_keystream(key,counter_function,len(_bytes),nonce))
class CTR_cypher():
def __init__(self,key,counter_function=counter_function,nonce=0):
self.key = key
self.counter_function = counter_function
self.zero = nonce
self.n = nonce
def set_offset(self,offset):
self.n = self.zero + offset
def encrypt(self,_bytes):
x = CTR_encrypt(_bytes,self.key,self.counter_function,self.n)
self.n += len(_bytes)
return x
# a = CTR_cypher(b'yellow submarine',counter_function,0)
# b = a.encrypt(b'some stuff')
# a.set_offset(0)
# print(a.encrypt(b))
# cyphertext = data('L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvoOLSFQ==','b64').bytes
# print(CTR_encrypt(cyphertext,"YELLOW SUBMARINE",counter_function,0))
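# Round-trip sketch (assumes the same key and default nonce): CTR decryption
# is just encryption applied again.
# key = b'yellow submarine'
# ct = CTR_encrypt(b'attack at dawn', key)
# assert CTR_encrypt(ct, key) == b'attack at dawn'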
|
gpl-3.0
| 8,308,754,510,609,592,000
| 33
| 107
| 0.713725
| false
| 2.931034
| false
| false
| false
|
siconos/siconos-deb
|
examples/Control/Zhuravlev/ZhuravlevIvanovMCP_C.py
|
1
|
4846
|
import siconos.numerics as SN
import numpy as np
import matplotlib.pyplot as plt
try:
from cffi import FFI
except ImportError:
import sys
print('no cffi module installed, exiting')
sys.exit(0)
withPlot = False
if __name__ == '__main__':
xk = np.array((1., 10.))
T = 10.0
t = 0.0
h = 1e-3
z = np.zeros((4,))
w = np.empty((4,))
kappa = 0.9
g = 9.81
theta = 1.0
gamma = 1.0
mcp = SN.MixedComplementarityProblem2(0, 4)
ffi = FFI()
ffi.cdef('void set_cstruct(uintptr_t p_env, void* p_struct);')
ffi.cdef('''typedef struct
{
int id;
double* xk;
double h;
double theta;
double gamma;
double g;
double kappa;
unsigned int f_eval;
unsigned int nabla_eval;
} data;
''')
data_struct = ffi.new('data*')
data_struct.id = -1 # to avoid freeing the data in the destructor
data_struct.xk = ffi.cast('double *', xk.ctypes.data)
data_struct.h = h
data_struct.theta = theta
data_struct.gamma = gamma
data_struct.g = g
data_struct.kappa = kappa
D = ffi.dlopen(SN._numerics.__file__)
D.set_cstruct(mcp.get_env_as_long(), ffi.cast('void*', data_struct))
mcp.set_compute_F_and_nabla_F_as_C_functions('ZhuravlevIvanov.so', 'compute_Fmcp', 'compute_nabla_Fmcp')
SO=SN.SolverOptions(mcp, SN.SICONOS_MCP_NEWTON_FBLSA)
SO.dparam[0] = 1.0e-24
SO.iparam[0] = 150
SO.iparam[3] = 2
SO.iparam[4] = 10
N = int(T/h + 10)
print(N)
lambdaPM = np.empty((N, 4))
signs = np.empty((N, 2))
sol = np.empty((N, 2))
sol[0, :] = xk
k = 0
while t <= T:
k += 1
info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
#info = SN.mcp_newton_FBLSA(mcp, z, w, SO)
#print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
if info > 0:
#zi_syst.compute_Fmcp(0, 4, z, w)
sol[k, 0] = w[0] - z[1]
sol[k, 1] = w[2] - z[3]
if sol[k, 0] < -1e-7 and np.abs(z[1]) < 1e-10:
z[1] = -sol[k, 0]
z[0] = 1.0
if xk[1] < -1e-7 and np.abs(z[3]) < 1e-10:
z[3] = -sol[k, 1]
z[2] = 1.0
if z[1] < -1e-7:
z[1] = 0.0
z[0] = 0.0
if z[3] < -1e-7:
z[3] = 0.0
z[2] = 0.0
if z[1] > 1e-7 and z[0] < 1.0 - 1e-7:
z[0] = 1.0
if z[3] > 1e-7 and z[2] < 1.0 - 1e-7:
z[2] = 1.0
info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
if info >0:
print('MCP solver failed ! info = {:}'.format(info))
print(xk)
print(z)
print(w)
# else:
# print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
#zi_syst.compute_Fmcp(0 ,4, z, w)
sol[k, 0] = w[0] - z[1]
sol[k, 1] = w[2] - z[3]
xk[:] = sol[k, :]
signs[k, 0] = z[0] - w[1]
signs[k, 1] = z[2] - w[3]
t = k*h
#z[:] = 0.0
print('f_eval', data_struct.f_eval, 'nabla_eval', data_struct.nabla_eval)
# np.savetxt("dataZIsol.txt", sol)
# np.savetxt("dataZIlambdaPM.txt", lambdaPM)
# np.savetxt("dataZIsign.txt", signs)
if withPlot:
plt.figure()
plt.plot(sol[:, 0], sol[:, 1], 'b-*')
plt.xlabel('s')
plt.ylabel('v')
plt.figure()
plt.plot(sol[:, 0], label=r's')
plt.plot(sol[:, 1], label=r'v')
plt.legend(loc='best')
plt.figure()
plt.plot(signs[:, 0], label=r'$\lambda_1$')
plt.plot(signs[:, 1], label=r'$\lambda_2$')
plt.legend(loc='best')
plt.show()
pos = np.abs(sol[:, 0])
velocity = (1 - kappa*np.sign(sol[:, 0]*sol[:, 1]))*sol[:, 1]*np.sign(sol[:, 0])
plt.subplot(211)
plt.title('position')
plt.plot(pos)
plt.grid()
plt.subplot(212)
plt.title('velocity')
plt.plot(velocity)
plt.grid()
# plt.subplot(313)
# plt.title('control input')
# plt.plot(dataPlot[:,0], control)
# plt.grid()
plt.show()
# indx = np.nonzero(dataPlot[:, 0]>30)
# ttt = dataPlot[indx, 0].flatten()
#
# plt.subplot(311)
# plt.title('position')
# plt.plot(ttt, pos[indx])
# plt.grid()
# plt.subplot(312)
# plt.title('velocity')
# plt.plot(ttt, velocity[indx])
# plt.grid()
## plt.subplot(313)
## plt.title('control input')
## plt.plot(ttt, control[indx])
# plt.grid()
# plt.show()
|
apache-2.0
| 1,840,833,609,540,054,800
| 26.072626
| 108
| 0.470491
| false
| 2.751846
| false
| false
| false
|
tinyms/ArchiveX
|
tinyms/bpm/entity.py
|
1
|
2197
|
__author__ = 'tinyms'
#coding=UTF8
from sqlalchemy import Column, Integer, String, Text, LargeBinary, DateTime
from tinyms.core.orm import Simplify, Entity, many_to_one, many_to_many
# Notify the engine to process a node
@many_to_one("BPMProcessInstance")
class BPMWorkflow(Entity, Simplify):
node_id = Column(Integer(), nullable=False)
# Behavior: 'execute', 'leave', 'execute-leave'
behavior = Column(String(20))
params = Column(Text())
# Process definition
class BPMProcessDef(Entity, Simplify):
name = Column(String(100), nullable=False, unique=True)
# JSON format or fully-qualified class name
define = Column(Text(), nullable=False)
# Whether released as an available process: 1 = yes, 0 = no
release = Column(Integer(), default=0)
# Only permitted users may use this process
security_point = Column(String(60))
# Process instance
@many_to_one("BPMProcessDef")
@many_to_one("Archives")
class BPMProcessInstance(Entity, Simplify):
# Serialized instance data
bin = Column(LargeBinary(), nullable=False)
# Whether the instance is finished: 1 = finished, 0 = not finished
finish = Column(Integer(), default=0)
start_time = Column(DateTime(), nullable=False)
end_time = Column(DateTime())
# Process instance variables
@many_to_one("BPMProcessInstance")
class BPMProcessVars(Entity, Simplify):
name = Column(String(255), nullable=False)
val = Column(Text())
@many_to_one("BPMProcessInstance")
class BPMProcessInstanceNotify(Entity, Simplify):
node_id = Column(Integer(), nullable=False)
tip_content = Column(Text(), nullable=False)
#wait,finish
result = Column(String(20))
@many_to_one("BPMProcessInstance")
class BPMWorklist(Entity, Simplify):
task_name = Column(String(255), nullable=False)
forms = Column(Text(), nullable=False)
# Expires after this many hours; the process then ends automatically
valid_time_space = Column(Integer(), default=0)
expired = Column(Integer(), default=0)
create_time = Column(DateTime(), nullable=False)
finish_time = Column(DateTime())
# Completed by, from Archives
worker = Column(Integer())
#@many_to_one("BPMWorklist")
#@many_to_one("Archives")
#class BPMWorklistAuth(Entity, Simplify):
# # Whether editing is allowed
# editable = Column(Integer(), nullable=False)
|
bsd-3-clause
| 3,168,598,971,852,385,300
| 28.028986
| 75
| 0.695457
| false
| 2.793584
| false
| false
| false
|
simonsdave/yar
|
yar/key_service/async_creds_retriever.py
|
1
|
2387
|
"""This module contains functionality to async'ly retrieve
credentials from the key store."""
import httplib
import logging
from ks_util import filter_out_non_model_creds_properties
from ks_util import AsyncAction
_logger = logging.getLogger("KEYSERVICE.%s" % __name__)
class AsyncCredsRetriever(AsyncAction):
def fetch(self,
callback,
key=None,
principal=None,
is_filter_out_non_model_properties=False):
self._key = key
self._principal = principal
self._callback = callback
self._is_filter_out_non_model_properties = \
is_filter_out_non_model_properties
if key:
fmt = '_design/by_identifier/_view/by_identifier?key="%s"'
path = fmt % key
else:
fmt = '_design/by_principal/_view/by_principal?key="%s"'
path = fmt % principal
self.async_req_to_key_store(
path,
"GET",
None,
self._on_async_req_to_key_store_done)
def _on_async_req_to_key_store_done(self, is_ok, code=None, body=None):
"""Called when async_req_to_key_store() is done."""
if not is_ok or httplib.OK != code or body is None:
self._callback(None, None)
return
creds = []
for row in body.get("rows", []):
doc = row.get("value", {})
if self._is_filter_out_non_model_properties:
doc = filter_out_non_model_creds_properties(doc)
creds.append(doc)
if self._key:
            # asked to retrieve a single set of creds so
# expecting 1 or 0 values in "creds"
num_creds = len(creds)
if 0 == num_creds:
self._callback(None, False)
else:
if 1 == num_creds:
self._callback(creds[0], False)
else:
# this is an error case with either the view or the
                    # data in the key store - we should never get here.
fmt = (
"Got %d docs from Key Store for key '%s'. "
"Expected 1 or 0 docs."
)
_logger.error(fmt, num_creds, self._key)
self._callback(None, None)
else:
self._callback(creds, True)
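# Illustrative note, not part of the original module: a minimal sketch of the
# callback contract implemented above, assuming "retriever" is an already
# constructed AsyncCredsRetriever (the AsyncAction base class is not shown in
# this file) and the key identifier is a placeholder.
#
#   def on_creds(creds, is_collection):
#       if creds is None:
#           pass    # key store error, ambiguous key, or no match for a key
#       elif is_collection:
#           pass    # list of creds dicts from a principal lookup
#       else:
#           pass    # single creds dict from a key lookup
#
#   retriever.fetch(on_creds, key="some-mac-key-identifier")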
|
mit
| 6,111,016,473,754,680,000
| 30.826667
| 75
| 0.516967
| false
| 4.158537
| false
| false
| false
|
j-dasilva/COMP4350
|
apartment/rest/serializers.py
|
1
|
1201
|
from rest_framework import serializers
class MessageSerializer(serializers.Serializer):
sender = serializers.CharField(max_length=32)
recipient = serializers.CharField(max_length=32)
urgency = serializers.IntegerField()
content = serializers.CharField(max_length=256)
timestamp = serializers.IntegerField()
read = serializers.BooleanField()
class BulletinSerializer(serializers.Serializer):
sender = serializers.CharField(max_length=32)
timestamp = serializers.IntegerField()
subject = serializers.CharField(max_length=256)
content = serializers.CharField(max_length=256)
class CommentSerializer(serializers.Serializer):
bulletin_reference = serializers.CharField(max_length=128)
timestamp = serializers.IntegerField()
sender = serializers.CharField(max_length=32)
content = serializers.CharField(max_length=256)
class EventSerializer(serializers.Serializer):
timestamp = serializers.IntegerField()
sender = serializers.CharField(max_length=32)
content = serializers.CharField(max_length=256)
starttime = serializers.IntegerField()
endtime = serializers.IntegerField()
title = serializers.CharField(max_length=256)
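# Illustrative usage sketch, not part of the original module; the field values
# below are placeholders and follow the standard Django REST Framework flow:
#
#   payload = {'sender': 'alice', 'recipient': 'bob', 'urgency': 1,
#              'content': 'hello', 'timestamp': 1400000000, 'read': False}
#   serializer = MessageSerializer(data=payload)
#   if serializer.is_valid():
#       validated = serializer.validated_data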
|
gpl-2.0
| 6,784,690,892,494,369,000
| 36.5625
| 62
| 0.766861
| false
| 4.464684
| false
| false
| false
|
bowen0701/algorithms_data_structures
|
lc0419_battleships_in_a_board.py
|
1
|
3508
|
"""Leetcode 419. Battleships in a Board
Medium
Given a 2D board, count how many battleships are in it.
The battleships are represented with 'X's, empty slots are represented with '.'s.
You may assume the following rules:
- You receive a valid board, made of only battleships or empty slots.
- Battleships can only be placed horizontally or vertically. In other words,
they can only be made of the shape 1xN (1 row, N columns) or
Nx1 (N rows, 1 column), where N can be of any size.
- At least one horizontal or vertical cell separates between two battleships -
there are no adjacent battleships.
Example:
X..X
...X
...X
In the above board there are 2 battleships.
Invalid Example:
...X
XXXX
...X
This is an invalid board that you will not receive -
as battleships will always have a cell separating between them.
Follow up:
Could you do it in one-pass, using only O(1) extra memory and
without modifying the value of the board?
"""
class SolutionDFSRecur(object):
def _dfsRecur(self, board, r, c):
# Check exit condition: out of boundary or empty.
if (r < 0 or r >= len(board) or c < 0 or c >= len(board[0]) or
board[r][c] == '.'):
return None
# Update board as visited.
board[r][c] = '.'
# Recursively DFS 4 dirs: up, down, left, and right.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
self._dfsRecur(board, r_next, c_next)
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
Time complexity: O(m*n), where
- m: number of rows.
- n: number of columns.
Space complexity: O(m*n).
"""
if not board or not board[0]:
return 0
count = 0
# For each slot, start DFS if satisfies entry condition.
for r in range(len(board)):
for c in range(len(board[0])):
if board[r][c] == 'X':
count += 1
self._dfsRecur(board, r, c)
return count
class SolutionCheckFirst(object):
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(1).
"""
if not board or not board[0]:
return 0
count = 0
# Start from top-left to check the 1st only.
for r in range(len(board)):
for c in range(len(board[0])):
# Skip if empty.
if board[r][c] == '.':
continue
# Skip if its up is 'X'.
if r > 0 and board[r - 1][c] == 'X':
continue
# Skip if its left is 'X'.
if c > 0 and board[r][c - 1] == 'X':
continue
count += 1
return count
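# Worked trace of the check-first idea, illustrative comment only, on the
# example board from the docstring above:
#
#   X..X    (0,0): 'X' with no cell above or to the left  -> count = 1
#   ...X    (0,3): 'X' and its left neighbour is '.'      -> count = 2
#   ...X    (1,3), (2,3): the cell above is 'X'           -> skipped
#
# so countBattleships returns 2, the expected answer.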
def main():
import time
print 'By DFS recur:'
start_time = time.time()
board = [['X','.','.','X'],
['.','.','.','X'],
['.','.','.','X']]
print SolutionDFSRecur().countBattleships(board)
print 'Time:', time.time() - start_time
print 'By checking the 1st:'
start_time = time.time()
board = [['X','.','.','X'],
['.','.','.','X'],
['.','.','.','X']]
print SolutionCheckFirst().countBattleships(board)
print 'Time:', time.time() - start_time
if __name__ == '__main__':
main()
|
bsd-2-clause
| -7,352,669,705,755,854,000
| 26.622047
| 81
| 0.531357
| false
| 3.64657
| false
| false
| false
|
firulais/snap-RPi
|
RPiGPIO.py
|
1
|
3629
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Snap! extension to support Raspberry Pi -- server component.
Copyright (C) 2014 Paul C. Brown <p_brown@gmx.com>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import http.server
import os
import re
import socketserver
import urllib.request
import logging
if __debug__:
import RPi.GPIO as GPIO
else:
import MockupRPi.GPIO as GPIO
class CORSHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
regex = re.compile('.*pin=([0-9]*).*state=(LOW|HIGH)')
ospath = os.path.abspath('')
def send_head(self):
path = self.path
logging.info(path)
# path looks like this:
# /pinwrite?pin=1&state=LOW
# or
# /pinread?pin=1&state=LOW
self.pin = 0
self.state = False
GPIO.setmode(GPIO.BCM)
m = self.regex.match(path)
if 'pinwrite' in path: # write HIGH or LOW to pin
self.pin = int(m.group(1))
self.state = True
if m.group(2) == 'LOW':
self.state = False
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, self.state)
#The Snap! block reports the body of the Web server’s response
#(minus HTTP header), without interpretation.
#At a minimum, we must provide a header with a status line and a date.
self.send_response(200)
self.send_header('Date', self.date_time_string())
self.end_headers()
elif 'pinread' in path:
# Read state of pin.
self.pin = int(m.group(1))
self.state = True
if m.group(2) == 'LOW':
self.state = False
f = open(self.ospath + '/return', 'w+')
GPIO.setup(self.pin, GPIO.IN)
if GPIO.input(self.pin) == self.state:
f.write(str(True))
else:
f.write(str(False))
f.close()
f = open(self.ospath + '/return', 'rb')
ctype = self.guess_type(self.ospath + '/rpireturn')
#create minimal response
self.send_response(200)
self.send_header('Date', self.date_time_string())
self.send_header('Content-type', ctype)
fs = os.fstat(f.fileno())
self.send_header('Content-Length', str(fs[6]))
self.send_header('Last-Modified',
self.date_time_string(fs.st_mtime))
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
return f
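# Illustrative note, not part of the original file: with the server below
# listening on port 8280, Snap! (or any HTTP client) drives a pin through URLs
# of the form parsed above, for example (host and pin are placeholders):
#
#   http://raspberrypi.local:8280/pinwrite?pin=18&state=HIGH  -> drive GPIO18 high
#   http://raspberrypi.local:8280/pinread?pin=18&state=HIGH   -> body is "True" or "False"
#
# The regex captures pin='18' and state='HIGH' in both cases.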
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, handlers=[logging.FileHandler("access.log"), logging.StreamHandler()])
PORT = 8280 # R+P in ASCII Decimal
Handler = CORSHTTPRequestHandler
httpd = socketserver.TCPServer(('', PORT), Handler)
logging.info('serving at port ' + str(PORT))
print('Go ahead and launch Snap!')
httpd.serve_forever()
|
gpl-3.0
| -8,815,553,636,177,159,000
| 29.225
| 114
| 0.587538
| false
| 3.908405
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/network_interface_ip_configuration.py
|
1
|
5689
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class NetworkInterfaceIPConfiguration(SubResource):
"""IPConfiguration in a network interface.
:param id: Resource ID.
:type id: str
:param application_gateway_backend_address_pools: The reference of
ApplicationGatewayBackendAddressPool resource.
:type application_gateway_backend_address_pools:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationGatewayBackendAddressPool]
:param load_balancer_backend_address_pools: The reference of
LoadBalancerBackendAddressPool resource.
:type load_balancer_backend_address_pools:
list[~azure.mgmt.network.v2017_10_01.models.BackendAddressPool]
:param load_balancer_inbound_nat_rules: A list of references of
LoadBalancerInboundNatRules.
:type load_balancer_inbound_nat_rules:
list[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]
:param private_ip_address: Private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: Defines how a private IP address is
assigned. Possible values are: 'Static' and 'Dynamic'. Possible values
include: 'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2017_10_01.models.IPAllocationMethod
:param private_ip_address_version: Available from Api-Version 2016-03-30
onwards, it represents whether the specific ipconfiguration is IPv4 or
IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
Possible values include: 'IPv4', 'IPv6'
:type private_ip_address_version: str or
~azure.mgmt.network.v2017_10_01.models.IPVersion
:param subnet: Subnet bound to the IP configuration.
:type subnet: ~azure.mgmt.network.v2017_10_01.models.Subnet
:param primary: Gets whether this is a primary customer address on the
network interface.
:type primary: bool
:param public_ip_address: Public IP address bound to the IP configuration.
:type public_ip_address:
~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:param application_security_groups: Application security groups in which
the IP configuration is included.
:type application_security_groups:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup]
:param provisioning_state: The provisioning state of the network interface
IP configuration. Possible values are: 'Updating', 'Deleting', and
'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkInterfaceIPConfiguration, self).__init__(**kwargs)
self.application_gateway_backend_address_pools = kwargs.get('application_gateway_backend_address_pools', None)
self.load_balancer_backend_address_pools = kwargs.get('load_balancer_backend_address_pools', None)
self.load_balancer_inbound_nat_rules = kwargs.get('load_balancer_inbound_nat_rules', None)
self.private_ip_address = kwargs.get('private_ip_address', None)
self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
self.private_ip_address_version = kwargs.get('private_ip_address_version', None)
self.subnet = kwargs.get('subnet', None)
self.primary = kwargs.get('primary', None)
self.public_ip_address = kwargs.get('public_ip_address', None)
self.application_security_groups = kwargs.get('application_security_groups', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
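# Illustrative usage sketch, not part of the generated model; every value is a
# placeholder. Properties left unset default to None through the kwargs.get()
# calls above.
#
#   ip_config = NetworkInterfaceIPConfiguration(
#       name='ipconfig1',
#       private_ip_allocation_method='Dynamic',
#       primary=True)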
|
mit
| -2,864,916,992,377,040,400
| 55.89
| 163
| 0.687291
| false
| 4.031892
| true
| false
| false
|
gaccardo/buxfer_api
|
api/buxfer.py
|
1
|
3631
|
import requests
import settings
from pybles import pybles
from account import Account
from transaction import Transaction
from budget import Budget
from reminder import Reminder
requests.packages.urllib3.disable_warnings()
class ErrorWithBuxferAPI( Exception ): pass
class BuxferAPIUnauthorized( Exception ): pass
class BuxferAPI( object ):
def __init__(self):
self.base_url = settings.BASE_URL
self.token = None
def __get_request(self, resource):
url = "%s/%s?token=%s" % (self.base_url,
resource, self.token)
response = requests.get(url)
if response.status_code == 400:
error = response.json()
error = error['error']
print "ERROR"
print "* Resource: %s" % resource
print "* Type: %s" % error['type']
print "* Request id: %d" % error['request_id']
print "* Message: %s" % error['message']
raise BuxferAPIUnauthorized
if response.status_code != 200:
raise ErrorWithBuxferAPI
return response.json()
def login(self, user, password):
response = requests.get("%s/login?userid=%s" \
"&password=%s" % (self.base_url, user, password))
if response.status_code != 200:
            raise ErrorWithBuxferAPI
token = response.json()
self.token = token['response']['token']
def logout(self):
pass
def __from_json_accounts_to_objects(self, accounts):
accounts_list = list()
for acc in accounts['response']['accounts']:
accounts_list.append(Account(currency=acc['currency'],
balance=acc['balance'],
id=acc['id'],
bank=acc['bank'],
name=acc['name']))
return accounts_list
def get_accounts(self):
response = self.__get_request('accounts')
return self.__from_json_accounts_to_objects(response)
def __from_json_transactions_to_objects(self, transactions):
transactions_list = list()
for tra in transactions['response']['transactions']:
transactions_list.append(Transaction(description=tra['description'],
account=tra['accountName'],
expense=tra['expenseAmount'],
amount=tra['amount'],
t_type=tra['transactionType'],
date=tra['normalizedDate'],
tags=tra['tagNames']))
return transactions_list
def get_transactions(self):
response = self.__get_request('transactions')
return self.__from_json_transactions_to_objects(response)
def __from_json_reminder_to_objects(self, reminders):
reminders_list = list()
for rem in reminders['response']['reminders']:
reminders_list.append(Reminder(start_date=rem['startDate'],
description=rem['description'], amount=rem['amount'],
                id=rem['id'], account_id=rem['account_id']))
return reminders_list
def get_reminders(self):
response = self.__get_request('reminders')
return self.__from_json_reminder_to_objects(response)
def __from_json_budgets_to_objects(self, budgets):
budgets_list = list()
for bud in budgets['response']['budgets']:
budgets_list.append(Budget(name=bud['name'],
spent=bud['spent'], limit=bud['limit'],
balance=bud['balance']))
return budgets_list
def get_budgets(self):
response = self.__get_request('budgets')
return self.__from_json_budgets_to_objects(response)
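# Illustrative usage sketch, not part of the original module; the credentials
# are placeholders and Account is assumed to expose the keyword arguments it
# is constructed with (name, balance, ...) as attributes:
#
#   api = BuxferAPI()
#   api.login('user@example.com', 'secret')
#   for account in api.get_accounts():
#       print account.name, account.balance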
|
gpl-2.0
| 1,461,001,327,499,848,700
| 30.850877
| 80
| 0.590746
| false
| 4.093574
| false
| false
| false
|
jhford/picsort
|
picsort/sort.py
|
1
|
8858
|
import os
import optparse
import hashlib
import json
import shutil
from xml.dom import minidom
import multiprocessing # Only for CPU Count
import Queue
import threading
import time
import re
try:
import exifread
except ImportError:
print 'You are missing the exifread module. Try installing it'
print 'with "sudo pip install exifread" or "sudo easy_install exifread"'
exit(1)
digest_type = 'sha1'
picture_extensions = ['.jpg', '.jpeg', '.psd', '.nef', '.cr2', '.png']
stdout_lock = threading.Lock()
def split_filename(filename):
    ext = ''
    for e in picture_extensions:
        if filename.lower().endswith(e):
            ext = e
    basename = os.path.basename(filename)
    if not ext:
        # not a known picture extension; let callers detect and skip it
        return os.path.dirname(filename), basename, ''
    return os.path.dirname(filename), basename[:-len(ext)], basename[-len(ext):]
def find_pictures(root):
img_files = []
for root, dirs, files in os.walk(root):
for f in sorted(files):
dirname, basename, ext = split_filename(f)
if ext.lower() in picture_extensions:
img_files.append(os.path.abspath(os.path.join(root, f)))
return img_files
def build_hashes(file_lists, num_threads, bufsize=1024*1024):
directory = {}
def update_directory(digest, new_file):
if directory.has_key(digest):
directory[digest].append(new_file)
else:
directory[digest] = [new_file]
def hash_file(filename):
with open(filename) as f:
h = hashlib.new(digest_type)
while True:
d = f.read(bufsize)
if not d:
break
h.update(d)
return h.hexdigest()
def worker():
while True:
item = q.get()
if item is DONE:
q.task_done()
break
digest = hash_file(item)
with directory_lock:
update_directory(digest, item)
q.task_done()
if num_threads == 0:
for l in file_lists:
for f in l:
digest = hash_file(f)
update_directory(digest, f)
else:
directory_lock = threading.Lock()
threads = []
DONE = 'DONE'
q = Queue.Queue()
for i in range(num_threads):
t = threading.Thread(target=worker)
threads.append(t)
t.daemon = True
t.start()
for l in file_lists:
for f in l:
q.put(f)
q.join()
while len([x for x in threads if x.isAlive()]) != 0:
q.put(DONE)
for thread in threads:
thread.join(0.001)
return directory
def verify_files(file_lists, num_threads):
hash_len = len(hashlib.new(digest_type).hexdigest())
pattern = re.compile('.*_%s_(?P<digest>[a-fA-F0-9]{%d}).*' % (digest_type, hash_len))
directory = build_hashes(file_lists, num_threads)
failed_files = []
for digest in directory.keys():
filename = directory[digest][0]
match = pattern.match(filename)
if match:
found_digest = match.group('digest')
if found_digest == digest:
print 'verified %s' % filename
else:
failed_files.append(filename)
print '%s failed to verify: %s vs %s' % (filename, digest, found_digest)
else:
print '%s does not have a hash, skipping' % filename
return failed_files
def dirs_from_image_data(source):
# http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/EXIF.html
try:
with open(source) as f:
exifdata = exifread.process_file(f, details=False)
except:
return os.path.join('bad exif')
dirs = []
if exifdata.has_key('Image Model'):
dirs.append(exifdata['Image Model'].printable)
else:
dirs.append('unknown camera')
if exifdata.has_key('EXIF DateTimeOriginal'):
date, time = exifdata['EXIF DateTimeOriginal'].printable.split(' ')
year, month, day = date.split(':')
dirs.extend([year, month, day])
else:
dirs.append('unknown date')
return os.path.join(*dirs)
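# Illustrative example, comment only: for a photo whose EXIF reports an
# Image Model of 'NIKON D90' and an EXIF DateTimeOriginal of
# '2014:07:21 10:30:00', dirs_from_image_data() returns
# 'NIKON D90/2014/07/21', so handle_file() copies it below
# output_root/NIKON D90/2014/07/21/. Missing EXIF fields fall back to the
# 'unknown camera', 'unknown date' or 'bad exif' directories above.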
def find_sidecars(img_files):
sidecars = []
for img_file in img_files:
dirname, basename, ext = split_filename(img_file)
sidecar = os.path.join(dirname, basename + '.xmp')
if os.path.exists(sidecar):
sidecars.append(sidecar)
return sidecars
mkdir_lock = threading.Lock()
def make_dirs_p(name):
with mkdir_lock:
if not os.path.exists(name):
os.makedirs(name)
def copy_file(source, dest):
with stdout_lock:
print 'Copying %s ==> %s' % (source, dest)
make_dirs_p(os.path.dirname(dest))
shutil.copy2(source, dest)
def alter_sidecar(source, dest, image_dest):
with stdout_lock:
print 'New sidecar for %s ==> %s' % (source, dest)
make_dirs_p(os.path.dirname(dest))
dom = minidom.parse(source)
dom.getElementsByTagName('rdf:Description')[0].attributes.get('crs:RawFileName').value = image_dest
with open(dest, 'w+') as f:
f.write(dom.toxml())
def handle_file(new_root, digest, filenames):
source = filenames[0]
dirname, filename, ext = split_filename(source)
data_based_directories = dirs_from_image_data(source)
output_directory = os.path.join(new_root, data_based_directories)
base_dest = '%s_%s_%s' % (filename, digest_type, digest)
image_dest = base_dest + ext
copy_file(source, os.path.join(output_directory, image_dest))
sidecars = find_sidecars(filenames)
if len(sidecars) == 0:
return
default_sidecar_dest = os.path.join(output_directory, base_dest + '.xmp')
newest_sidecar = max(sidecars, key=os.path.getctime)
alter_sidecar(newest_sidecar, default_sidecar_dest, image_dest)
i = 1
for sidecar in sidecars:
if sidecar is newest_sidecar:
continue
sidecar_dest = os.path.join(output_directory, '%s_sidecar%d.xmp' %(base_dest, i))
i += 1
alter_sidecar(sidecar, sidecar_dest, image_dest)
def handle_files(new_root, file_lists, num_threads):
directory = build_hashes(file_lists, num_threads)
if num_threads == 0:
for digest in directory.keys():
handle_file(new_root, digest, directory[digest])
return
threads = []
q = Queue.Queue()
bad_files = Queue.Queue()
DONE = 'DONE'
def worker():
while True:
item = q.get()
if item is DONE:
q.task_done()
break
try:
handle_file(new_root, item, directory[item])
except:
bad_files.put({'hash': item, 'files': directory[item]})
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=worker)
threads.append(t)
t.daemon = True
t.start()
for digest in directory.keys():
q.put(digest)
q.join()
while len([x for x in threads if x.isAlive()]) != 0:
q.put(DONE)
for thread in threads:
thread.join(0.001)
failing_files = []
while not bad_files.empty():
bad_file = bad_files.get()
failing_files.append(bad_file)
bad_files.task_done()
return failing_files
def main():
print 'Find and sort pictures'
parser = optparse.OptionParser('%prog <dir1> <dirN>');
parser.add_option('-o', '--output', help='Root directory for output',
action='store', dest='output', default=None)
parser.add_option('-t', '--threads', help='Number of work threads to use. ' +
'0 means ignore threading',
action='store', dest='threads', default=multiprocessing.cpu_count())
parser.add_option('--verify', help='Verify files instead of sorting them',
action='store_true', default=False, dest='only_verify')
opts, args = parser.parse_args();
try:
threads = int(opts.threads)
except ValueError:
parser.error("Thread count must be an integer")
if not opts.output and not opts.only_verify:
parser.error("You must specify an output directory")
elif opts.only_verify:
outputdir = None
else:
outputdir = os.path.abspath(opts.output)
print "Output directory: %s" % outputdir
if len(args) < 1:
parser.error("You haven't specified any input directories")
file_lists = []
for arg in args:
file_lists.append(find_pictures(arg))
if opts.only_verify:
failures = verify_files(file_lists, threads)
else:
failures = handle_files(outputdir, file_lists, threads)
with open('failed_files.json', 'w+') as f:
json.dump(failures, f, indent=2)
print 'Done!'
if __name__ == '__main__':
main()
|
gpl-2.0
| -670,674,206,479,652,600
| 29.335616
| 103
| 0.581847
| false
| 3.686226
| false
| false
| false
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/recipes/transparent_legends.py
|
1
|
1700
|
"""
Transparent, fancy legends
==========================
Sometimes you know what your data looks like before you plot it, and
may know for instance that there won't be much data in the upper right
hand corner. Then you can safely create a legend that doesn't overlay
your data:
ax.legend(loc='upper right')
Other times you don't know where your data is, and loc='best' will try
and place the legend::
ax.legend(loc='best')
but still, your legend may overlap your data, and in these cases it's
nice to make the legend frame transparent.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(1234)
fig, ax = plt.subplots(1)
ax.plot(np.random.randn(300), 'o-', label='normal distribution')
ax.plot(np.random.rand(300), 's-', label='uniform distribution')
ax.set_ylim(-3, 3)
ax.legend(loc='best', fancybox=True, framealpha=0.5)
ax.set_title('fancy, transparent legends')
pltshow(plt)
|
mit
| 154,122,098,309,639,550
| 25.169231
| 82
| 0.641765
| false
| 3.346457
| false
| false
| false
|
lcy-seso/models
|
fluid/image_classification/caffe2fluid/kaffe/shapes.py
|
1
|
5047
|
import math
from collections import namedtuple
from .errors import KaffeError
Tensor4DShape = namedtuple('Tensor4DShape',
['batch_size', 'channels', 'height', 'width'])
Tensor3DShape = namedtuple('Tensor3DShape', ['batch_size', 'data1', 'data2'])
Tensor2DShape = namedtuple('Tensor2DShape', ['batch_size', 'data'])
ScalarShape = namedtuple('ScalarShape', ['batch_size'])
def make_tensor(batch_size, d1=None, d2=None, d3=None):
if d3 is not None:
return Tensor4DShape(batch_size, d1, d2, d3)
elif d1 is not None and d2 is not None:
return Tensor3DShape(batch_size, d1, d2)
elif d1 is not None and d2 is None:
return Tensor2DShape(batch_size, d1)
elif d1 is None and d2 is None and d3 is None:
return ScalarShape(batch_size)
else:
raise NotImplementedError('invalid params for make_tensor %s' \
% (str((batch_size, d1, d2, d3))))
def get_filter_output_shape(i_h, i_w, params, round_func):
dila_h = getattr(params, 'dila_h', 1)
dila_w = getattr(params, 'dila_w', 1)
o_h = (i_h + 2 * params.pad_h -
(dila_h * (params.kernel_h - 1) + 1)) / float(params.stride_h) + 1
o_w = (i_w + 2 * params.pad_w -
(dila_w * (params.kernel_w - 1) + 1)) / float(params.stride_w) + 1
return (int(round_func(o_h)), int(round_func(o_w)))
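# Worked example, comment only: a 224x224 input with kernel 7, stride 2,
# pad 3 and dilation 1 gives
#   o = (224 + 2*3 - (1*(7 - 1) + 1)) / 2.0 + 1 = 112.5
# which math.floor (convolution) rounds down to 112 and math.ceil (pooling
# with ceil_mode) rounds up to 113.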
def get_strided_kernel_output_shape(node, round_func):
assert node.layer is not None
input_shape = node.get_only_parent().output_shape
o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width,
node.layer.kernel_parameters, round_func)
params = node.layer.parameters
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape.channels
return make_tensor(input_shape.batch_size, c, o_h, o_w)
def shape_not_implemented(node):
raise NotImplementedError
def shape_identity(node):
assert len(node.parents) > 0
return node.parents[0].output_shape
def shape_scalar(node):
return make_tensor(1, 1, 1, 1)
def shape_crop(node):
raise KaffeError('crop function had been defined in customer_layers')
def shape_data(node):
if node.output_shape:
# Old-style input specification
shape = node.output_shape
else:
try:
# New-style input specification
shape = map(int, node.parameters.shape[0].dim)
except:
# We most likely have a data layer on our hands. The problem is,
# Caffe infers the dimensions of the data from the source (eg: LMDB).
# We want to avoid reading datasets here. Fail for now.
# This can be temporarily fixed by transforming the data layer to
# Caffe's "input" layer (as is usually used in the "deploy" version).
# TODO: Find a better solution for this.
raise KaffeError(
'Cannot determine dimensions of data layer.\n'
'See comments in function shape_data for more info.')
return shape
def shape_mem_data(node):
params = node.parameters
return make_tensor(params.batch_size, params.channels, params.height,
params.width)
def shape_concat(node):
axis = node.layer.parameters.axis
output_shape = None
for parent in node.parents:
if output_shape is None:
output_shape = list(parent.output_shape)
else:
output_shape[axis] += parent.output_shape[axis]
return tuple(output_shape)
def shape_convolution(node):
return get_strided_kernel_output_shape(node, math.floor)
def shape_deconvolution(node):
assert node.layer is not None
input_shape = node.get_only_parent().output_shape
h_i = input_shape.height
w_i = input_shape.width
params = node.layer.kernel_parameters
p_h = params.pad_h
p_w = params.pad_w
dila_h = params.dila_h
dila_w = params.dila_w
k_h = params.kernel_h
k_w = params.kernel_w
s_h = params.stride_h
s_w = params.stride_w
h_o = (h_i - 1) * s_h - 2 * p_h + dila_h * (k_h - 1) + 1
w_o = (w_i - 1) * s_w - 2 * p_w + dila_w * (k_w - 1) + 1
params = node.layer.parameters
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape.channels
return make_tensor(input_shape.batch_size, c, h_o, w_o)
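# Worked example, comment only: an input of height 56 with kernel 4, stride 2,
# pad 1 and dilation 1 gives
#   h_o = (56 - 1)*2 - 2*1 + 1*(4 - 1) + 1 = 112
# i.e. the usual 2x upsampling deconvolution.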
def shape_pool(node):
global_pool = getattr(node.layer.parameters, 'global_pooling', False)
if global_pool:
input_shape = node.get_only_parent().output_shape
return make_tensor(input_shape.batch_size, input_shape.channels, 1, 1)
ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True)
if ceil_mode is True:
method = math.ceil
else:
method = math.floor
return get_strided_kernel_output_shape(node, method)
def shape_inner_product(node):
input_shape = node.get_only_parent().output_shape
return make_tensor(input_shape.batch_size, node.layer.parameters.num_output)
|
apache-2.0
| -4,642,686,649,229,181,000
| 31.772727
| 81
| 0.632059
| false
| 3.285807
| false
| false
| false
|
underloki/Cyprium
|
app/cli/root/crypto/text/atomicdigits.py
|
1
|
12349
|
#! /usr/bin/python3
########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# cyprium.hackademics.fr # #
# Authors: SAKAROV, mont29, afranck64 #
# Contact: admin@hackademics.fr #
# Forum: hackademics.fr #
# Twitter: @hackademics_ #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
import sys
import os
import random
# In case we directly run that file, we need to add the whole cyprium to path,
# to get access to CLI stuff!
if __name__ == "__main__":
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..", "..",
"..")))
import app.cli
import kernel.crypto.text.atomicdigits as atomicdigits
import kernel.utils as utils
class AtomicDigits(app.cli.Tool):
"""CLI wrapper for atomicdigits crypto text tool."""
def main(self, ui):
ui.message("********** Welcome to Cyprium.AtomicDigits! **********")
quit = False
while not quit:
options = [(self.about, "*about", "Show some help!"),
(self.demo, "*demo", "Show some examples"),
(self.cypher, "*cypher",
"Cypher some text in atomic digits"),
(self.decypher, "d*ecypher",
"Decypher atomic digits into text"),
("", "-----", ""),
("tree", "*tree", "Show the whole tree"),
("quit", "*quit", "Quit Cyprium.AtomicDigits")]
msg = "Cyprium.AtomicDigits"
answ = ui.get_choice(msg, options)
if answ == 'tree':
self._tree.print_tree(ui, self._tree.FULL)
elif answ == 'quit':
self._tree.current = self._tree.current.parent
quit = True
else:
answ(ui)
ui.message("Back to Cyprium menus! Bye.")
def _get_exhaustive_txt(self, out, ui, min_cypher, act=None):
        ui.message("Exhaustive found {} solutions for a minimum cyphering of "
"{}, among which {} solutions with the highest possible "
"cyphering ({})."
"".format(out["n_solutions"], min_cypher,
out["best_n_solutions"],
out["best_cypher"]))
if act not in {"all", "best", "rand", "rand_best"}:
options = [("all", "*all solutions", ""),
("best", "all $best solutions", ""),
("rand", "*one random solution", ""),
("rand_best", "or one *random best solution", "")]
act = ui.get_choice("Do you want to get", options,
oneline=True)
if act == "all":
lines = utils.format_multiwords(out["solutions"], sep=" ")
return "\n {}".format("\n ".join(lines))
elif act == "best":
lines = utils.format_multiwords(out["best_solutions"], sep=" ")
return "\n {}".format("\n ".join(lines))
elif act == "rand":
return " ".join((random.choice(w) for w in out["solutions"]))
else:
return " ".join((random.choice(w) for w in out["best_solutions"]))
def about(self, ui):
ui.message(atomicdigits.__about__)
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def demo(self, ui):
ui.message("===== Demo Mode =====")
ui.message("Running a small demo/testing!")
ui.message("")
ui.message("--- Cyphering ---")
ui.message("Data to cypher: {}".format("HOW ARE YOU NICEDAYISNTIT"))
out = atomicdigits.cypher("HOW ARE YOU NICEDAYISNTIT")
ui.message("Atomic digits cyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out,
sep=" "))))
ui.message("")
htext = "90 53 16 53 16 A Q 92 53 52 16 53 M 15 L E 52 16 T"
ui.message("--- Decyphering ---")
ui.message("Atomic digits text used as input: {}".format(htext))
out = atomicdigits.decypher(htext)
ui.message("The decyphered data is:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
ui.message("")
ui.message("--- Notes ---")
        ui.message("+ You can choose the optional Exhaustive option, to get "
                   "all possible encodings of each word higher than the "
"given threshold of cyphering (or the highest possible):")
ui.message("Data to cypher: {}".format("HOW ARE YOU NICEDAYISNTIT"))
out = atomicdigits.cypher("HOW ARE YOU NICEDAYISNTIT", exhaustive=True,
min_cypher=0.8)
out = self._get_exhaustive_txt(out, ui, min_cypher=0.8, act="all")
ui.message(out)
ui.message("")
htext = "1874 A75 39892 75358DA39535081T"
ui.message("+ You can try to decypher a text with atomic numbers "
"merged (i.e. no more spaces between them – nasty!):")
ui.message("Data to decypher: {}".format(htext))
out = atomicdigits.decypher(htext)
ui.message("Atomic digits decyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
ui.message("")
ui.message("--- Won’t work ---")
ui.message("+ The input text to cypher must be ASCII uppercase "
"chars only:")
ui.message("Data to cypher: {}\n".format("Hello WORLD !"))
try:
out = atomicdigits.cypher("Hello WORLD !")
ui.message("Atomic digits cyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.message("+ The input text to decypher must be valid atomic digits:")
htext = "90 53 016 53 16 A Q 922 53 52 16 53 M 15 L E 52 16 T"
ui.message("Atomic digits text used as input: {}".format(htext))
try:
out = atomicdigits.decypher(htext)
ui.message("Atomic digits decyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def cypher(self, ui):
"""Interactive version of cypher()."""
txt = ""
ui.message("===== Cypher Mode =====")
while 1:
done = False
while 1:
exhaustive = False
threshold = 0.8
txt = ui.text_input("Text to cypher to atomic digits",
sub_type=ui.UPPER)
if txt is None:
break # Go back to main Cypher menu.
options = [("exhst", "*exhaustive cyphering", ""),
("simple", "or $simple one", "")]
answ = ui.get_choice("Do you want to use", options,
oneline=True)
if answ == "exhst":
exhaustive = True
t = ui.get_data("Cypher threshold (nothing to use default "
"{} one): ".format(threshold),
sub_type=ui.FLOAT, allow_void=True)
if t is not None:
threshold = t
try:
# Will also raise an exception if data is None.
txt = atomicdigits.cypher(txt, exhaustive=exhaustive,
min_cypher=threshold)
if exhaustive:
txt = self._get_exhaustive_txt(txt, ui,
min_cypher=threshold)
done = True # Out of those loops, output result.
break
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("retry", "*try again", ""),
("menu", "or go back to *menu", "")]
answ = ui.get_choice("Could not convert that data into "
"atomic digits, please", options,
oneline=True)
if answ in {None, "menu"}:
                        return # Go back to main AtomicDigits menu.
# Else, retry with another data to hide.
if done:
ui.text_output("Text successfully converted", txt,
"Atomic digits version of text")
options = [("redo", "*cypher another text", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ in {None, "quit"}:
return
def decypher(self, ui):
"""Interactive version of decypher()."""
txt = ""
ui.message("===== Decypher Mode =====")
while 1:
txt = ui.text_input("Please choose some atomic digits text",
sub_type=ui.UPPER)
try:
txt = atomicdigits.decypher(txt)
txt = "\n " + "\n ".join(utils.format_multiwords(txt))
ui.text_output("Text successfully decyphered",
txt,
"The decyphered text is")
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("redo", "*decypher another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ == "quit":
return
NAME = "atomic"
TIP = "Tool to convert text to/from atomic digits code."
TYPE = app.cli.Node.TOOL
CLASS = AtomicDigits
# Allow tool to be used directly, without using Cyprium menu.
if __name__ == "__main__":
import app.cli.ui
ui = app.cli.ui.UI()
tree = app.cli.NoTree("AtomicDigits")
AtomicDigits(tree).main(ui)
|
gpl-3.0
| -3,187,348,069,849,674,000
| 44.881041
| 123
| 0.458273
| false
| 4.381257
| false
| false
| false
|
patrickfuller/imolecule
|
imolecule/format_converter.py
|
1
|
7752
|
"""
Methods to interconvert between json and other (cif, mol, smi, etc.) files
"""
import imolecule.json_formatter as json
from collections import Counter
from fractions import gcd
from functools import reduce
# Open Babel <= '2.4.1'
try:
import pybel
ob = pybel.ob
table = ob.OBElementTable()
GetAtomicNum = table.GetAtomicNum
GetSymbol = table.GetSymbol
has_ob = True
except ImportError:
has_ob = False
# Open Babel >= '3.0.0'
try:
from openbabel import pybel
ob = pybel.ob
GetAtomicNum = ob.GetAtomicNum
GetSymbol = ob.GetSymbol
has_ob = True
except ImportError:
    # Keep has_ob from the Open Babel <= 2.4.1 probe above instead of clobbering it.
    pass
def convert(data, in_format, out_format, name=None, pretty=False):
"""Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
"""
# Decide on a json formatter depending on desired prettiness
dumps = json.dumps if pretty else json.compress
# Shortcut for avoiding pybel dependency
if not has_ob and in_format == 'json' and out_format == 'json':
return dumps(json.loads(data) if is_string(data) else data)
elif not has_ob:
raise ImportError("Chemical file format conversion requires pybel.")
# These use the open babel library to interconvert, with additions for json
if in_format == 'json':
mol = json_to_pybel(json.loads(data) if is_string(data) else data)
elif in_format == 'pybel':
mol = data
else:
mol = pybel.readstring(in_format, data)
# Infer structure in cases where the input format has no specification
if not mol.OBMol.HasNonZeroCoords():
mol.make3D()
# Make P1 if that's a thing, recalculating bonds in process
if in_format == 'mmcif' and hasattr(mol, 'unitcell'):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if out_format == 'pybel':
return mol
elif out_format == 'object':
return pybel_to_json(mol, name)
elif out_format == 'json':
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format)
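# Illustrative usage sketch, not part of the original module; it needs Open
# Babel/pybel installed so the SMILES string can be parsed:
#
#   benzene_json = convert('c1ccccc1', in_format='smi', out_format='json',
#                          name='benzene', pretty=True)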
def json_to_pybel(data, infer_bonds=False):
"""Converts python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
"""
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
# If there is no bond data, try to infer them
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
# Otherwise, use the bonds in the data set
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
# Check for unit cell data
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
# Add partial charges
if 'charge' in data['atoms'][0]:
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol
def pybel_to_json(molecule, name=None):
"""Converts a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data
"""
# Save atom element type and 3D location.
atoms = [{'element': GetSymbol(atom.atomicnum),
'location': list(atom.coords)}
for atom in molecule.atoms]
# Recover auxiliary data, if exists
for json_atom, pybel_atom in zip(atoms, molecule.atoms):
if pybel_atom.partialcharge != 0:
json_atom['charge'] = pybel_atom.partialcharge
if pybel_atom.OBAtom.HasData('_atom_site_label'):
obatom = pybel_atom.OBAtom
json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
if pybel_atom.OBAtom.HasData('color'):
obatom = pybel_atom.OBAtom
json_atom['color'] = obatom.GetData('color').GetValue()
# Save number of bonds and indices of endpoint atoms
bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
b.GetEndAtom().GetIndex()],
'order': b.GetBondOrder()}
for b in ob.OBMolBondIter(molecule.OBMol)]
output = {'atoms': atoms, 'bonds': bonds, 'units': {}}
# If there's unit cell data, save it to the json output
if hasattr(molecule, 'unitcell'):
uc = molecule.unitcell
output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()]
for v in uc.GetCellVectors()]
density = (sum(atom.atomicmass for atom in molecule.atoms) /
(uc.GetCellVolume() * 0.6022))
output['density'] = density
output['units']['density'] = 'kg / L'
# Save the formula to json. Use Hill notation, just to have a standard.
element_count = Counter(GetSymbol(a.atomicnum) for a in molecule)
hill_count = []
for element in ['C', 'H']:
if element in element_count:
hill_count += [(element, element_count[element])]
del element_count[element]
hill_count += sorted(element_count.items())
# If it's a crystal, then reduce the Hill formula
div = (reduce(gcd, (c[1] for c in hill_count))
if hasattr(molecule, 'unitcell') else 1)
output['formula'] = ''.join(n if c / div == 1 else '%s%d' % (n, c / div)
for n, c in hill_count)
output['molecular_weight'] = molecule.molwt / div
output['units']['molecular_weight'] = 'g / mol'
# If the input has been given a name, add that
if name:
output['name'] = name
return output
def is_string(obj):
"""Wraps Python2.x and 3.x ways to test if string."""
try:
return isinstance(obj, basestring)
except NameError:
return isinstance(obj, str)
if __name__ == '__main__':
# Lazy converter to test this out
import sys
in_data, in_format, out_format = sys.argv[1:]
try:
with open(in_data) as in_file:
data = in_file.read()
except IOError:
data = in_data
print(convert(data, in_format, out_format, pretty=True))
|
mit
| -2,810,372,494,749,063,700
| 33.14978
| 79
| 0.611326
| false
| 3.658329
| false
| false
| false
|
Dangetsu/vnr
|
Frameworks/Sakura/py/apps/browser/core/network.py
|
1
|
6136
|
# coding: utf8
# network.py
# 12/13/2012 jichi
__all__ = 'WbNetworkAccessManager',
import os
from PySide.QtNetwork import QNetworkAccessManager, QNetworkRequest, QNetworkDiskCache
from sakurakit import skfileio, sknetwork
from sakurakit.skdebug import dprint
import proxy, rc
## Cookie ##
class WbNetworkCookieJar(sknetwork.SkNetworkCookieJar):
def __init__(self, path, parent=None): # unicode
super(WbNetworkCookieJar, self).__init__(parent)
self.path = path
self.load()
self._injectCookies()
# Automatically save cookies using timer
from PySide.QtCore import QCoreApplication
qApp = QCoreApplication.instance()
qApp.aboutToQuit.connect(self.save)
# Persistent storage
def load(self): # unicode ->
path = self.path
if path and os.path.exists(path):
self.unmarshal(skfileio.readdata(path))
def save(self): # unicode -> bool
return bool(self.path) and skfileio.writedata(self.path, self.marshal())
def _injectCookies(self):
#from PySide.QtCore import QUrl
from PySide.QtNetwork import QNetworkCookie
import cookies
# Use parent cookie
setCookiesFromUrl = super(WbNetworkCookieJar, self).setCookiesFromUrl
for kvdict,urls in cookies.itercookies():
cookies = [QNetworkCookie(k,v) for k,v in kvdict.iteritems()]
for url in urls:
l = cookies
if url.startswith("http://www."):
domain = url.replace("http://www", '') # such as .dmm.co.jp
l = [QNetworkCookie(it) for it in l] # copy l
for c in l:
c.setDomain(domain)
self.setCookiesFromOriginalUrl(l, url)
# See: http://kancolle.wikia.com/wiki/Tutorial:_Proxy_Connection
#c = QNetworkCookie('ckcy', '1')
#c.setDomain("http://www.dmm.com")
#c.setPath("/netgame")
#self.setCookiesFromUrl([c], "http://www.dmm.com/netgame")
#c.setPath("/netgame_s")
#self.setCookiesFromUrl([c], "http://www.dmm.com/netgame_s")
# Proxy
def cookiesForUrl(self, url): # override
"""@reimp"""
url = proxy.fromproxyurl(url) or url
return super(WbNetworkCookieJar, self).cookiesForUrl(url)
def setCookiesFromUrl(self, cookies, url):
"""@reimp"""
url = proxy.fromproxyurl(url) or url
return super(WbNetworkCookieJar, self).setCookiesFromUrl(cookies, url)
# Expose API to set cookies without proxy
def setCookiesFromOriginalUrl(self, cookies, url):
return super(WbNetworkCookieJar, self).setCookiesFromUrl(cookies, url)
## Network ##
REQ_PROXY_URL = 'proxy'
class WbNetworkAccessManager(QNetworkAccessManager):
def __init__(self, parent=None):
super(WbNetworkAccessManager, self).__init__(parent)
self.sslErrors.connect(_WbNetworkAccessManager.onSslErrors)
self.finished.connect(_WbNetworkAccessManager.onReplyFinished)
# Enable offline cache
cache = QNetworkDiskCache(self)
    cache.setCacheDirectory(rc.DIR_CACHE_NETMAN) # QNetworkDiskCache will create this directory if it does not exist.
self.setCache(cache)
# Load cookies
jar = WbNetworkCookieJar(rc.COOKIES_LOCATION)
self.setCookieJar(jar)
# QNetworkReply *createRequest(Operation op, const QNetworkRequest &req, QIODevice *outgoingData = nullptr) override;
def createRequest(self, op, req, outgoingData=None): # override
url = req.url()
#print url
#if url.scheme() == 'https' and url.host() in ('www.dmm.com', 'dmm.com'):
# path = url.path()
# if path.startswith('/js/') or path.startswith('/css/'):
# url.setScheme('http') # downgrade to http
# req.setUrl(url)
# dprint("downgrade https to http:", url)
#print url
newurl = _WbNetworkAccessManager.getBlockedUrl(url)
if newurl:
req = QNetworkRequest(newurl)
else:
newurl = proxy.toproxyurl(url)
if newurl and newurl != url:
        req = QNetworkRequest(req) # copy, since the incoming request is const
req.setUrl(newurl)
_WbNetworkAccessManager.setRequestHeaders(req)
reply = super(WbNetworkAccessManager, self).createRequest(op, req, outgoingData)
#if url.host().lower().endswith('dmm.co.jp'):
reply.setUrl(url) # restore the old url
reply.setProperty(REQ_PROXY_URL, url)
#print newurl
return reply
#else:
# print url
_WbNetworkAccessManager.setRequestHeaders(req)
return super(WbNetworkAccessManager, self).createRequest(op, req, outgoingData)
class _WbNetworkAccessManager:
@staticmethod
def setRequestHeaders(req):
"""Set the http header
@param req QNetworkRequest
"""
if req.hasRawHeader('Referer'): # Delete Referer so that it will not get tracked
req.setRawHeader('Referer', '')
#req.setRawHeader('User-Agent', config.USER_AGENT) # handled in WebKit
#IP = '153.121.52.138'
#keys = 'X-Forwarded-For', 'Client-IP', 'X-Client-IP', 'Real-IP', 'X-Real-IP'
#for k in keys:
# req.setRawHeader(k, IP)
@staticmethod
def onReplyFinished(reply):
"""Fix the redirect URL
@param reply QNetworkReply
"""
proxyUrl = reply.property(REQ_PROXY_URL)
if proxyUrl:
#statusCode = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
redirectUrl = reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
if redirectUrl:
if not redirectUrl.host() and redirectUrl != reply.url() and redirectUrl != proxyUrl:
redirectUrl.setHost(proxyUrl.host())
else:
redirectUrl = proxy.fromproxyurl(redirectUrl)
if redirectUrl:
reply.setAttribute(QNetworkRequest.RedirectionTargetAttribute, redirectUrl)
@staticmethod
def getBlockedUrl(url):
"""
@param url QUrl
@return unicode or QUrl or None
"""
if url.path() == '/js/localize_welcome.js': # for DMM
dprint("block dmm localize welcome")
return rc.DMM_LOCALIZED_WELCOME_URL
# http://stackoverflow.com/questions/8362506/qwebview-qt-webkit-wont-open-some-ssl-pages-redirects-not-allowed
@staticmethod
def onSslErrors(reply, errors): # QNetworkReply, [QSslError] ->
reply.ignoreSslErrors()
#dprint("ignore ssl error")
#print errors
# EOF
|
gpl-3.0
| -7,939,426,695,941,175,000
| 33.088889
| 119
| 0.683833
| false
| 3.437535
| false
| false
| false
|
jpvmm/DLearningExp
|
fuzzy.py
|
1
|
4518
|
#Fuzzy Algorithm to mark calcifications in mammography
#I'm using Mamdani Defuzzification
#FutureBox Analytics
from __future__ import division
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as p
from skimage.io import imread
from skimage.measure import label, regionprops
from skimage.exposure import histogram
from skimage.morphology import binary_closing,binary_opening, disk, skeletonize, dilation,rectangle
class Fuzzy:
''' A fuzzy class '''
def __init__(self, path):
self.path = path
def open_image(self):
''' Open the image given the path'''
img = imread(self.path)
counts,grays = histogram(img, nbins = 256)
#img = (img-0)/(255 -0)
return img,counts,grays
def Fuzzification(self,counts,grays):
        ''' Fuzzify an image histogram '''
        # Rule 1 (Bright, Few) = 1
        u1 = []
        # Rule 2 (Bright, Moderate) = 1
        u2 = []
        # Rule 3 (Bright, Many) = 0.5
        u3 = []
        # Rule 4 (Grey, Few) = 0.5
        u4 = []
        # Rule 5 (Grey, Moderate) = 0.5
        u5 = []
for i in range(counts.shape[0]):
u1 = np.append(u1,self.Few(counts[i])*self.White(grays[i]))
u2 = np.append(u2,self.Moderate(counts[i]) * self.White(grays[i]))
u3 = np.append(u3,self.Many(counts[i]) * self.White(grays[i]))
u4 = np.append(u4,self.Few(counts[i]) * self.Grey(grays[i]))
u5 = np.append(u5,self.Moderate(counts[i]) * self.Grey(grays[i]))
return u1,u2,u3,u4,u5
def Defuzzification(self,u1,u2,u3,u4,u5):
''' Defuzzification of an image using singletons function '''
CA = ((1*u1+1*u2+0.5*u3+0.5*u4+0.5*u5)/(u1+u2+u3+u4+u5)) #Center of AVERAGE!! function
CA[np.isnan(CA)] = 0
return CA
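    # Worked example, comment only: for a grey level whose memberships come
    # out as u1 = 0.2, u2 = u3 = u5 = 0 and u4 = 0.5, the centre of average is
    #   (1*0.2 + 0.5*0.5) / (0.2 + 0.5) = 0.45 / 0.7 ~= 0.64
    # Levels where every membership is zero are mapped to 0 by the isnan fix-up.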
def Inference(self,img,counts,grays):
''' Fuzzy Inference System '''
u1,u2,u3,u4,u5 = self.Fuzzification(counts,grays)
defuzz = self.Defuzzification(u1,u2,u3,u4,u5)
        imgdefuzz = img.copy()  # work on copies so the two marked images do not alias the input
        imgsuspeito = img.copy()
for i in range(counts.shape[0]):
if defuzz[i] ==1 :
                imgdefuzz[imgdefuzz == grays[i]] = 255
if defuzz[i] >= 0.5 and defuzz[i] < 0.9:
imgsuspeito[imgsuspeito == grays[i]] = 255
return defuzz,imgdefuzz,imgsuspeito
def Grey(self,gray_scale):
''' Grey Pixels fuzzy function '''
if gray_scale > 122 and gray_scale <= 141:
c = (gray_scale - 122)/(141-122)
elif gray_scale > 141 and gray_scale <= 179:
c = (179 - gray_scale)/(179 - 141)
else:
c = 0
return c
def White(self,gray_scale):
''' White Pixels Fuzzy Function'''
if gray_scale >= 170 and gray_scale <= 230:
c = (gray_scale - 170)/(230 -170)
elif gray_scale > 230:
c = 1
else:
c = 0
return c
def Black(self,gray_scale):
''' Black Pixels Fuzzy Function '''
if gray_scale >=77 and gray_scale <= 128:
c = (128 - gray_scale)/(128-77)
elif gray_scale < 77:
c = 1
else:
c = 0
return c
def Few(self,counts):
''' Fuzzy function for few pixels '''
if counts <= 250:
p = 1
elif counts > 250 and counts <= 450:
p = (450 - counts)/(450-250)
else:
p = 0
return p
def Moderate(self,counts):
        ''' Fuzzy function for a moderate amount of pixels '''
if counts >= 600 and counts <=750:
p = (counts - 600)/(750-600)
elif counts > 750 and counts <= 850:
p = 1
elif counts > 850 and counts <= 1050:
p = (1050 - counts)/(1050-850)
else:
p = 0
return p
def Many(self,counts):
''' Fuzzy function for many pixels '''
if counts >= 1000 and counts <= 2000:
p = (counts - 1000)/(2000-1000)
elif counts > 2000:
p = 1
else:
p = 0
return p
t = Fuzzy('/home/joao/UnB/Academia/Git/DATA/ROIs/49.pgm')
img2,counts,grays = t.open_image()
teste,imgdefuz,imgsus = t.Inference(img2,counts,grays)
#Binarization
binary = imgdefuz >= 255
se = disk(10)
binary_image = dilation(binary, se)
se3 = disk(20)
binary_image = binary_closing(binary_image,se3)
p.imshow(imgdefuz, cmap = cm.Greys_r)
p.show()
|
gpl-3.0
| 8,367,580,780,083,394,000
| 27.77707
| 99
| 0.542939
| false
| 3.172753
| false
| false
| false
|
RaVenHelm/SortingClass
|
SortingClass.py
|
1
|
9012
|
from copy import copy
class SortingUtilClass:
@staticmethod
def list_to_string(values):
count = len(values) - 1
res = ''
for n in values:
fmt_string = '{:<3}'
res += fmt_string.format(n)
return res
@staticmethod
def print_results(comparisons, swaps):
print()
print('Analysis: ')
		print('\t{0:<12} {1:>3}'.format('Comparisons:', comparisons))
print('\t{0:<12} {1:>3}'.format('Swaps:', swaps))
print('\t{0:<12} {1:>3}'.format('Work:', comparisons + (5 * swaps)))
print()
@staticmethod
def print_title(assign_num, title):
print('Tyberius Enders')
print('Assignment {} - {}'.format(assign_num, title))
print()
@staticmethod
def print_loop_position(num, array):
print('Loop #{0} Array = {1}'.format(num, SortingUtilClass.list_to_string(array)))
@staticmethod
def print_comparison_level(array, comparison, spacing, print_list, adjust):
print('Comparison'.rjust(14), end=' ')
print('#{}'.format(comparison).ljust(spacing), end='')
print('{}'.format(SortingUtilClass.list_to_string(print_list)).rjust(adjust))
@staticmethod
def print_comparison_simple(comparison, values, low, high, **kwargs):
fmt = 'Comparison #{}'.format(comparison)
end_char = '\n'
if kwargs['stop']:
end_char = ''
if (comparison / 10) < 1:
print(fmt.rjust(18), end='')
else:
print(fmt.rjust(19), end='')
base_spaces = 3*low + 6
print('{}'.format(values[low]).rjust(base_spaces), end=end_char)
if kwargs['stop']:
print('{}'.format('(Stop)').rjust(8))
@staticmethod
def print_swap_simple(swap, values, low, high):
fmt = 'Swap #{}'.format(swap)
if (swap / 10) < 1:
print(fmt.rjust(12), end='')
else:
print(fmt.rjust(13), end='')
print('{}'.format(values[low]).rjust(3*low+12), end='')
print('{}'.format(values[high]).rjust(3*(high-low)))
@staticmethod
def print_swap_level(array, swap, spacing, print_list, adjust):
print('Swap'.rjust(8), end=' ')
print('#{}'.format(swap).ljust(spacing), end='')
print('{}'.format(SortingUtilClass.list_to_string(print_list).rjust(adjust)))
@staticmethod
def print_pivot_level(pivot):
pivot_spaces = 14
if (pivot / 10) >= 1:
pivot_spaces = 15
print('Pivot = {}'.format(pivot).rjust(pivot_spaces))
@staticmethod
def print_level_with_array(level, array):
print('Level {}:'.format(level), end='')
print('Array = {}'.format(SortingUtilClass.list_to_string(array)).rjust(15 + len(array) * 3))
@staticmethod
def print_high_low(high, low):
low_spaces = 12
if (low / 10) >= 1:
low_spaces = 11
print('Low = {}'.format(low).rjust(low_spaces))
high_spaces = 13
if (high / 10) >= 1:
high_spaces = 14
print('High = {}'.format(high).rjust(high_spaces))
@staticmethod
def print_qs_fn(low, high, index):
a = low
b = index - 1
c = index + 1
d = high
if a > b:
a, b = b, a
if c > d:
c, d = d, c
print('Calling QS ({}-{}) and ({}-{})'.format(a, b, c, d))
@staticmethod
def print_char_line(char):
for i in range(1,55):
print(char, end='')
print()
@staticmethod
def print_algorithm_title(title):
SortingUtilClass.print_char_line('#')
print(title)
print()
class SortingClass:
def __init__(self, to_print=True):
self.comparisons = 1
self.swaps = 1
self.level = 1
self.print = to_print
def set_defaults(self):
self.comparisons = 1
self.swaps = 1
self.level = 1
def finish(self):
if self.print:
SortingUtilClass.print_results(self.comparisons, self.swaps)
work = self.comparisons+(5*self.swaps)
return dict(comparisons=self.comparisons, swaps=self.swaps, work=work, level=self.level)
def get_analysis(self):
work = self.comparisons + (5*self.swaps)
return dict(comparisons=self.comparisons, swaps=self.swaps, work=work)
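# Example of the work metric used above (illustrative numbers): with 10
# comparisons and 3 swaps, work = 10 + 5 * 3 = 25, i.e. each swap is counted
# as five times the cost of a comparison.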
def bubble_sort(self, values):
n = len(values)
for i in range(n):
# print loop level
if self.print:
SortingUtilClass.print_loop_position(i+1,values)
for j in range(1,n):
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 2) + 7, [values[j-1], values[j]], j)
self.comparisons += 1
if values[j-1] > values[j]:
values[j-1], values[j] = values[j], values[j-1]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(j - 2) + 13, [values[j-1], values[j]], j)
self.swaps += 1
return self
def insertion_sort(self, values):
n = len(values)
for i in range(1,n):
j = i
if self.print:
SortingUtilClass.print_loop_position(j, values)
while j > 0 and values[j-1] > values[j]:
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
# swap values
values[j-1], values[j] = values[j], values[j-1]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(j - 1) + 10, [values[j-1], values[j]], j)
self.swaps += 1
j -= 1
else:
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
return self
def selection_sort(self, values):
n = len(values)
count = 1
for i in range(n-1, 0, -1):
# print loop level
if self.print:
SortingUtilClass.print_loop_position(count, values)
maximum = self.max_key(0, i, values)
# swap values
values[maximum], values[i] = values[i], values[maximum]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(i-1) + 10, [values[maximum], values[i]], i)
self.swaps += 1
count += 1
return self
def max_key(self, low, high, values):
largest = low
for j in range(low+1, high+1):
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
if values[largest] < values[j]:
largest = j
# print max and array
if self.print:
pass
return largest
# Assignment 4 methods
def partition(self, values, low, high):
pivot = values[high]
i = low
# print pivot
if self.print:
SortingUtilClass.print_pivot_level(pivot)
for j in range(low, high):
# print comparison
if self.print:
to_stop = False
if values[j] >= pivot:
to_stop = True
SortingUtilClass.print_comparison_simple(self.comparisons, values, j, high, stop=to_stop)
self.comparisons += 1
if values[j] <= pivot:
if self.print:
print('Moving high...')
values[i], values[j] = values[j], values[i]
i += 1
# swap values
values[i], values[high] = values[high], values[i]
if self.print:
SortingUtilClass.print_swap_simple(self.swaps, values, i, high)
print('{}'.format('Moving low...').rjust(18))
self.swaps += 1
return i
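# Worked example of the Lomuto-style partition above (illustrative trace):
# partition([3, 8, 2, 5], 0, 3) uses pivot = values[3] = 5:
#   j=0: 3 <= 5, swap values[0] with itself, i -> 1
#   j=1: 8 >  5, no swap
#   j=2: 2 <= 5, swap values[1] and values[2] -> [3, 2, 8, 5], i -> 2
# finally values[2] and values[3] swap -> [3, 2, 5, 8] and the function
# returns 2, the pivot's final index.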
def quick_sort(self, values, low, high):
# print level
# print array
# print low
# print high
if self.print:
SortingUtilClass.print_level_with_array(self.level, values)
SortingUtilClass.print_high_low(high, low)
if low < high:
p = self.partition(values, low, high)
self.level += 1
# print 'Calling QS'...
if self.print:
SortingUtilClass.print_qs_fn(low, high, p)
self.quick_sort(values, low, p - 1)
self.quick_sort(values, p + 1, high)
return self
# Heap sort methods (Extra)
def heap_sort(self, values, count):
self.heapify(values, count)
end = count - 1
while end > 0:
values[end], values[0] = values[0], values[end]
self.swaps += 1
end -= 1
self.sift_down(values, 0, end)
return self
def heapify(self, values, count):
start = int((count - 2) / 2)
while start >= 0:
self.sift_down(values, start, count - 1)
start -= 1
def sift_down(self, values, start, end):
root = start
while root * 2 + 1 <= end:
child = root * 2 + 1
if child + 1 <= end and values[child] < values[child + 1]:
child += 1
self.comparisons += 1
if values[root] < values[child]:
values[root], values[child] = values[child], values[root]
self.swaps += 1
root = child
self.comparisons += 1
else:
return
def all(self, orig):
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Bubble Sort')
self.bubble_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Insertion Sort')
self.insertion_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Selection Sort')
self.selection_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Quick Sort')
self.quick_sort(values, 0, len(values) - 1).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Heap Sort')
self.heap_sort(values, len(values)).finish()
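# Illustrative usage (added sketch, not part of the original assignment code):
# running the module directly sorts a small sample list with the verbose
# per-step printing disabled and reports the statistics from finish().
if __name__ == '__main__':
    demo = SortingClass(to_print=False)
    print(demo.quick_sort([9, 2, 7, 4, 1], 0, 4).finish())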
|
mit
| 2,211,133,984,667,170,600
| 26.063063
| 114
| 0.642255
| false
| 2.809227
| false
| false
| false
|
dgouldin/myspaceid-python-sdk
|
src/openid/fetchers.py
|
1
|
14001
|
# -*- test-case-name: openid.test.test_fetchers -*-
"""
This module contains the HTTP fetcher interface and several implementations.
"""
__all__ = ['fetch', 'getDefaultFetcher', 'setDefaultFetcher', 'HTTPResponse',
'HTTPFetcher', 'createHTTPFetcher', 'HTTPFetchingError',
'HTTPError']
import urllib2
import time
import cStringIO
import sys
import openid
import openid.urinorm
# Try to import httplib2 for caching support
# http://bitworking.org/projects/httplib2/
try:
import httplib2
except ImportError:
# httplib2 not available
httplib2 = None
# try to import pycurl, which will let us use CurlHTTPFetcher
try:
import pycurl
except ImportError:
pycurl = None
USER_AGENT = "python-openid/%s (%s)" % (openid.__version__, sys.platform)
MAX_RESPONSE_KB = 1024
def fetch(url, body=None, headers=None):
"""Invoke the fetch method on the default fetcher. Most users
should need only this method.
@raises Exception: any exceptions that may be raised by the default fetcher
"""
fetcher = getDefaultFetcher()
return fetcher.fetch(url, body, headers)
def createHTTPFetcher():
"""Create a default HTTP fetcher instance
prefers Curl to urllib2."""
if pycurl is None:
fetcher = Urllib2Fetcher()
else:
fetcher = CurlHTTPFetcher()
return fetcher
# Contains the currently set HTTP fetcher. If it is set to None, the
# library will call createHTTPFetcher() to set it. Do not access this
# variable outside of this module.
_default_fetcher = None
def getDefaultFetcher():
"""Return the default fetcher instance
if no fetcher has been set, it will create a default fetcher.
@return: the default fetcher
@rtype: HTTPFetcher
"""
global _default_fetcher
if _default_fetcher is None:
setDefaultFetcher(createHTTPFetcher())
return _default_fetcher
def setDefaultFetcher(fetcher, wrap_exceptions=True):
"""Set the default fetcher
@param fetcher: The fetcher to use as the default HTTP fetcher
@type fetcher: HTTPFetcher
@param wrap_exceptions: Whether to wrap exceptions thrown by the
fetcher with HTTPFetchingError so that they may be caught
easier. By default, exceptions will be wrapped. In general,
unwrapped fetchers are useful for debugging of fetching errors
or if your fetcher raises well-known exceptions that you would
like to catch.
@type wrap_exceptions: bool
"""
global _default_fetcher
if fetcher is None or not wrap_exceptions:
_default_fetcher = fetcher
else:
_default_fetcher = ExceptionWrappingFetcher(fetcher)
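# Example (illustrative sketch): installing an unwrapped urllib2-based fetcher
# so that the raw urllib2 exceptions propagate while debugging fetch failures:
#
#     setDefaultFetcher(Urllib2Fetcher(), wrap_exceptions=False)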
def usingCurl():
"""Whether the currently set HTTP fetcher is a Curl HTTP fetcher."""
return isinstance(getDefaultFetcher(), CurlHTTPFetcher)
class HTTPResponse(object):
"""XXX document attributes"""
headers = None
status = None
body = None
final_url = None
def __init__(self, final_url=None, status=None, headers=None, body=None):
self.final_url = final_url
self.status = status
self.headers = headers
self.body = body
def __repr__(self):
return "<%s status %s for %s>" % (self.__class__.__name__,
self.status,
self.final_url)
class HTTPFetcher(object):
"""
This class is the interface for openid HTTP fetchers. This
interface is only important if you need to write a new fetcher for
some reason.
"""
def fetch(self, url, body=None, headers=None):
"""
This performs an HTTP POST or GET, following redirects along
the way. If a body is specified, then the request will be a
POST. Otherwise, it will be a GET.
@param headers: HTTP headers to include with the request
@type headers: {str:str}
@return: An object representing the server's HTTP response. If
there are network or protocol errors, an exception will be
raised. HTTP error responses, like 404 or 500, do not
cause exceptions.
@rtype: L{HTTPResponse}
@raise Exception: Different implementations will raise
different errors based on the underlying HTTP library.
"""
raise NotImplementedError
def _allowedURL(url):
return url.startswith('http://') or url.startswith('https://')
class HTTPFetchingError(Exception):
"""Exception that is wrapped around all exceptions that are raised
by the underlying fetcher when using the ExceptionWrappingFetcher
@ivar why: The exception that caused this exception
"""
def __init__(self, why=None):
Exception.__init__(self, why)
self.why = why
class ExceptionWrappingFetcher(HTTPFetcher):
"""Fetcher that wraps another fetcher, causing all exceptions
@cvar uncaught_exceptions: Exceptions that should be exposed to the
user if they are raised by the fetch call
"""
uncaught_exceptions = (SystemExit, KeyboardInterrupt, MemoryError)
def __init__(self, fetcher):
self.fetcher = fetcher
def fetch(self, *args, **kwargs):
try:
return self.fetcher.fetch(*args, **kwargs)
except self.uncaught_exceptions:
raise
except:
exc_cls, exc_inst = sys.exc_info()[:2]
if exc_inst is None:
# string exceptions
exc_inst = exc_cls
raise HTTPFetchingError(why=exc_inst)
class Urllib2Fetcher(HTTPFetcher):
"""An C{L{HTTPFetcher}} that uses urllib2.
"""
# Parameterized for the benefit of testing frameworks, see
# http://trac.openidenabled.com/trac/ticket/85
urlopen = staticmethod(urllib2.urlopen)
def fetch(self, url, body=None, headers=None):
if not _allowedURL(url):
raise ValueError('Bad URL scheme: %r' % (url,))
if headers is None:
headers = {}
headers.setdefault(
'User-Agent',
"%s Python-urllib/%s" % (USER_AGENT, urllib2.__version__,))
headers.setdefault(
'Range',
'0-%s' % (1024*MAX_RESPONSE_KB,))
req = urllib2.Request(url, data=body, headers=headers)
try:
f = self.urlopen(req)
try:
return self._makeResponse(f)
finally:
f.close()
except urllib2.HTTPError, why:
try:
return self._makeResponse(why)
finally:
why.close()
def _makeResponse(self, urllib2_response):
resp = HTTPResponse()
resp.body = urllib2_response.read(MAX_RESPONSE_KB * 1024)
resp.final_url = urllib2_response.geturl()
resp.headers = dict(urllib2_response.info().items())
if hasattr(urllib2_response, 'code'):
resp.status = urllib2_response.code
else:
resp.status = 200
return resp
class HTTPError(HTTPFetchingError):
"""
This exception is raised by the C{L{CurlHTTPFetcher}} when it
encounters an exceptional situation fetching a URL.
"""
pass
# XXX: define what we mean by paranoid, and make sure it is.
class CurlHTTPFetcher(HTTPFetcher):
"""
An C{L{HTTPFetcher}} that uses pycurl for fetching.
See U{http://pycurl.sourceforge.net/}.
"""
ALLOWED_TIME = 20 # seconds
def __init__(self):
HTTPFetcher.__init__(self)
if pycurl is None:
raise RuntimeError('Cannot find pycurl library')
def _parseHeaders(self, header_file):
header_file.seek(0)
# Remove the status line from the beginning of the input
unused_http_status_line = header_file.readline()
lines = [line.strip() for line in header_file]
# and the blank line from the end
empty_line = lines.pop()
if empty_line:
raise HTTPError("No blank line at end of headers: %r" % (line,))
headers = {}
for line in lines:
try:
name, value = line.split(':', 1)
except ValueError:
raise HTTPError(
"Malformed HTTP header line in response: %r" % (line,))
value = value.strip()
# HTTP headers are case-insensitive
name = name.lower()
headers[name] = value
return headers
def _checkURL(self, url):
# XXX: document that this can be overridden to match desired policy
# XXX: make sure url is well-formed and routeable
return _allowedURL(url)
def fetch(self, url, body=None, headers=None):
stop = int(time.time()) + self.ALLOWED_TIME
off = self.ALLOWED_TIME
if headers is None:
headers = {}
headers.setdefault('User-Agent',
"%s %s" % (USER_AGENT, pycurl.version,))
header_list = []
if headers is not None:
for header_name, header_value in headers.iteritems():
header_list.append('%s: %s' % (header_name, header_value))
c = pycurl.Curl()
try:
c.setopt(pycurl.NOSIGNAL, 1)
if header_list:
c.setopt(pycurl.HTTPHEADER, header_list)
# Presence of a body indicates that we should do a POST
if body is not None:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, body)
while off > 0:
if not self._checkURL(url):
raise HTTPError("Fetching URL not allowed: %r" % (url,))
data = cStringIO.StringIO()
def write_data(chunk):
if data.tell() > 1024*MAX_RESPONSE_KB:
return 0
else:
return data.write(chunk)
response_header_data = cStringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, write_data)
c.setopt(pycurl.HEADERFUNCTION, response_header_data.write)
c.setopt(pycurl.TIMEOUT, off)
c.setopt(pycurl.URL, openid.urinorm.urinorm(url))
c.setopt(pycurl.RANGE, '0-%s'%(MAX_RESPONSE_KB*1024))
c.perform()
response_headers = self._parseHeaders(response_header_data)
code = c.getinfo(pycurl.RESPONSE_CODE)
if code in [301, 302, 303, 307]:
url = response_headers.get('location')
if url is None:
raise HTTPError(
'Redirect (%s) returned without a location' % code)
# Redirects are always GETs
c.setopt(pycurl.POST, 0)
# There is no way to reset POSTFIELDS to empty and
# reuse the connection, but we only use it once.
else:
resp = HTTPResponse()
resp.headers = response_headers
resp.status = code
resp.final_url = url
resp.body = data.getvalue()
return resp
off = stop - int(time.time())
raise HTTPError("Timed out fetching: %r" % (url,))
finally:
c.close()
class HTTPLib2Fetcher(HTTPFetcher):
"""A fetcher that uses C{httplib2} for performing HTTP
requests. This implementation supports HTTP caching.
@see: http://bitworking.org/projects/httplib2/
"""
def __init__(self, cache=None):
"""@param cache: An object suitable for use as an C{httplib2}
cache. If a string is passed, it is assumed to be a
directory name.
"""
if httplib2 is None:
raise RuntimeError('Cannot find httplib2 library. '
'See http://bitworking.org/projects/httplib2/')
super(HTTPLib2Fetcher, self).__init__()
# An instance of the httplib2 object that performs HTTP requests
self.httplib2 = httplib2.Http(cache)
# We want httplib2 to raise exceptions for errors, just like
# the other fetchers.
self.httplib2.force_exception_to_status_code = False
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by httplib2
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
method = 'POST'
else:
method = 'GET'
if headers is None:
headers = {}
headers.setdefault(
'Range',
'0-%s' % (1024*MAX_RESPONSE_KB,))
# httplib2 doesn't check to make sure that the URL's scheme is
# 'http' so we do it here.
if not (url.startswith('http://') or url.startswith('https://')):
raise ValueError('URL is not a HTTP URL: %r' % (url,))
httplib2_response, content = self.httplib2.request(
url, method, body=body, headers=headers)
# Translate the httplib2 response to our HTTP response abstraction
# When a 400 is returned, there is no "content-location"
# header set. This seems like a bug to me. I can't think of a
# case where we really care about the final URL when it is an
# error response, but being careful about it can't hurt.
try:
final_url = httplib2_response['content-location']
except KeyError:
# We're assuming that no redirects occurred
assert not httplib2_response.previous
# And this should never happen for a successful response
assert httplib2_response.status != 200
final_url = url
return HTTPResponse(
body=content,
final_url=final_url,
headers=dict(httplib2_response.items()),
status=httplib2_response.status,
)
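# Illustrative usage (sketch added for clarity; not part of the original
# module): the module-level fetch() helper delegates to whichever default
# fetcher is installed (pycurl-based when pycurl imports, urllib2 otherwise).
#
#     response = fetch('http://example.com/')
#     print response.status, response.final_url
#     print response.body[:80]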
|
apache-2.0
| -4,169,098,957,301,690,000
| 31.560465
| 79
| 0.592029
| false
| 4.300061
| false
| false
| false
|
g-weatherill/oq-risklib
|
openquake/commonlib/tests/_utils.py
|
1
|
1745
|
# Copyright (c) 2010-2014, GEM Foundation.
#
# NRML is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NRML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with NRML. If not, see <http://www.gnu.org/licenses/>.
from xml.etree.ElementTree import parse
from openquake.baselib.general import writetmp
from openquake.commonlib.writers import tostring
from openquake.commonlib.nrml import PARSE_NS_MAP
def get_path(fname_or_fileobject):
if isinstance(fname_or_fileobject, str):
return fname_or_fileobject
elif hasattr(fname_or_fileobject, 'getvalue'):
return writetmp(fname_or_fileobject.getvalue())
elif hasattr(fname_or_fileobject, 'name'):
return fname_or_fileobject.name
else:
raise TypeError(fname_or_fileobject)
def assert_xml_equal(a, b):
"""
Compare two XML artifacts for equality.
:param a, b:
Paths to XML files, or a file-like object containing the XML
contents.
"""
path_a = get_path(a)
path_b = get_path(b)
content_a = tostring(parse(a).getroot(), nsmap=PARSE_NS_MAP)
content_b = tostring(parse(b).getroot(), nsmap=PARSE_NS_MAP)
if content_a != content_b:
raise AssertionError('The files %s and %s are different!' %
(path_a, path_b))
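# Illustrative usage (sketch; the file name and variable are made up): both
# arguments may be paths or file-like objects, so an in-memory document can be
# checked against an on-disk fixture:
#
#     assert_xml_equal('expected/exposure.xml', io.StringIO(generated_xml))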
|
agpl-3.0
| -301,822,622,540,493,440
| 35.354167
| 74
| 0.699713
| false
| 3.635417
| false
| false
| false
|
resolutedreamer/IR-Transmit-Receive
|
scripts/etc/init.d/notify_ip.py
|
1
|
6098
|
import smtplib
import subprocess
import sys
import time
import threading
import datetime
from email.mime.text import MIMEText
# Please Fill in with Correct Information to use
SMTP_SERVER = "smtp.gmail.com:587"
SMTP_UNAME = "email@email.com"
SMTP_PASSWD = "incorrect_password"
DEFAULT_NOTIFY_PAUSE = 3600
DEFAULT_CHECK_INTERVAL = 60
CELL_SERVICE_DOMAIN = {
"att": "txt.att.net",
"verizon": "vtext.com",
"tmobile": "tmomail.net",
"sprint": "messaging.sprintpcs.com",
"virgin": "vmobl.com",
"uscellular": "email.uscc.net",
"nextel": "messaging.nextel.com",
"boost": "myboostmobile.com",
"alltel": "message.alltel.com"
}
class Notification(object):
"""The base class for all notification objects.
Notification subclasses must implement the notify() method, which
actually sends the notification.
"""
def __init__(self, condition, arg, notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
condition (function): A function that takes one argument (arg) and
and returns a boolean, which indicates whether the notification
should be sent.
arg (any) : arg is passed to condition function. It can be anything
the user wants.
notify_pause (int, optional): The number of seconds to wait after
sending a notification before sending a repeat notification.
"""
self.condition = condition
self.arg = arg
self.notify_pause = notify_pause
self.last_notify_time = 0
def try_notify(self):
"""Tries to send the notification if the condition is satisfied and
we haven't already sent a notification too recently.
"""
if self.last_notify_time == 0:
notify_ready_time = 0
else:
notify_ready_time = self.last_notify_time + self.notify_pause
if self.condition(self.arg) and notify_ready_time < time.time():
self.notify()
self.last_notify_time = time.time()
class EmailNotification(Notification):
"""Sends email notifications"""
def __init__(self, email, msg, condition, arg,
notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
email (string): The email to send the notification to.
msg (string): The message to send in the email.
condition, arg, notify_pause: Same as for Notification.
"""
self.email = email
self.msg = msg
super(EmailNotification, self).__init__(condition, arg,
notify_pause)
self.last_notify_time = 0
def notify(self):
"""Sends the email notification"""
subject = "Energy Dashboard Notification"
from_addr = "enerdash@gmail.com"
if hasattr(self.msg, "__call__"):
mimetext = MIMEText(self.msg())
else:
mimetext = MIMEText(self.msg)
mimetext["Subject"] = subject
mimetext["From"] = from_addr
mimetext["To"] = self.email
server = smtplib.SMTP(SMTP_SERVER)
server.starttls()
server.login(SMTP_UNAME, SMTP_PASSWD)
server.sendmail(from_addr, [self.email], mimetext.as_string())
server.quit()
class TxtNotification(EmailNotification):
"""Sends text message notifications"""
def __init__(self, number, service, msg, condition, args,
notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
number (int or string): The phone number to receive the text
message.
service (string): Must be one of the keys of CELL_SERVICE_DOMAIN.
msg (string): The content of the text message.
condition, args, notify_pause: Same as for Notification.
"""
email = str(number) + "@" + CELL_SERVICE_DOMAIN[service]
super(TxtNotification, self).__init__(email, msg, condition, args,
notify_pause)
class NotificationManager(threading.Thread):
"""Thread that will continue to try to send notifications if the
notification conditions are satisfied.
"""
def __init__(self, check_interval=DEFAULT_CHECK_INTERVAL):
"""
Args:
check_interval (int, optional): The number of seconds to wait in
between checking the conditions of the notifications.
"""
super(NotificationManager, self).__init__()
self.notifications = []
self.active = False
self.check_interval = check_interval
def add_notification(self, notification):
"""Adds a notification to monitor.
Args:
notification (subclass of Notification): The notification to
monitor.
"""
self.notifications.append(notification)
def start_notifications(self):
"""Start the notification thread."""
self.active = True
self.start()
def stop_notifications(self):
"""Stop the notification thread."""
self.active = False
def run(self):
"""Runs inside the thread. You don't need to call this."""
while self.active:
self._send_notifications()
time.sleep(self.check_interval)
def _send_notifications(self):
for notif in self.notifications:
notif.try_notify()
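# Example (illustrative sketch): a notification that fires when a reading
# exceeds a threshold; 'get_power' is a hypothetical callable returning the
# latest measurement.
#
#     notif = EmailNotification("ops@example.com", "power too high",
#                               lambda limit: get_power() > limit, 1500)
#     manager = NotificationManager(check_interval=30)
#     manager.add_notification(notif)
#     manager.start_notifications()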
if __name__ == "__main__":
time.sleep(10)
nm = NotificationManager()
output = subprocess.check_output("ifconfig", shell=True)
addr_index = output.rfind('inet addr:')
ip_start_index = (addr_index + 10)
ip_max_index = (ip_start_index + 16)
ip = output[ip_start_index:ip_max_index]
ip_true_end_index = ip.find(' ')
ip = ip[:ip_true_end_index]
msg = "Time is: " + str(datetime.datetime.now()) + '\n' + 'ip is ' + ip
email_notification = EmailNotification("an_email@email.com", msg, lambda x: True, None, notify_pause=60)
txtnot = TxtNotification("8001234567", "att", msg,
lambda x: True, None, notify_pause=60)
nm.add_notification(email_notification)
nm.add_notification(txtnot)
nm.start_notifications()
time.sleep(5)
nm.stop_notifications()
sys.exit()
|
apache-2.0
| 7,988,192,192,993,335,000
| 33.067039
| 108
| 0.61594
| false
| 4.111935
| false
| false
| false
|
shreesundara/netsnmp
|
pysmi/compiler.py
|
1
|
21084
|
import sys
import os
import time
try:
from pwd import getpwuid
except ImportError:
getpwuid = lambda x: ['<unknown>']
from pysmi import __name__ as packageName
from pysmi import __version__ as packageVersion
from pysmi.mibinfo import MibInfo
from pysmi.codegen.symtable import SymtableCodeGen
from pysmi import error
from pysmi import debug
class MibStatus(str):
"""Indicate MIB transformation result.
*MibStatus* is a subclass of Python string type. Some additional
attributes may be set to indicate the details.
The following *MibStatus* class instances are defined:
* *compiled* - MIB is successfully transformed
* *untouched* - fresh transformed version of this MIB already exists
* *failed* - MIB transformation failed. *error* attribute carries details.
* *unprocessed* - MIB transformation required but waived for some reason
* *missing* - ASN.1 MIB source can't be found
* *borrowed* - MIB transformation failed but pre-transformed version was used
"""
def setOptions(self, **kwargs):
n = self.__class__(self)
for k in kwargs:
setattr(n, k, kwargs[k])
return n
statusCompiled = MibStatus('compiled')
statusUntouched = MibStatus('untouched')
statusFailed = MibStatus('failed')
statusUnprocessed = MibStatus('unprocessed')
statusMissing = MibStatus('missing')
statusBorrowed = MibStatus('borrowed')
class MibCompiler(object):
"""Top-level, user-facing, composite MIB compiler object.
MibCompiler implements high-level MIB transformation processing logic.
It executes its actions by calling the following specialized objects:
* *readers* - to acquire ASN.1 MIB data
* *searchers* - to see if transformed MIB already exists and no processing is necessary
* *parser* - to parse ASN.1 MIB into AST
* *code generator* - to perform actual MIB transformation
* *borrowers* - to fetch pre-transformed MIB if transformation is impossible
* *writer* - to store transformed MIB data
Required components must be passed to MibCompiler on instantiation. Those
components are: *parser*, *codegenerator* and *writer*.
Optional components could be set or modified at later phases of MibCompiler
life. Unlike singular, required components, optional one can be present
in sequences to address many possible sources of data. They are
*readers*, *searchers* and *borrowers*.
Examples: ::
from pysmi.reader.localfile import FileReader
from pysmi.searcher.pyfile import PyFileSearcher
from pysmi.searcher.pypackage import PyPackageSearcher
from pysmi.searcher.stub import StubSearcher
from pysmi.writer.pyfile import PyFileWriter
from pysmi.parser.smi import SmiV2Parser
from pysmi.codegen.pysnmp import PySnmpCodeGen, baseMibs
mibCompiler = MibCompiler(SmiV2Parser(),
PySnmpCodeGen(),
PyFileWriter('/tmp/pysnmp/mibs'))
mibCompiler.addSources(FileReader('/usr/share/snmp/mibs'))
mibCompiler.addSearchers(PyFileSearcher('/tmp/pysnmp/mibs'))
mibCompiler.addSearchers(PyPackageSearcher('pysnmp.mibs'))
mibCompiler.addSearchers(StubSearcher(*baseMibs))
results = mibCompiler.compile('IF-MIB', 'IP-MIB')
"""
indexFile = 'index'
def __init__(self, parser, codegen, writer):
"""Creates an instance of *MibCompiler* class.
Args:
parser: ASN.1 MIB parser object
codegen: MIB transformation object
writer: transformed MIB storing object
"""
self._parser = parser
self._codegen = codegen
self._symbolgen = SymtableCodeGen()
self._writer = writer
self._sources = []
self._searchers = []
self._borrowers = []
def addSources(self, *sources):
"""Add more ASN.1 MIB source repositories.
MibCompiler.compile will invoke each of configured source objects
in order of their addition asking each to fetch MIB module specified
by name.
Args:
sources: reader object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._sources.extend(sources)
debug.logger & debug.flagCompiler and debug.logger('current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))
return self
def addSearchers(self, *searchers):
"""Add more transformed MIBs repositories.
MibCompiler.compile will invoke each of configured searcher objects
in order of their addition asking each if already transformed MIB
module already exists and is more recent than specified.
Args:
searchers: searcher object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._searchers.extend(searchers)
debug.logger & debug.flagCompiler and debug.logger('current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))
return self
def addBorrowers(self, *borrowers):
"""Add more transformed MIBs repositories to borrow MIBs from.
Whenever MibCompiler.compile encounters MIB module which neither of
the *searchers* can find or fetched ASN.1 MIB module can not be
parsed (due to syntax errors), these *borrowers* objects will be
invoked in order of their addition asking each if already transformed
MIB can be fetched (borrowed).
Args:
borrowers: borrower object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._borrowers.extend(borrowers)
debug.logger & debug.flagCompiler and debug.logger('current MIB borrower(s): %s' % ', '.join([str(x) for x in self._borrowers]))
return self
def compile(self, *mibnames, **options):
"""Transform requested and possibly referred MIBs.
The *compile* method should be invoked when *MibCompiler* object
is operational meaning at least *sources* are specified.
Once called with a MIB module name, *compile* will:
* fetch ASN.1 MIB module with given name by calling *sources*
* make sure no such transformed MIB already exists (with *searchers*)
* parse ASN.1 MIB text with *parser*
* perform actual MIB transformation into target format with *code generator*
* may attempt to borrow pre-transformed MIB through *borrowers*
* write transformed MIB through *writer*
The above sequence will be performed for each MIB name given in
*mibnames* and may be performed for all MIBs referred to from
MIBs being processed.
Args:
mibnames: list of ASN.1 MIBs names
options: options that affect the way PySMI components work
Returns:
A dictionary of MIB module names processed (keys) and *MibStatus*
class instances (values)
"""
processed = {}
parsedMibs = {}; failedMibs = {}; borrowedMibs = {}; builtMibs = {}
symbolTableMap = {}
originalMib = mibnames[0]
mibsToParse = [x for x in mibnames]
while mibsToParse:
mibname = mibsToParse.pop(0)
if mibname in parsedMibs:
debug.logger & debug.flagCompiler and debug.logger('MIB %s already parsed' % mibname)
continue
if mibname in failedMibs:
debug.logger & debug.flagCompiler and debug.logger('MIB %s already failed' % mibname)
continue
for source in self._sources:
debug.logger & debug.flagCompiler and debug.logger('trying source %s' % source)
try:
fileInfo, fileData = source.getData(mibname)
for mibTree in self._parser.parse(fileData):
mibInfo, symbolTable = self._symbolgen.genCode(
mibTree, symbolTableMap
)
symbolTableMap[mibInfo.name] = symbolTable
parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree
if mibname in failedMibs:
del failedMibs[mibname]
mibsToParse.extend(mibInfo.imported)
debug.logger & debug.flagCompiler and debug.logger('%s (%s) read from %s, immediate dependencies: %s' % (mibInfo.name, mibname, fileInfo.path, ', '.join(mibInfo.imported) or '<none>'))
break
except error.PySmiReaderFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no %s found at %s' % (mibname, source))
continue
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.source = source
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('%serror %s from %s' % (options.get('ignoreErrors') and 'ignoring ' or 'failing on ', exc, source))
failedMibs[mibname] = exc
processed[mibname] = statusFailed.setOptions(error=exc)
else:
exc = error.PySmiError('MIB source %s not found' % mibname)
exc.mibname = mibname
debug.logger & debug.flagCompiler and debug.logger('no %s found anywhere' % mibname)
if mibname not in failedMibs:
failedMibs[mibname] = exc
if mibname not in processed:
processed[mibname] = statusMissing
debug.logger & debug.flagCompiler and debug.logger('MIBs analyzed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# See what MIBs need generating
#
for mibname in parsedMibs.copy():
fileInfo, mibInfo, mibTree = parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('checking if %s requires updating' % mibname)
for searcher in self._searchers:
try:
searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
except error.PySmiFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
continue
except error.PySmiFileNotModifiedError:
debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
del parsedMibs[mibname]
processed[mibname] = statusUntouched
break
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.searcher = searcher
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
continue
else:
debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from code generation' % mibname)
del parsedMibs[mibname]
processed[mibname] = statusUntouched
continue
debug.logger & debug.flagCompiler and debug.logger('MIBs parsed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# Generate code for parsed MIBs
#
for mibname in [originalMib]:
fileInfo, mibInfo, mibTree = parsedMibs[mibname]
comments = [
'ASN.1 source %s' % fileInfo.path,
'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
'Using Python version %s' % sys.version.split('\n')[0]
]
try:
mibInfo, mibData = self._codegen.genCode(
mibTree,
symbolTableMap,
comments=comments,
genTexts=options.get('genTexts'),
parsedMibs = parsedMibs
)
builtMibs[mibname] = fileInfo, mibInfo, mibData
del parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('%s read from %s and compiled by %s' % (mibname, fileInfo.path, self._writer))
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.handler = self._codegen
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (self._codegen, exc))
processed[mibname] = statusFailed.setOptions(error=exc)
failedMibs[mibname] = exc
del parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# Try to borrow pre-compiled MIBs for failed ones
#
for mibname in failedMibs.copy():
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
continue
for borrower in self._borrowers:
debug.logger & debug.flagCompiler and debug.logger('trying to borrow %s from %s' % (mibname, borrower))
try:
fileInfo, fileData = borrower.getData(
mibname,
genTexts=options.get('genTexts')
)
borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData
del failedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('%s borrowed with %s' % (mibname, borrower))
break
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (borrower, exc))
debug.logger & debug.flagCompiler and debug.logger('MIBs available for borrowing %s, MIBs failed %s' % (len(borrowedMibs), len(failedMibs)))
#
# See what MIBs need borrowing
#
for mibname in borrowedMibs.copy():
debug.logger & debug.flagCompiler and debug.logger('checking if failed MIB %s requires borrowing' % mibname)
fileInfo, mibInfo, mibData = borrowedMibs[mibname]
for searcher in self._searchers:
try:
searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
except error.PySmiFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
continue
except error.PySmiFileNotModifiedError:
debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
del borrowedMibs[mibname]
processed[mibname] = statusUntouched
break
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.searcher = searcher
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
continue
else:
debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
processed[mibname] = statusUntouched
else:
debug.logger & debug.flagCompiler and debug.logger('will borrow MIB %s' % mibname)
builtMibs[mibname] = borrowedMibs[mibname]
processed[mibname] = statusBorrowed.setOptions(
path=fileInfo.path, file=fileInfo.file,
alias=fileInfo.name
)
del borrowedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(builtMibs), len(failedMibs)))
#
# We could attempt to ignore missing/failed MIBs
#
if failedMibs and not options.get('ignoreErrors'):
debug.logger & debug.flagCompiler and debug.logger('failing with problem MIBs %s' % ', '.join(failedMibs))
for mibname in builtMibs:
processed[mibname] = statusUnprocessed
return processed
debug.logger & debug.flagCompiler and debug.logger('proceeding with built MIBs %s, failed MIBs %s' % (', '.join(builtMibs), ', '.join(failedMibs)))
#
# Store compiled MIBs
#
for mibname in builtMibs.copy():
fileInfo, mibInfo, mibData = builtMibs[mibname]
try:
self._writer.putData(
mibname, mibData, dryRun=options.get('dryRun')
)
debug.logger & debug.flagCompiler and debug.logger('%s stored by %s' % (mibname, self._writer))
del builtMibs[mibname]
if mibname not in processed:
processed[mibname] = statusCompiled.setOptions(
path=fileInfo.path, file=fileInfo.file,
alias=fileInfo.name
)
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.handler = self._codegen
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error %s from %s' % (exc, self._writer))
processed[mibname] = statusFailed.setOptions(error=exc)
failedMibs[mibname] = exc
del builtMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('MIBs modified: %s' % ', '.join([x for x in processed if processed[x] in ('compiled', 'borrowed')]))
return processed
def buildIndex(self, processedMibs, **options):
comments = [
'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
'Using Python version %s' % sys.version.split('\n')[0]
]
try:
self._writer.putData(
self.indexFile,
self._codegen.genIndex(
dict([(x, x.oid) for x in processedMibs if hasattr(x, 'oid')]),
comments=comments
),
dryRun=options.get('dryRun')
)
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.msg += ' at MIB index %s' % self.indexFile
debug.logger & debug.flagCompiler and debug.logger('error %s when building %s' % (exc, self.indexFile))
if options.get('ignoreErrors'):
return
if hasattr(exc, 'with_traceback'):
raise exc.with_traceback(tb)
else:
raise exc
|
bsd-2-clause
| 2,209,945,687,160,253,700
| 42.955224
| 266
| 0.572282
| false
| 4.151211
| false
| false
| false
|
ST-Data-Mining/crater
|
george/klazzifiers.py
|
1
|
5539
|
from __future__ import division,print_function
from os import environ
import sys
HOME=environ['HOME']
PROJECT_ROOT=HOME+'/Panzer/NCSU/Spatial and Temporal/crater'
EXPTS = PROJECT_ROOT+'/expts'
sys.path.extend([PROJECT_ROOT,EXPTS])
sys.dont_write_bytecode = True
from george.lib import *
from expts.csvParser import parseCSV, randomPoints
import config
def normalize_points(points):
if not len(points): return
tot = sum([point.w for point in points])
for point in points:
point.w = point.w/tot
def best_weak_classifier(points, attrLen, ignores=None, start=0):
best_c = None
if not ignores: ignores = []
for i in range(0,attrLen):
if i in ignores:
continue
classifier = WeakClassifier(points, i)
if (not best_c) or (classifier.trainError(start) < best_c.trainError(start)):
best_c = classifier
return best_c
def booster(fname, mu=0.475, T=150):
def updateWeights(classifier, b):
for p in points:
predicted = classifier.predict(p.x)
actual = int(p.y)
e = 0 if predicted == actual else 1
p.w *= b**(1-e)
points = parseCSV(fname)
strong = StrongClassifier(mu, T)
ignores = []
for t in range(0,T):
say(t+1, ' ')
normalize_points(points)
weak_classifier = best_weak_classifier(points, len(points[0].x), ignores)
ignores.append(weak_classifier.index)
error = weak_classifier.trainError()
beta = error/(1-error)
if beta == 0:
strong.T = t
break
updateWeights(weak_classifier, beta)
alpha = math.log(1/beta)
strong.update(weak_classifier,alpha)
print('')
return strong
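# Worked example of the boosting update above (illustrative numbers): if the
# best weak classifier has weighted training error 0.2, then
#   beta  = 0.2 / (1 - 0.2) = 0.25
#   alpha = log(1 / beta)  ~= 1.386
# and each correctly classified point keeps w *= beta**1 = 0.25 while every
# misclassified point keeps w *= beta**0 = 1, so after re-normalisation the
# hard examples carry more of the total weight in the next round.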
def _booster(fname, T=150):
print('***BOOSTER CLASSIFIER***')
boost_classifier = booster(fname, T=T)
#print(boost_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
points = parseCSV(config.FEATURES_FOLDER+test_files[0], False)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], False)
stat = ABCD()
for point in points:
pred = boost_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def greedy(fname, mu=0.325, T=150):
points = parseCSV(fname)
strong = StrongClassifier(mu, T)
ignores = []
normalize_points(points)
for t in range(0, T):
say(t+1,' ')
weak_classifier = best_weak_classifier(points, len(points[0].x), ignores)
ignores.append(weak_classifier.index)
error = weak_classifier.trainError()
beta = error/(1-error)
if beta == 0:
strong.T = t
break
alpha = math.log(1/beta)
strong.update(weak_classifier,alpha)
print('')
return strong
def _greedy(fname, T=150):
print('***GREEDY CLASSIFIER***')
greedy_classifier = greedy(fname, T=T)
#print(greedy_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
points = parseCSV(config.FEATURES_FOLDER+test_files[0], False)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], False)
stat = ABCD()
for point in points:
pred = greedy_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def transfer(fname, sameFiles, mu=0.5, T=150):
def craterCount(points):
count=0
for point in points:
if point.y==0: count+=1
return count
def updateWeights(classifier, b, b_t):
for i, point in enumerate(total):
predicted = classifier.predict(point.x)
actual = int(point.y)
e = 0 if predicted == actual else 1
if i<len(diff):
point.w *= b**e
else:
point.w *= b_t**-e
diff = parseCSV(fname, False)
same = randomPoints(sameFiles, craters=102, non_craters=153)
total = diff+same
craters = craterCount(total)
non_craters = len(total) - craters
[p.updateWeight(non_craters, craters) for p in total]
strong = StrongClassifier(mu, T)
ignores=[]
for t in range(0,T):
say(t+1,' ')
normalize_points(total)
weak_classifier = best_weak_classifier(total, len(total[0].x), ignores, len(diff))
ignores.append(weak_classifier.index)
error = weak_classifier.trainError(start=len(diff))
if error == 0:
strong.T = t
break
beta_t = error/(1-error)
beta = 1/(1+(2*math.log(len(total)/T))**0.5)
updateWeights(weak_classifier, beta, beta_t)
alpha = math.log(1/beta_t)
strong.update(weak_classifier, alpha)
print('')
return strong
def _transfer(fname, T=150):
print('***TRANSFER CLASSIFIER***')
#print(tl_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
tl_classifier = transfer(fname, test_files, T=T)
points = parseCSV(config.FEATURES_FOLDER+test_files[0], True)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], True)
stat = ABCD()
for point in points:
pred = tl_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def _runner(T=150):
train = config.TRAIN_FILE
_booster(train,T)
_greedy(train,T)
_transfer(train,T)
if __name__=="__main__":
_runner(150)
|
mit
| -4,664,917,264,122,412,000
| 28.462766
| 97
| 0.625925
| false
| 3.111798
| true
| false
| false
|
YoungKwonJo/mlxtend
|
mlxtend/data/iris.py
|
1
|
6046
|
# Sebastian Raschka 2015
# mlxtend Machine Learning Library Extensions
import numpy as np
def iris_data():
"""Iris flower dataset.
Returns
--------
X, y : [n_samples, n_features], [n_class_labels]
X is the feature matrix with 150 flower samples as rows,
and the 4 feature columns sepal length, sepal width,
petal length, and petal width.
y is a 1-dimensional array of the class labels where
0 = setosa, 1 = versicolor, 2 = virginica.
Reference: https://archive.ics.uci.edu/ml/datasets/Iris
"""
X = np.array([[ 5.1, 3.5, 1.4, 0.2],
[ 4.9, 3. , 1.4, 0.2],
[ 4.7, 3.2, 1.3, 0.2],
[ 4.6, 3.1, 1.5, 0.2],
[ 5. , 3.6, 1.4, 0.2],
[ 5.4, 3.9, 1.7, 0.4],
[ 4.6, 3.4, 1.4, 0.3],
[ 5. , 3.4, 1.5, 0.2],
[ 4.4, 2.9, 1.4, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 5.4, 3.7, 1.5, 0.2],
[ 4.8, 3.4, 1.6, 0.2],
[ 4.8, 3. , 1.4, 0.1],
[ 4.3, 3. , 1.1, 0.1],
[ 5.8, 4. , 1.2, 0.2],
[ 5.7, 4.4, 1.5, 0.4],
[ 5.4, 3.9, 1.3, 0.4],
[ 5.1, 3.5, 1.4, 0.3],
[ 5.7, 3.8, 1.7, 0.3],
[ 5.1, 3.8, 1.5, 0.3],
[ 5.4, 3.4, 1.7, 0.2],
[ 5.1, 3.7, 1.5, 0.4],
[ 4.6, 3.6, 1. , 0.2],
[ 5.1, 3.3, 1.7, 0.5],
[ 4.8, 3.4, 1.9, 0.2],
[ 5. , 3. , 1.6, 0.2],
[ 5. , 3.4, 1.6, 0.4],
[ 5.2, 3.5, 1.5, 0.2],
[ 5.2, 3.4, 1.4, 0.2],
[ 4.7, 3.2, 1.6, 0.2],
[ 4.8, 3.1, 1.6, 0.2],
[ 5.4, 3.4, 1.5, 0.4],
[ 5.2, 4.1, 1.5, 0.1],
[ 5.5, 4.2, 1.4, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 5. , 3.2, 1.2, 0.2],
[ 5.5, 3.5, 1.3, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 4.4, 3. , 1.3, 0.2],
[ 5.1, 3.4, 1.5, 0.2],
[ 5. , 3.5, 1.3, 0.3],
[ 4.5, 2.3, 1.3, 0.3],
[ 4.4, 3.2, 1.3, 0.2],
[ 5. , 3.5, 1.6, 0.6],
[ 5.1, 3.8, 1.9, 0.4],
[ 4.8, 3. , 1.4, 0.3],
[ 5.1, 3.8, 1.6, 0.2],
[ 4.6, 3.2, 1.4, 0.2],
[ 5.3, 3.7, 1.5, 0.2],
[ 5. , 3.3, 1.4, 0.2],
[ 7. , 3.2, 4.7, 1.4],
[ 6.4, 3.2, 4.5, 1.5],
[ 6.9, 3.1, 4.9, 1.5],
[ 5.5, 2.3, 4. , 1.3],
[ 6.5, 2.8, 4.6, 1.5],
[ 5.7, 2.8, 4.5, 1.3],
[ 6.3, 3.3, 4.7, 1.6],
[ 4.9, 2.4, 3.3, 1. ],
[ 6.6, 2.9, 4.6, 1.3],
[ 5.2, 2.7, 3.9, 1.4],
[ 5. , 2. , 3.5, 1. ],
[ 5.9, 3. , 4.2, 1.5],
[ 6. , 2.2, 4. , 1. ],
[ 6.1, 2.9, 4.7, 1.4],
[ 5.6, 2.9, 3.6, 1.3],
[ 6.7, 3.1, 4.4, 1.4],
[ 5.6, 3. , 4.5, 1.5],
[ 5.8, 2.7, 4.1, 1. ],
[ 6.2, 2.2, 4.5, 1.5],
[ 5.6, 2.5, 3.9, 1.1],
[ 5.9, 3.2, 4.8, 1.8],
[ 6.1, 2.8, 4. , 1.3],
[ 6.3, 2.5, 4.9, 1.5],
[ 6.1, 2.8, 4.7, 1.2],
[ 6.4, 2.9, 4.3, 1.3],
[ 6.6, 3. , 4.4, 1.4],
[ 6.8, 2.8, 4.8, 1.4],
[ 6.7, 3. , 5. , 1.7],
[ 6. , 2.9, 4.5, 1.5],
[ 5.7, 2.6, 3.5, 1. ],
[ 5.5, 2.4, 3.8, 1.1],
[ 5.5, 2.4, 3.7, 1. ],
[ 5.8, 2.7, 3.9, 1.2],
[ 6. , 2.7, 5.1, 1.6],
[ 5.4, 3. , 4.5, 1.5],
[ 6. , 3.4, 4.5, 1.6],
[ 6.7, 3.1, 4.7, 1.5],
[ 6.3, 2.3, 4.4, 1.3],
[ 5.6, 3. , 4.1, 1.3],
[ 5.5, 2.5, 4. , 1.3],
[ 5.5, 2.6, 4.4, 1.2],
[ 6.1, 3. , 4.6, 1.4],
[ 5.8, 2.6, 4. , 1.2],
[ 5. , 2.3, 3.3, 1. ],
[ 5.6, 2.7, 4.2, 1.3],
[ 5.7, 3. , 4.2, 1.2],
[ 5.7, 2.9, 4.2, 1.3],
[ 6.2, 2.9, 4.3, 1.3],
[ 5.1, 2.5, 3. , 1.1],
[ 5.7, 2.8, 4.1, 1.3],
[ 6.3, 3.3, 6. , 2.5],
[ 5.8, 2.7, 5.1, 1.9],
[ 7.1, 3. , 5.9, 2.1],
[ 6.3, 2.9, 5.6, 1.8],
[ 6.5, 3. , 5.8, 2.2],
[ 7.6, 3. , 6.6, 2.1],
[ 4.9, 2.5, 4.5, 1.7],
[ 7.3, 2.9, 6.3, 1.8],
[ 6.7, 2.5, 5.8, 1.8],
[ 7.2, 3.6, 6.1, 2.5],
[ 6.5, 3.2, 5.1, 2. ],
[ 6.4, 2.7, 5.3, 1.9],
[ 6.8, 3. , 5.5, 2.1],
[ 5.7, 2.5, 5. , 2. ],
[ 5.8, 2.8, 5.1, 2.4],
[ 6.4, 3.2, 5.3, 2.3],
[ 6.5, 3. , 5.5, 1.8],
[ 7.7, 3.8, 6.7, 2.2],
[ 7.7, 2.6, 6.9, 2.3],
[ 6. , 2.2, 5. , 1.5],
[ 6.9, 3.2, 5.7, 2.3],
[ 5.6, 2.8, 4.9, 2. ],
[ 7.7, 2.8, 6.7, 2. ],
[ 6.3, 2.7, 4.9, 1.8],
[ 6.7, 3.3, 5.7, 2.1],
[ 7.2, 3.2, 6. , 1.8],
[ 6.2, 2.8, 4.8, 1.8],
[ 6.1, 3. , 4.9, 1.8],
[ 6.4, 2.8, 5.6, 2.1],
[ 7.2, 3. , 5.8, 1.6],
[ 7.4, 2.8, 6.1, 1.9],
[ 7.9, 3.8, 6.4, 2. ],
[ 6.4, 2.8, 5.6, 2.2],
[ 6.3, 2.8, 5.1, 1.5],
[ 6.1, 2.6, 5.6, 1.4],
[ 7.7, 3. , 6.1, 2.3],
[ 6.3, 3.4, 5.6, 2.4],
[ 6.4, 3.1, 5.5, 1.8],
[ 6. , 3. , 4.8, 1.8],
[ 6.9, 3.1, 5.4, 2.1],
[ 6.7, 3.1, 5.6, 2.4],
[ 6.9, 3.1, 5.1, 2.3],
[ 5.8, 2.7, 5.1, 1.9],
[ 6.8, 3.2, 5.9, 2.3],
[ 6.7, 3.3, 5.7, 2.5],
[ 6.7, 3. , 5.2, 2.3],
[ 6.3, 2.5, 5. , 1.9],
[ 6.5, 3. , 5.2, 2. ],
[ 6.2, 3.4, 5.4, 2.3],
[ 5.9, 3. , 5.1, 1.8]])
y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
return X, y
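# Example usage (illustrative sketch, not part of the original module):
# X holds one row per flower sample and one column per feature; y holds the
# integer class labels 0, 1, 2.
#
#     X, y = iris_data()
#     X.shape  # (150, 4)
#     y.shape  # (150,)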
|
bsd-3-clause
| -8,277,334,933,845,212,000
| 32.588889
| 86
| 0.275885
| false
| 1.697361
| false
| false
| false
|
isb-cgc/ISB-CGC-data-proc
|
bigquery_etl/extract/utils.py
|
1
|
2377
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract Utils
"""
import json
import pandas as pd
import logging
log = logging.getLogger(__name__)
def convert_file_to_dataframe(filepath_or_buffer, sep="\t", skiprows=0, rollover=False, nrows=None, header = 'infer'):
"""does some required data cleaning and
then converts into a dataframe
"""
log.info("Converting file to a dataframe")
try:
# items to change to NaN/NULL
# when you change something here, remember to change in clean_up_dataframe too.
na_values = ['none', 'None', 'NONE', 'null', 'Null', 'NULL', ' ', 'NA', '__UNKNOWN__', '?']
# read the table/file
data_df = pd.read_table(filepath_or_buffer, sep=sep, skiprows=skiprows, lineterminator='\n',
comment='#', na_values=na_values, dtype='object', nrows=nrows, header = header,
encoding='utf-8')
except Exception as exp:
log.exception('problem converting to dataframe: %s' % (exp.message))
raise
filepath_or_buffer.close() # close StringIO
return data_df
#----------------------------------------
# Convert newline-delimited JSON string to dataframe
# -- should work for a small to medium files
# we are not loading into string, but into a temp file
# works only in a single bucket
#----------------------------------------
def convert_njson_file_to_df(filebuffer):
"""Converting new-line delimited JSON file into dataframe"""
log.info("Converting new-line delimited JSON file into dataframe")
# convert the file into a dataframe
lines = [json.loads(l) for l in filebuffer.splitlines()]
data_df = pd.DataFrame(lines)
# delete the temp file
filebuffer.close()
return data_df
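# Illustrative usage (sketch; the literal content is made up): the helper
# expects a file-like object and closes it itself, so wrap raw strings first:
#
#     from io import StringIO
#     df = convert_file_to_dataframe(StringIO(u"a\tb\n1\t2\n"))
#     df.columns.tolist()  # ['a', 'b']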
|
apache-2.0
| 8,472,730,164,494,883,000
| 33.449275
| 118
| 0.6504
| false
| 4.021997
| false
| false
| false
|
sargm/selenium-py-traning-barancev
|
php4dvd/model/application.py
|
1
|
4501
|
from selenium.common.exceptions import *
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from php4dvd.pages.page import Page
from php4dvd.pages.login_page import LoginPage
from php4dvd.pages.internal_page import InternalPage
from php4dvd.pages.user_management_page import UserManagementPage
from php4dvd.pages.user_profile_page import UserProfilePage
from php4dvd.pages.add_movie_page import AddMoviePage
from php4dvd.pages.view_movie_page import ViewMoviePage
from model.user import User
from model.movie import Movie
class Application(object):
def __init__(self, driver, base_url):
self.driver = driver
driver.get(base_url)
self.wait = WebDriverWait(driver, 10)
self.login_page = LoginPage(driver, base_url)
self.internal_page = InternalPage(driver, base_url)
self.user_management_page = UserManagementPage(driver, base_url)
self.user_profile_page = UserProfilePage(driver, base_url)
self.add_movie_page = AddMoviePage(driver, base_url)
self.view_movie_page = ViewMoviePage(driver, base_url)
def login(self, user):
lp = self.login_page
lp.is_this_page
#lp.username_field.clear()
lp.username_field.send_keys(user.username)
#lp.password_field.clear()
#print("sdadsadasd", user.password)
lp.password_field.send_keys(user.password)
lp.submit_button.click()
def logout(self):
self.internal_page.logout_button.click()
self.wait.until(EC.alert_is_present()).accept()
def is_logged_in(self):
return self.internal_page.is_this_page
def is_logged_in_as(self,user):
return self.is_logged_in() \
and self.get_logged_user().username == user.username
def get_logged_user(self):
self.internal_page.user_profile_link.click()
upp = self.user_profile_page
upp.is_this_page
return User(username=upp.user_form.username_field.get_attribute("value"),
email=upp.user_form.email_field.get_attribute("value"))
def is_not_logged_in(self):
return self.login_page.is_this_page
def add_user(self,user):
self.internal_page.user_management_link.click()
ump = self.user_management_page
ump.is_this_page
ump.user_form.username_field.send_keys(user.username)
ump.user_form.email_field.send_keys(user.email)
ump.user_form.password_field.send_keys(user.password)
ump.user_form.password1_field.send_keys(user.password)
#ump.user_form.role_select.select_by_visible_text(user.role)
ump.user_form.submit_button.click()
def ensure_logout(self):
element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
if element.tag_name == "nav":
self.logout()
def ensure_login_as(self,user):
element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
if element.tag_name == "nav":
#we are on internal page
if self.is_logged_in_as(user):
return
else:
self.logout()
self.login(user)
def add_movie(self, movie):
self.internal_page.add_movie_link.click()
amp = self.add_movie_page
amp.is_this_page
amp.movie_form.movietitle_field.send_keys(movie.title)
amp.movie_form.movieyear_field.send_keys(movie.year)
#amp.movie_form.movieformat_field.send_keys(movie.format)
amp.movie_form.submit_button.click()
def is_added_movie(self, movie):
return self.get_added_movie().title == movie.title + " (" + movie.year + ")"
def get_added_movie(self):
vmp = self.view_movie_page
vmp.is_this_page
return Movie(title=vmp.movietitle_field.text)
def delete_movie(self, movie):
vmp = self.view_movie_page
vmp.is_this_page
vmp.movie_delete_link.click()
try:
element = vmp.wait.until(EC.alert_is_present())
alert = vmp.driver.switch_to_alert()
alert.accept()
#alert_text = alert.text
#print("text", alert_text)
print("alert accepted")
return True
except TimeoutException:
print("no alert")
return False
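# Illustrative sketch (hypothetical usage, not part of the original module): how this
# page-object facade might be driven end-to-end. The Firefox driver, base URL and the
# User constructor arguments below are assumptions.
if __name__ == '__main__':
    driver = webdriver.Firefox()
    try:
        app = Application(driver, 'http://localhost/php4dvd/')
        app.ensure_login_as(User(username='admin', password='admin'))
        assert app.is_logged_in()
        app.logout()
    finally:
        driver.quit()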
|
apache-2.0
| -5,599,819,519,281,207,000
| 36.508333
| 103
| 0.647856
| false
| 3.519156
| false
| false
| false
|
pkilambi/ceilometer
|
ceilometer/storage/impl_mongodb.py
|
1
|
35357
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2014 Red Hat, Inc
#
# Authors: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
# Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
import calendar
import copy
import datetime
import json
import operator
import uuid
import bson.code
import bson.objectid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import pymongo
import six
import ceilometer
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer.storage import pymongo_base
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'resources': {'query': {'simple': True,
'metadata': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}}
}
class Connection(pymongo_base.Connection):
"""Put the data into a MongoDB database
Collections::
- meter
- the raw incoming data
- resource
- the metadata for resources
- { _id: uuid of resource,
metadata: metadata dictionaries
user_id: uuid
project_id: uuid
meter: [ array of {counter_name: string, counter_type: string,
counter_unit: string} ]
}
"""
CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
CONNECTION_POOL = pymongo_utils.ConnectionPool()
STANDARD_AGGREGATES = dict(
emit_initial=dict(
sum='',
count='',
avg='',
min='',
max=''
),
emit_body=dict(
sum='sum: this.counter_volume,',
count='count: NumberInt(1),',
avg='acount: NumberInt(1), asum: this.counter_volume,',
min='min: this.counter_volume,',
max='max: this.counter_volume,'
),
reduce_initial=dict(
sum='',
count='',
avg='',
min='',
max=''
),
reduce_body=dict(
sum='sum: values[0].sum,',
count='count: values[0].count,',
avg='acount: values[0].acount, asum: values[0].asum,',
min='min: values[0].min,',
max='max: values[0].max,'
),
reduce_computation=dict(
sum='res.sum += values[i].sum;',
count='res.count = NumberInt(res.count + values[i].count);',
avg=('res.acount = NumberInt(res.acount + values[i].acount);'
'res.asum += values[i].asum;'),
min='if ( values[i].min < res.min ) {res.min = values[i].min;}',
max='if ( values[i].max > res.max ) {res.max = values[i].max;}'
),
finalize=dict(
sum='',
count='',
avg='value.avg = value.asum / value.acount;',
min='',
max=''
),
)
UNPARAMETERIZED_AGGREGATES = dict(
emit_initial=dict(
stddev=(
''
)
),
emit_body=dict(
stddev='sdsum: this.counter_volume,'
'sdcount: 1,'
'weighted_distances: 0,'
'stddev: 0,'
),
reduce_initial=dict(
stddev=''
),
reduce_body=dict(
stddev='sdsum: values[0].sdsum,'
'sdcount: values[0].sdcount,'
'weighted_distances: values[0].weighted_distances,'
'stddev: values[0].stddev,'
),
reduce_computation=dict(
stddev=(
'var deviance = (res.sdsum / res.sdcount) - values[i].sdsum;'
'var weight = res.sdcount / ++res.sdcount;'
'res.weighted_distances += (Math.pow(deviance, 2) * weight);'
'res.sdsum += values[i].sdsum;'
)
),
finalize=dict(
stddev=(
'value.stddev = Math.sqrt(value.weighted_distances /'
' value.sdcount);'
)
),
)
PARAMETERIZED_AGGREGATES = dict(
validate=dict(
cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id',
'source']
),
emit_initial=dict(
cardinality=(
'aggregate["cardinality/%(aggregate_param)s"] = 1;'
'var distinct_%(aggregate_param)s = {};'
'distinct_%(aggregate_param)s[this["%(aggregate_param)s"]]'
' = true;'
)
),
emit_body=dict(
cardinality=(
'distinct_%(aggregate_param)s : distinct_%(aggregate_param)s,'
'%(aggregate_param)s : this["%(aggregate_param)s"],'
)
),
reduce_initial=dict(
cardinality=''
),
reduce_body=dict(
cardinality=(
'aggregate : values[0].aggregate,'
'distinct_%(aggregate_param)s:'
' values[0].distinct_%(aggregate_param)s,'
'%(aggregate_param)s : values[0]["%(aggregate_param)s"],'
)
),
reduce_computation=dict(
cardinality=(
'if (!(values[i]["%(aggregate_param)s"] in'
' res.distinct_%(aggregate_param)s)) {'
' res.distinct_%(aggregate_param)s[values[i]'
' ["%(aggregate_param)s"]] = true;'
' res.aggregate["cardinality/%(aggregate_param)s"] += 1;}'
)
),
finalize=dict(
cardinality=''
),
)
EMIT_STATS_COMMON = """
var aggregate = {};
%(aggregate_initial_placeholder)s
emit(%(key_val)s, { unit: this.counter_unit,
aggregate : aggregate,
%(aggregate_body_placeholder)s
groupby : %(groupby_val)s,
duration_start : this.timestamp,
duration_end : this.timestamp,
period_start : %(period_start_val)s,
period_end : %(period_end_val)s} )
"""
MAP_STATS_PERIOD_VAR = """
var period = %(period)d * 1000;
var period_first = %(period_first)d * 1000;
var period_start = period_first
+ (Math.floor(new Date(this.timestamp.getTime()
- period_first) / period)
* period);
"""
MAP_STATS_GROUPBY_VAR = """
var groupby_fields = %(groupby_fields)s;
var groupby = {};
var groupby_key = {};
for ( var i=0; i<groupby_fields.length; i++ ) {
if (groupby_fields[i].search("resource_metadata") != -1) {
var key = "resource_metadata";
var j = groupby_fields[i].indexOf('.');
var value = groupby_fields[i].slice(j+1, groupby_fields[i].length);
groupby[groupby_fields[i]] = this[key][value];
groupby_key[groupby_fields[i]] = this[key][value];
} else {
groupby[groupby_fields[i]] = this[groupby_fields[i]]
groupby_key[groupby_fields[i]] = this[groupby_fields[i]]
}
}
"""
PARAMS_MAP_STATS = {
'key_val': '\'statistics\'',
'groupby_val': 'null',
'period_start_val': 'this.timestamp',
'period_end_val': 'this.timestamp',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS = bson.code.Code("function () {" +
EMIT_STATS_COMMON % PARAMS_MAP_STATS +
"}")
PARAMS_MAP_STATS_PERIOD = {
'key_val': 'period_start',
'groupby_val': 'null',
'period_start_val': 'new Date(period_start)',
'period_end_val': 'new Date(period_start + period)',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_PERIOD = bson.code.Code(
"function () {" +
MAP_STATS_PERIOD_VAR +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_PERIOD +
"}")
PARAMS_MAP_STATS_GROUPBY = {
'key_val': 'groupby_key',
'groupby_val': 'groupby',
'period_start_val': 'this.timestamp',
'period_end_val': 'this.timestamp',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_GROUPBY = bson.code.Code(
"function () {" +
MAP_STATS_GROUPBY_VAR +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_GROUPBY +
"}")
PARAMS_MAP_STATS_PERIOD_GROUPBY = {
'key_val': 'groupby_key',
'groupby_val': 'groupby',
'period_start_val': 'new Date(period_start)',
'period_end_val': 'new Date(period_start + period)',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_PERIOD_GROUPBY = bson.code.Code(
"function () {" +
MAP_STATS_PERIOD_VAR +
MAP_STATS_GROUPBY_VAR +
" groupby_key['period_start'] = period_start\n" +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_PERIOD_GROUPBY +
"}")
REDUCE_STATS = bson.code.Code("""
function (key, values) {
%(aggregate_initial_val)s
var res = { unit: values[0].unit,
aggregate: values[0].aggregate,
%(aggregate_body_val)s
groupby: values[0].groupby,
period_start: values[0].period_start,
period_end: values[0].period_end,
duration_start: values[0].duration_start,
duration_end: values[0].duration_end };
for ( var i=1; i<values.length; i++ ) {
%(aggregate_computation_val)s
if ( values[i].duration_start < res.duration_start )
res.duration_start = values[i].duration_start;
if ( values[i].duration_end > res.duration_end )
res.duration_end = values[i].duration_end;
if ( values[i].period_start < res.period_start )
res.period_start = values[i].period_start;
if ( values[i].period_end > res.period_end )
res.period_end = values[i].period_end; }
return res;
}
""")
FINALIZE_STATS = bson.code.Code("""
function (key, value) {
%(aggregate_val)s
value.duration = (value.duration_end - value.duration_start) / 1000;
value.period = NumberInt(%(period)d);
return value;
}""")
SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'),
'asc': (pymongo.ASCENDING, '$gt')}
MAP_RESOURCES = bson.code.Code("""
function () {
emit(this.resource_id,
{user_id: this.user_id,
project_id: this.project_id,
source: this.source,
first_timestamp: this.timestamp,
last_timestamp: this.timestamp,
metadata: this.resource_metadata})
}""")
REDUCE_RESOURCES = bson.code.Code("""
function (key, values) {
var merge = {user_id: values[0].user_id,
project_id: values[0].project_id,
source: values[0].source,
first_timestamp: values[0].first_timestamp,
last_timestamp: values[0].last_timestamp,
metadata: values[0].metadata}
values.forEach(function(value) {
if (merge.first_timestamp - value.first_timestamp > 0) {
merge.first_timestamp = value.first_timestamp;
merge.user_id = value.user_id;
merge.project_id = value.project_id;
merge.source = value.source;
} else if (merge.last_timestamp - value.last_timestamp <= 0) {
merge.last_timestamp = value.last_timestamp;
merge.metadata = value.metadata;
}
});
return merge;
}""")
_GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
_APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31,
hour=23, minute=59, second=59)
def __init__(self, url):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that otherwise we overflow the MongoDB instance with new
# connection since we instantiate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
@staticmethod
def update_ttl(ttl, ttl_index_name, index_field, coll):
"""Update or ensure time_to_live indexes.
:param ttl: time to live in seconds.
:param ttl_index_name: name of the index we want to update or ensure.
:param index_field: field with the index that we need to update.
:param coll: collection which indexes need to be updated.
"""
indexes = coll.index_information()
if ttl <= 0:
if ttl_index_name in indexes:
coll.drop_index(ttl_index_name)
return
if ttl_index_name in indexes:
return coll.database.command(
'collMod', coll.name,
index={'keyPattern': {index_field: pymongo.ASCENDING},
'expireAfterSeconds': ttl})
coll.create_index([(index_field, pymongo.ASCENDING)],
expireAfterSeconds=ttl,
name=ttl_index_name)
def upgrade(self):
# Establish indexes
#
# We need variations for user_id vs. project_id because of the
# way the indexes are stored in b-trees. The user_id and
# project_id values are usually mutually exclusive in the
# queries, so the database won't take advantage of an index
# including both.
# create collection if not present
if 'resource' not in self.db.conn.collection_names():
self.db.conn.create_collection('resource')
if 'meter' not in self.db.conn.collection_names():
self.db.conn.create_collection('meter')
name_qualifier = dict(user_id='', project_id='project_')
background = dict(user_id=False, project_id=True)
for primary in ['user_id', 'project_id']:
name = 'resource_%sidx' % name_qualifier[primary]
self.db.resource.create_index([
(primary, pymongo.ASCENDING),
('source', pymongo.ASCENDING),
], name=name, background=background[primary])
name = 'meter_%sidx' % name_qualifier[primary]
self.db.meter.create_index([
('resource_id', pymongo.ASCENDING),
(primary, pymongo.ASCENDING),
('counter_name', pymongo.ASCENDING),
('timestamp', pymongo.ASCENDING),
('source', pymongo.ASCENDING),
], name=name, background=background[primary])
self.db.resource.create_index([('last_sample_timestamp',
pymongo.DESCENDING)],
name='last_sample_timestamp_idx',
sparse=True)
self.db.meter.create_index([('timestamp', pymongo.DESCENDING)],
name='timestamp_idx')
# update or ensure time_to_live index
ttl = cfg.CONF.database.metering_time_to_live
self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter)
self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp',
self.db.resource)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter
"""
# Record the updated resource metadata - we use $setOnInsert to
# unconditionally insert sample timestamps and resource metadata
# (in the update case, this must be conditional on the sample not
# being out-of-order)
data = copy.deepcopy(data)
data['resource_metadata'] = pymongo_utils.improve_keys(
data.pop('resource_metadata'))
resource = self.db.resource.find_one_and_update(
{'_id': data['resource_id']},
{'$set': {'project_id': data['project_id'],
'user_id': data['user_id'],
'source': data['source'],
},
'$setOnInsert': {'metadata': data['resource_metadata'],
'first_sample_timestamp': data['timestamp'],
'last_sample_timestamp': data['timestamp'],
},
'$addToSet': {'meter': {'counter_name': data['counter_name'],
'counter_type': data['counter_type'],
'counter_unit': data['counter_unit'],
},
},
},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER,
)
# only update last sample timestamp if actually later (the usual
# in-order case)
last_sample_timestamp = resource.get('last_sample_timestamp')
if (last_sample_timestamp is None or
last_sample_timestamp <= data['timestamp']):
self.db.resource.update_one(
{'_id': data['resource_id']},
{'$set': {'metadata': data['resource_metadata'],
'last_sample_timestamp': data['timestamp']}}
)
# only update first sample timestamp if actually earlier (the unusual
# out-of-order case)
# NOTE: a null first sample timestamp is not updated as this indicates
# a pre-existing resource document dating from before we started
# recording these timestamps in the resource collection
first_sample_timestamp = resource.get('first_sample_timestamp')
if (first_sample_timestamp is not None and
first_sample_timestamp > data['timestamp']):
self.db.resource.update_one(
{'_id': data['resource_id']},
{'$set': {'first_sample_timestamp': data['timestamp']}}
)
# Record the raw data for the meter. Use a copy so we do not
# modify a data structure owned by our caller (the driver adds
# a new key '_id').
record = copy.copy(data)
record['recorded_at'] = timeutils.utcnow()
self.db.meter.insert_one(record)
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs with native MongoDB time-to-live feature.
"""
LOG.debug(_("Clearing expired metering data is based on native "
"MongoDB time to live feature and going in background."))
@staticmethod
def _get_marker(db_collection, marker_pairs):
"""Return the mark document according to the attribute-value pairs.
:param db_collection: Database collection that be query.
:param maker_pairs: Attribute-value pairs filter.
"""
if db_collection is None:
return
if not marker_pairs:
return
ret = db_collection.find(marker_pairs, limit=2)
if ret.count() == 0:
raise base.NoResultFound
elif ret.count() > 1:
raise base.MultipleResultsFound
else:
_ret = ret.__getitem__(0)
return _ret
@classmethod
def _recurse_sort_keys(cls, sort_keys, marker, flag):
_first = sort_keys[0]
value = marker[_first]
if len(sort_keys) == 1:
return {_first: {flag: value}}
else:
criteria_equ = {_first: {'eq': value}}
criteria_cmp = cls._recurse_sort_keys(sort_keys[1:], marker, flag)
return dict(criteria_equ, ** criteria_cmp)
@classmethod
def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'):
"""Returns a sort_instruction and paging operator.
Sort instructions are used in the query to determine what attributes
to sort on and what direction to use.
:param q: The query dict passed in.
:param sort_keys: array of attributes by which results be sorted.
:param sort_dir: direction in which results be sorted (asc, desc).
:return: sort instructions and paging operator
"""
sort_keys = sort_keys or []
sort_instructions = []
_sort_dir, operation = cls.SORT_OPERATION_MAPPING.get(
sort_dir, cls.SORT_OPERATION_MAPPING['desc'])
for _sort_key in sort_keys:
_instruction = (_sort_key, _sort_dir)
sort_instructions.append(_instruction)
return sort_instructions, operation
def _get_time_constrained_resources(self, query,
start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op,
metaquery, resource):
"""Return an iterable of models.Resource instances
Items are constrained by sample timestamp.
:param query: project/user/source query
:param start_timestamp: modified timestamp start range.
:param start_timestamp_op: start time operator, like gt, ge.
:param end_timestamp: modified timestamp end range.
:param end_timestamp_op: end time operator, like lt, le.
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['resource_id'] = resource
# Add resource_ prefix so it matches the field in the db
query.update(dict(('resource_' + k, v)
for (k, v) in six.iteritems(metaquery)))
# FIXME(dhellmann): This may not perform very well,
# but doing any better will require changing the database
# schema and that will need more thought than I have time
# to put into it today.
# Look for resources matching the above criteria and with
# samples in the time range we care about, then change the
# resource query to return just those resources by id.
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
query['timestamp'] = ts_range
sort_keys = base._handle_sort_key('resource')
sort_instructions = self._build_sort_instructions(sort_keys)[0]
# use a unique collection name for the results collection,
        # as result post-sorting (as opposed to reduce pre-sorting)
# is not possible on an inline M-R
out = 'resource_list_%s' % uuid.uuid4()
self.db.meter.map_reduce(self.MAP_RESOURCES,
self.REDUCE_RESOURCES,
out=out,
sort={'resource_id': 1},
query=query)
try:
for r in self.db[out].find(sort=sort_instructions):
resource = r['value']
yield models.Resource(
resource_id=r['_id'],
user_id=resource['user_id'],
project_id=resource['project_id'],
first_sample_timestamp=resource['first_timestamp'],
last_sample_timestamp=resource['last_timestamp'],
source=resource['source'],
metadata=pymongo_utils.unquote_keys(resource['metadata']))
finally:
self.db[out].drop()
def _get_floating_resources(self, query, metaquery, resource):
"""Return an iterable of models.Resource instances
Items are unconstrained by timestamp.
:param query: project/user/source query
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['_id'] = resource
query.update(dict((k, v)
for (k, v) in six.iteritems(metaquery)))
keys = base._handle_sort_key('resource')
sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i
for i in keys]
sort_instructions = self._build_sort_instructions(sort_keys)[0]
for r in self.db.resource.find(query, sort=sort_instructions):
yield models.Resource(
resource_id=r['_id'],
user_id=r['user_id'],
project_id=r['project_id'],
first_sample_timestamp=r.get('first_sample_timestamp',
self._GENESIS),
last_sample_timestamp=r.get('last_sample_timestamp',
self._APOCALYPSE),
source=r['source'],
metadata=pymongo_utils.unquote_keys(r['metadata']))
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None):
"""Return an iterable of models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
"""
metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
query = {}
if user is not None:
query['user_id'] = user
if project is not None:
query['project_id'] = project
if source is not None:
query['source'] = source
if start_timestamp or end_timestamp:
return self._get_time_constrained_resources(query,
start_timestamp,
start_timestamp_op,
end_timestamp,
end_timestamp_op,
metaquery, resource)
else:
return self._get_floating_resources(query, metaquery, resource)
def _aggregate_param(self, fragment_key, aggregate):
fragment_map = self.STANDARD_AGGREGATES[fragment_key]
if not aggregate:
return ''.join([f for f in fragment_map.values()])
fragments = ''
for a in aggregate:
if a.func in self.STANDARD_AGGREGATES[fragment_key]:
fragment_map = self.STANDARD_AGGREGATES[fragment_key]
fragments += fragment_map[a.func]
elif a.func in self.UNPARAMETERIZED_AGGREGATES[fragment_key]:
fragment_map = self.UNPARAMETERIZED_AGGREGATES[fragment_key]
fragments += fragment_map[a.func]
elif a.func in self.PARAMETERIZED_AGGREGATES[fragment_key]:
fragment_map = self.PARAMETERIZED_AGGREGATES[fragment_key]
v = self.PARAMETERIZED_AGGREGATES['validate'].get(a.func)
if not (v and v(a.param)):
raise storage.StorageBadAggregate('Bad aggregate: %s.%s'
% (a.func, a.param))
params = dict(aggregate_param=a.param)
fragments += (fragment_map[a.func] % params)
else:
raise ceilometer.NotImplementedError(
'Selectable aggregate function %s'
' is not supported' % a.func)
return fragments
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of models.Statistics instance.
Items are containing meter statistics described by the query
parameters. The filter must have a meter value set.
"""
if (groupby and set(groupby) -
set(['user_id', 'project_id', 'resource_id', 'source',
'resource_metadata.instance_type'])):
raise ceilometer.NotImplementedError(
"Unable to group by these fields")
q = pymongo_utils.make_query_from_filter(sample_filter)
if period:
if sample_filter.start_timestamp:
period_start = sample_filter.start_timestamp
else:
period_start = self.db.meter.find(
limit=1, sort=[('timestamp',
pymongo.ASCENDING)])[0]['timestamp']
period_start = int(calendar.timegm(period_start.utctimetuple()))
map_params = {'period': period,
'period_first': period_start,
'groupby_fields': json.dumps(groupby)}
if groupby:
map_fragment = self.MAP_STATS_PERIOD_GROUPBY
else:
map_fragment = self.MAP_STATS_PERIOD
else:
if groupby:
map_params = {'groupby_fields': json.dumps(groupby)}
map_fragment = self.MAP_STATS_GROUPBY
else:
map_params = dict()
map_fragment = self.MAP_STATS
sub = self._aggregate_param
map_params['aggregate_initial_val'] = sub('emit_initial', aggregate)
map_params['aggregate_body_val'] = sub('emit_body', aggregate)
map_stats = map_fragment % map_params
reduce_params = dict(
aggregate_initial_val=sub('reduce_initial', aggregate),
aggregate_body_val=sub('reduce_body', aggregate),
aggregate_computation_val=sub('reduce_computation', aggregate)
)
reduce_stats = self.REDUCE_STATS % reduce_params
finalize_params = dict(aggregate_val=sub('finalize', aggregate),
period=(period if period else 0))
finalize_stats = self.FINALIZE_STATS % finalize_params
results = self.db.meter.map_reduce(
map_stats,
reduce_stats,
{'inline': 1},
finalize=finalize_stats,
query=q,
)
# FIXME(terriyu) Fix get_meter_statistics() so we don't use sorted()
# to return the results
return sorted(
(self._stats_result_to_model(r['value'], groupby, aggregate)
for r in results['results']),
key=operator.attrgetter('period_start'))
@staticmethod
def _stats_result_aggregates(result, aggregate):
stats_args = {}
for attr in ['count', 'min', 'max', 'sum', 'avg']:
if attr in result:
stats_args[attr] = result[attr]
if aggregate:
stats_args['aggregate'] = {}
for a in aggregate:
ak = '%s%s' % (a.func, '/%s' % a.param if a.param else '')
if ak in result:
stats_args['aggregate'][ak] = result[ak]
elif 'aggregate' in result:
stats_args['aggregate'][ak] = result['aggregate'].get(ak)
return stats_args
@staticmethod
def _stats_result_to_model(result, groupby, aggregate):
stats_args = Connection._stats_result_aggregates(result, aggregate)
stats_args['unit'] = result['unit']
stats_args['duration'] = result['duration']
stats_args['duration_start'] = result['duration_start']
stats_args['duration_end'] = result['duration_end']
stats_args['period'] = result['period']
stats_args['period_start'] = result['period_start']
stats_args['period_end'] = result['period_end']
stats_args['groupby'] = (dict(
(g, result['groupby'][g]) for g in groupby) if groupby else None)
return models.Statistics(**stats_args)
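# Illustrative sketch (added for clarity, not part of the original module): the statistics
# pipeline above is assembled by %-substituting aggregate fragments into the JavaScript
# templates. The guarded demo below only renders the finalize function for the default
# (no selectable aggregates) case; it touches no MongoDB instance.
if __name__ == '__main__':
    finalize_src = Connection.FINALIZE_STATS % dict(
        aggregate_val='value.avg = value.asum / value.acount;',
        period=60)
    print(finalize_src)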
|
apache-2.0
| 8,195,262,560,355,288,000
| 39.454233
| 79
| 0.535735
| false
| 4.363984
| false
| false
| false
|
ebakan/Python
|
urler.py
|
1
|
1086
|
#!/usr/bin/env python
import urllib.request
import json
def genterm(inp):
def foo(x):
if x.isalpha():
return x
else:
return '%{0}'.format(hex(ord(x))[2:])
return ''.join(map(foo,inp))
def genresults(inp):
page=urllib.request.urlopen('http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q='+inp)
    query = json.loads(page.read().decode())
page.close()
return int(list(list(query.values())[0].values())[0]['estimatedResultCount'])
def main():
terms={}
counter=3
for i in range(1,counter+1):
for k in range(1,counter+1):
terms['f'*i+'u'*k]=None
for i in terms.keys(): terms[i]=genresults(genterm(i))
return sorter(terms)
def sorter(dictionary):
keys=list(dictionary.keys())
keys.sort()
vals=[dictionary[i] for i in keys]
return dict(zip(keys,vals))
def output(terms):
f=open('fu.csv','w')
    for i in terms.items():
        f.write('{0},{1},{2}\n'.format(i[0].count('f'), i[0].count('u'), i[1]))
    f.close()
if __name__=='__main__':
output(main())
|
gpl-3.0
| -8,417,570,026,628,726,000
| 25.487805
| 99
| 0.571823
| false
| 3.12069
| false
| false
| false
|
grakiss888/testapi
|
update/templates/update_mongodb.py
|
1
|
2865
|
##############################################################################
# Copyright (c) 2016 ZTE Corporation
# feng.xiaowei@zte.com.cn
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import argparse
from pymongo import MongoClient
from changes_in_mongodb import collections_old2New, \
fields_old2New, docs_old2New
from utils import main, parse_mongodb_url
parser = argparse.ArgumentParser(description='Update MongoDBs')
parser.add_argument('-u', '--url',
type=str,
required=False,
default='mongodb://127.0.0.1:27017/',
help='Mongo DB URL for Backups')
parser.add_argument('-d', '--db',
type=str,
required=False,
default='test_results_collection',
help='database for the update.')
def assert_collections(a_dict):
if a_dict is not None:
collections = eval_db('collection_names')
no_collections = []
for collection in a_dict.keys():
if collection not in collections:
no_collections.append(collection)
assert len(no_collections) == 0, \
'collections {} not exist'.format(no_collections)
def rename_collections(a_dict):
if a_dict is not None:
for collection, new_name in a_dict.iteritems():
eval_collection(collection, 'rename', new_name)
def rename_fields(a_dict):
collection_update(a_dict, '$rename')
def change_docs(a_dict):
collection_update(a_dict, '$set')
def eval_db(method, *args, **kwargs):
exec_db = db.__getattribute__(method)
return exec_db(*args, **kwargs)
def eval_collection(collection, method, *args, **kwargs):
exec_collection = db.__getattr__(collection)
return exec_collection.__getattribute__(method)(*args, **kwargs)
def collection_update(a_dict, operator):
if a_dict is not None:
for collection, updates in a_dict.iteritems():
for (query, doc) in updates:
doc_dict = {operator: doc}
eval_collection(collection, 'update', query,
doc_dict, upsert=False, multi=True)
def update(args):
parse_mongodb_url(args.url)
client = MongoClient(args.url)
global db
db = client[args.db]
assert_collections(docs_old2New)
assert_collections(fields_old2New)
assert_collections(collections_old2New)
change_docs(docs_old2New)
rename_fields(fields_old2New)
rename_collections(collections_old2New)
if __name__ == '__main__':
main(update, parser)
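# Illustrative sketch (added for clarity, not part of the original script): the three
# mappings imported from changes_in_mongodb are plain dictionaries keyed by collection
# name. Hypothetical shapes matching how this script consumes them:
#   collections_old2New = {'old_collection': 'new_collection'}
#   fields_old2New = {'results': [({}, {'old_field': 'new_field'})]}        # applied with $rename
#   docs_old2New = {'results': [({'version': 'old'}, {'version': 'new'})]}  # applied with $set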
|
apache-2.0
| -7,486,893,034,568,809,000
| 30.833333
| 78
| 0.597208
| false
| 4.035211
| false
| false
| false
|
Prev/jikji
|
tests/test_generator.py
|
1
|
1600
|
"""
tests.generator
---------------
Test generator of application
:author: Prev(prevdev@gmail.com)
"""
import pytest
import os
import shutil
from jikji import Jikji
def test_generate1() :
""" Testing for generating of testapp1
"""
jikji = Jikji('tests/testapp1', options=['sclear'])
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
for i in range(1, 5) :
with open('%s/%s.html' % (jikji.settings.OUTPUT_ROOT, i), 'r') as f:
c = f.read()
assert c == '<div>%s</div>' % i
def test_generate2() :
""" Testing for generating of testapp2
"""
jikji = Jikji('tests/testapp2', options=['sclear'])
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
STATIC_ROOT = jikji.settings.STATIC_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
with open('%s/index.html' % OUTPUT_ROOT, 'r') as f : c = f.read()
assert c == '<p>Hello</p><i>home.html</i>'
with open('%s/README.md' % OUTPUT_ROOT, 'r') as f: c = f.read()
with open('%s/README.md' % STATIC_ROOT, 'r') as f: c2 = f.read()
assert c == c2
with open('%s/requirements.txt' % OUTPUT_ROOT, 'r') as f :
c = f.read()
assert c == 'jikji>=2.0\nrequests>=2.11'
def test_generate3() :
""" Testing for generating of testapp3
"""
jikji = Jikji('tests/testapp3')
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
with open('%s/event/2/index.html' % OUTPUT_ROOT, 'r') as f : c = f.read()
assert c == '<div>Event: 2</div>'
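# Illustrative note (not part of the original test module): these are plain pytest
# functions, so a typical invocation from the repository root would be
#   pytest tests/test_generator.py -q
# (running from the repository root is an assumption; the tests resolve the
# tests/testapp* paths relative to the working directory).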
|
mit
| -1,156,732,441,587,812,000
| 18.277108
| 74
| 0.63375
| false
| 2.649007
| true
| false
| false
|
fffonion/xeHentai
|
xeHentai/i18n/__init__.py
|
1
|
1102
|
#!/usr/bin/env python
# coding:utf-8
# Contributor:
# fffonion <fffonion@gmail.com>
import importlib
from ..const import *
from . import en_us as lng_fallback
try:
_locale = LOCALE.lower() if LOCALE else 'en_us'
if _locale in ('zh_cn', 'zh_sg'):
_locale = 'zh_hans'
elif _locale in ('zh_tw', 'zh_hk', 'zh_mo'):
_locale = 'zh_hant'
lng = importlib.import_module("%s.i18n.%s" % (SCRIPT_NAME, _locale))
except (ImportError, ValueError):
lng = lng_fallback
class _(object):
def c(cls, code):
_ = code not in lng.err_msg and \
(code not in lng_fallback.err_msg and \
(cls.ERR_NOMSG % code) or \
lng_fallback.err_msg[code] ) or \
lng.err_msg[code]
return _ if PY3K else (
_ if isinstance(_, unicode) else _.decode('utf-8')) # cls.ERR_NOMSG % code is unicode
def __getattr__(cls, idx):
_ = not hasattr(lng, idx) and \
getattr(lng_fallback, idx) or \
getattr(lng, idx)
return _ if PY3K else _.decode('utf-8')
i18n = _()
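# Illustrative sketch (added for clarity, not part of the original module): i18n proxies
# attribute lookups to the active locale module and falls back to en_us when a string or
# error code is missing. The names below are assumptions for illustration only:
#   i18n.c(0)       # message text for error code 0, or ERR_NOMSG % 0 if undefined
#   i18n.SOME_LABEL # looked up in the locale module first, then in en_us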
|
gpl-3.0
| -3,419,455,272,191,152,000
| 28.783784
| 97
| 0.549002
| false
| 3.078212
| false
| false
| false
|
hubert667/AIR
|
src/scripts/kmeansScipy.py
|
1
|
3802
|
import random, pickle, os, sys
import numpy as np
from clusterData import *
from scipy.cluster.vq import kmeans,vq,whiten
class KMeans:
def __init__(self, fK, tK, filename, typeDataset):
self.queryRankerList = []
self.bestKClusterGroup = []
self.queryRankerDict = {}
self.fromK = fK
self.toK = tK + 1
self.bestRankersFile = filename
self.bestK = 0
self.dataset = typeDataset
def getClusters(self, thedata):
# data generation
data = whiten(thedata)
        # computing K-Means with self.fromK clusters
centroids,_ = kmeans(data,self.fromK)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
return idx
def getData(self):
loadedFile = pickle.load( open( self.bestRankersFile, "rb" ) ) #dict-->print i, test.query_ranker[i]
for i in loadedFile.query_ranker.keys():
self.queryRankerDict[i] = loadedFile.query_ranker[i]
print len(self.queryRankerDict)
for i in self.queryRankerDict.keys():
if type(self.queryRankerDict[i]) == list:
for j in self.queryRankerDict[i]:
self.queryRankerList.append(j)
else:
self.queryRankerList.append(self.queryRankerDict[i])
data = np.array(self.queryRankerList)
return data
def runScript(self):#"bestRanker.p" sys.argv[1]
#commented out part is for test purposes
#data = np.vstack((random(150,2) + np.array([.5,.5]),random(150,2), random(150,2) + np.array([2.5,2.5]), rand(150,2) + np.array([10.5,10.5])))
data = self.getData()
dataToClusters = self.getClusters(data) #list > list(cluster#) > np.array,np.array etc...
dataToClusters = list(dataToClusters)
clusterDataObject = clusterData()
data = list(data)
#make object ---> dict[clusterNumber:int] = list of all rankers (where rankers are also lists)
for i in range(len(dataToClusters)):
if not dataToClusters[i] in clusterDataObject.clusterToRanker.keys():
clusterDataObject.clusterToRanker[dataToClusters[i]] = [list(data[i])]
else:
clusterDataObject.clusterToRanker[dataToClusters[i]].append(list(data[i]))
#make object ---> dict[queryID:string] = list of cluster numbers as ints
for i in clusterDataObject.clusterToRanker:#for each cluster
for j in clusterDataObject.clusterToRanker[i]:#for each ranker in cluster
for q in self.queryRankerDict:#for each query
for r in self.queryRankerDict[q]:#for each ranker in query
if list(r) == j:#if ranker in query is equal to j (current ranker in cluster)
if q in clusterDataObject.queryToCluster:#if query key exists in dictionary
clusterDataObject.queryToCluster[q].append(i)
else:
clusterDataObject.queryToCluster[q] = [i]
for i in clusterDataObject.queryToCluster:
print i, len(clusterDataObject.queryToCluster[i]), clusterDataObject.queryToCluster[i]
for i in clusterDataObject.clusterToRanker:
print i, len(clusterDataObject.clusterToRanker[i])#, clusterDataObject.clusterToRanker[i]
if not os.path.exists("ClusterData"):
os.makedirs("ClusterData")
pickle.dump(clusterDataObject, open("ClusterData/"+self.dataset+".data", "wb"))
return clusterDataObject.queryToCluster, clusterDataObject.clusterToRanker
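# Illustrative sketch (added for clarity, not part of the original module): a standalone
# demo of the whiten -> kmeans -> vq pipeline used by getClusters(), run on synthetic
# data so no pickled ranker file is needed.
def _kmeans_demo(n_clusters=3):
    demo = np.vstack((np.random.rand(50, 2),
                      np.random.rand(50, 2) + 2.0,
                      np.random.rand(50, 2) + 5.0))
    whitened = whiten(demo)                      # scale each feature to unit variance
    centroids, _ = kmeans(whitened, n_clusters)  # fit n_clusters centroids
    labels, _ = vq(whitened, centroids)          # assign every row to its nearest centroid
    return labels
if __name__ == '__main__':
    print(len(_kmeans_demo()))                   # 150 cluster assignments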
|
gpl-3.0
| -3,934,938,312,269,193,000
| 41.701149
| 150
| 0.593898
| false
| 3.760633
| false
| false
| false
|
denmojo/pygrow
|
grow/commands/filter.py
|
1
|
2992
|
from grow.pods import pods
from grow.pods import storage
import click
import os
@click.command()
@click.argument('pod_path', default='.')
@click.option('--include-obsolete/--no-include-obsolete', default=False,
is_flag=True,
help='Whether to include obsolete messages. If false, obsolete'
' messages will be removed from the catalog template. By'
' default, Grow cleans obsolete messages from the catalog'
' template.')
@click.option('--localized/--no-localized', default=False, is_flag=True,
help='Whether to create localized message catalogs. Use this'
' option if content varies by locale.')
@click.option('--locale', type=str, multiple=True,
help='Which locale(s) to analyze when creating template catalogs'
' that contain only untranslated messages. This option is'
' only applicable when using --untranslated.')
@click.option('--path', type=str, multiple=True,
help='Which paths to extract strings from. By default, all paths'
' are extracted. This option is useful if you\'d like to'
' generate a partial messages file representing just a'
' specific set of files.')
@click.option('-o', type=str, default=None,
help='Where to write the extracted translation catalog. The path'
' must be relative to the pod\'s root.')
@click.option('--include-header', default=False, is_flag=True,
help='Whether to preserve headers at the beginning of catalogs.')
@click.option('--out_dir', type=str, default=None,
help='Where to write extracted localized translation catalogs.'
' The path must be relative to the pod\'s root. This option'
' is only applicable when using --localized.')
@click.option('-f', default=False, is_flag=True,
help='Whether to force an update when writing localized message'
' catalogs.')
def filter(pod_path, locale, o, include_obsolete, localized, path,
include_header, out_dir, f):
"""Filters untranslated messages from catalogs into new catalogs."""
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
pod = pods.Pod(root, storage=storage.FileStorage)
catalogs = pod.get_catalogs()
if not locale:
locale = catalogs.list_locales()
if out_dir and pod.file_exists(out_dir) and not f:
raise click.UsageError(
'{} exists. You must specify a directory that does not exist, or '
'use the "-f" flag, which will force update catalogs within the '
'specified directory.'.format(out_dir))
catalogs.filter(out_path=o, out_dir=out_dir,
include_obsolete=include_obsolete,
localized=localized, paths=path,
include_header=include_header, locales=locale)
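# Illustrative sketch (added for clarity, not part of the original module): because this
# is a click command it can be exercised in isolation with click's test runner. The pod
# path and option values below are assumptions; only options declared above are used.
if __name__ == '__main__':
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(filter, ['.', '--localized', '--locale', 'de',
                                    '-o', 'messages/untranslated.po'])
    print(result.exit_code)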
|
mit
| 5,153,167,556,879,288,000
| 53.4
| 79
| 0.621658
| false
| 4.367883
| false
| false
| false
|
alex4108/scLikesDownloader
|
scLikesDownloader.py
|
1
|
12232
|
import soundcloud as sc
from soundcloud.resource import Resource
import sys
import os
import urllib2
import re
class downloader:
def __init__(self, UserURL, PATH):
try:
self.client = sc.Client(client_id='',
client_secret='',
)
self.user = self.client.get('/resolve', url=UserURL)
self.path = PATH
self.reports = list()
except:
self.report('Constructor Exception Raised!')
self.report(sys.exc_info()[0])
self.report(sys.exc_info()[1])
            return  # __init__ cannot return a value; the failure is recorded via report()
# Constructor
def __str__(self):
return 'Downloader Client v1 | Username: ' + self.user.username
def isMp3Valid(self, file_path):
is_valid = False
f = open(file_path, 'r')
block = f.read(1024)
frame_start = block.find(chr(255))
block_count = 0 #abort after 64k
while len(block)>0 and frame_start == -1 and block_count<64:
block = f.read(1024)
frame_start = block.find(chr(255))
block_count+=1
if frame_start > -1:
frame_hdr = block[frame_start:frame_start+4]
is_valid = frame_hdr[0] == chr(255)
mpeg_version = ''
layer_desc = ''
uses_crc = False
bitrate = 0
sample_rate = 0
padding = False
frame_length = 0
if is_valid:
is_valid = ord(frame_hdr[1]) & 0xe0 == 0xe0 #validate the rest of the frame_sync bits exist
if is_valid:
if ord(frame_hdr[1]) & 0x18 == 0:
mpeg_version = '2.5'
elif ord(frame_hdr[1]) & 0x18 == 0x10:
mpeg_version = '2'
elif ord(frame_hdr[1]) & 0x18 == 0x18:
mpeg_version = '1'
else:
is_valid = False
if is_valid:
if ord(frame_hdr[1]) & 6 == 2:
layer_desc = 'Layer III'
elif ord(frame_hdr[1]) & 6 == 4:
layer_desc = 'Layer II'
elif ord(frame_hdr[1]) & 6 == 6:
layer_desc = 'Layer I'
else:
is_valid = False
if is_valid:
uses_crc = ord(frame_hdr[1]) & 1 == 0
bitrate_chart = [
[0,0,0,0,0],
[32,32,32,32,8],
[64,48,40,48,16],
[96,56,48,56,24],
[128,64,56,64,32],
[160,80,64,80,40],
[192,96,80,96,40],
[224,112,96,112,56],
[256,128,112,128,64],
[288,160,128,144,80],
[320,192,160,160,96],
[352,224,192,176,112],
[384,256,224,192,128],
[416,320,256,224,144],
[448,384,320,256,160]]
bitrate_index = ord(frame_hdr[2]) >> 4
if bitrate_index==15:
is_valid=False
else:
bitrate_col = 0
if mpeg_version == '1':
if layer_desc == 'Layer I':
bitrate_col = 0
elif layer_desc == 'Layer II':
bitrate_col = 1
else:
bitrate_col = 2
else:
if layer_desc == 'Layer I':
bitrate_col = 3
else:
bitrate_col = 4
bitrate = bitrate_chart[bitrate_index][bitrate_col]
is_valid = bitrate > 0
if is_valid:
sample_rate_chart = [
[44100, 22050, 11025],
[48000, 24000, 12000],
[32000, 16000, 8000]]
sample_rate_index = (ord(frame_hdr[2]) & 0xc) >> 2
if sample_rate_index != 3:
sample_rate_col = 0
if mpeg_version == '1':
sample_rate_col = 0
elif mpeg_version == '2':
sample_rate_col = 1
else:
sample_rate_col = 2
sample_rate = sample_rate_chart[sample_rate_index][sample_rate_col]
else:
is_valid = False
if is_valid:
padding = ord(frame_hdr[2]) & 1 == 1
padding_length = 0
if layer_desc == 'Layer I':
if padding:
padding_length = 4
frame_length = (12 * bitrate * 1000 / sample_rate + padding_length) * 4
else:
if padding:
padding_length = 1
frame_length = 144 * bitrate * 1000 / sample_rate + padding_length
is_valid = frame_length > 0
# Verify the next frame
if(frame_start + frame_length < len(block)):
is_valid = block[frame_start + frame_length] == chr(255)
else:
offset = (frame_start + frame_length) - len(block)
block = f.read(1024)
if len(block) > offset:
is_valid = block[offset] == chr(255)
else:
is_valid = False
f.close()
return is_valid
def directory(self, path,extension = ''):
list_dir = []
list_dir = os.listdir(path)
count = 0
for file in list_dir:
if file.endswith(extension): # eg: '.txt'
count += 1
return count
'''
Gets list of likes
'''
def trackList(self, downloadable_only = False):
# API: Get favorites count, save data from /users/{id}/favorites
offset = 0
limit = 1
favorites = list()
retry = 0
#self.user.public_favorites_count = 5 # Test data
while offset < self.user.public_favorites_count:
            if offset == -1:
break
try:
uri = '/users/' + str(self.user.id) + '/favorites'
favoritesToJoin = self.client.get(uri, offset=offset, limit=limit)
if len(favoritesToJoin) == 0 or not favoritesToJoin:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' is hiding. Trying again.'
if retry != 0 :
retry = retry + 1
else:
retry = 1
print '(Retry ' + str(retry) + ')'
if retry >= 5:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' won\'t retrieve. Aborting...'
self.report('(trackList) Can\'t select track #' + str(offset))
self.report('To download this manually, please visit https://api.soundcloud.com/users/' + str(self.user.id) + '/favorites/' + str(offset) + '.json')
retry = 0
offset += 1
elif hasattr(self.trackData(favoritesToJoin[0].id), 'download_url'):
if len(favoritesToJoin) < limit:
offset = offset + limit - len(favoritesToJoin)
if len(favorites) == 0:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' retrieved from API '
favorites.append(favoritesToJoin[0])
if offset + 1 < self.user.public_favorites_count:
offset += 1
else:
offset = -1
elif len(favorites) != 0 and not favorites[len(favorites)-1] == favoritesToJoin[0]:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' retrieved from API '
favorites.append(favoritesToJoin[0])
if offset + 1 < self.user.public_favorites_count:
offset += 1
else:
offset = -1
else:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' isn\'t downloadable. Skipping...'
offset += 1
except:
self.report('(trackList) ' + str(sys.exc_info()[0]))
self.report('(trackList) ' + str(sys.exc_info()[1]))
self.report('(trackList) ' + str(favoritesToJoin[0].download_url))
print 'All tracks have been retrieved'
return favorites
'''
Adds a report for later viewing
:param str msg Message to report
'''
def report(self, msg):
self.reports.append(msg)
'''
Gets data on specific track
:param int trackid The Track's API ID
:return (Resource|Boolean) Track Resource or false on failure
'''
def trackData(self, trackid):
try:
track = self.client.get('/tracks/' + str(trackid))
except:
self.report('(trackData) Failed to select Track ID ' + str(trackid))
return False
return track
'''
Get data on specific user
:param int User's ID in the API
:return Resource User Resource
'''
def getUser(self, userid):
try:
user = self.client.get('/users/' + str(userid))
except:
self.report('(getUser) Failed to select User ID ' + str(userid))
return False
return user
'''
Takes the inputted path and makes it system-safe by stripping characters
:param str path Path to clean
:return str Clean path
'''
def validPath(self, path):
cleaned_up_filename = re.sub(r'[\/\\\:\*\?\"\<\>\|]', '', path)
return self.path + "".join(c for c in cleaned_up_filename if c.isalnum()).rstrip()
def getReports(self):
return self.reports
'''
Saves a file
:param (Resource|int) the Track Resource to download or the track's ID
:param bool False on failure, True on success
'''
def saveFile(self, track):
if isinstance(track, int):
            track = self.trackData(track)
artist = self.getUser(track.user_id)
filepath = self.validPath(artist.username + '_' + track.permalink + '.' + track.original_format)
url = track.download_url + '?client_id=1fbdfddf1e6711cd0aff00f3b92e7cbf'
try:
req = urllib2.urlopen(urllib2.Request(url=url))
if req.getcode() != 200:
self.report('HTTPError Code: ' + str(req.getcode()) + ' url: ' + req.geturl())
return False
try:
if not os.path.exists(self.path):
os.makedirs(self.path)
if os.path.exists(filepath):
os.remove(filepath)
file = open(filepath, 'wb')
file.write(req.read())
file.close()
except:
raise
except:
self.report('(saveFile) Failed to save file! Manual download required! URL: ' + req.geturl())
self.report('(saveFile)' + str(sys.exc_info()[0]))
self.report('(saveFile)' + str(sys.exc_info()[1]))
return False
return True
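# Illustrative sketch (added for clarity, not part of the original script): a typical
# driving loop for the downloader class. The profile URL and output path are
# placeholders, and real use also requires filling in the SoundCloud API keys above.
if __name__ == '__main__':
    dl = downloader('https://soundcloud.com/some-user', '/tmp/sc_likes/')
    for liked in dl.trackList(downloadable_only=True):
        dl.saveFile(liked)
    for line in dl.getReports():
        print(line)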
|
gpl-2.0
| 3,850,365,799,363,366,400
| 36.179331
| 172
| 0.435988
| false
| 4.381089
| true
| false
| false
|
ksmit799/Toontown-Source
|
toontown/safezone/DistributedCheckers.py
|
1
|
29924
|
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
from TrolleyConstants import *
from direct.gui.DirectGui import *
from toontown.toonbase import TTLocalizer
from direct.distributed import DistributedNode
from direct.distributed.ClockDelta import globalClockDelta
from CheckersBoard import CheckersBoard
from direct.fsm import ClassicFSM, State
from direct.fsm import StateData
from toontown.toonbase.ToontownTimer import ToontownTimer
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from otp.otpbase import OTPGlobals
from direct.showbase import PythonUtil
class DistributedCheckers(DistributedNode.DistributedNode):
def __init__(self, cr):
NodePath.__init__(self, 'DistributedCheckers')
DistributedNode.DistributedNode.__init__(self, cr)
self.cr = cr
self.reparentTo(render)
self.boardNode = loader.loadModel('phase_6/models/golf/regular_checker_game.bam')
self.boardNode.reparentTo(self)
self.board = CheckersBoard()
self.exitButton = None
self.inGame = False
self.waiting = True
self.startButton = None
self.playerNum = None
self.turnText = None
self.isMyTurn = False
self.wantTimer = True
self.leaveButton = None
self.screenText = None
self.turnText = None
self.exitButton = None
self.numRandomMoves = 0
self.blinker = Sequence()
self.moveList = []
self.mySquares = []
self.myKings = []
self.isRotated = False
self.accept('mouse1', self.mouseClick)
self.traverser = base.cTrav
self.pickerNode = CollisionNode('mouseRay')
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(ToontownGlobals.WallBitmask)
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.myHandler = CollisionHandlerQueue()
self.traverser.addCollider(self.pickerNP, self.myHandler)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
        self.upButton = self.buttonModels.find('**/InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
self.clockNode = ToontownTimer()
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.setScale(0.3)
self.clockNode.hide()
self.playerColors = [Vec4(0, 0, 1, 1), Vec4(0, 1, 0, 1)]
self.tintConstant = Vec4(0.25, 0.25, 0.25, 0.5)
self.ghostConstant = Vec4(0, 0, 0, 0.8)
self.startingPositions = [[0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11], [20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31]]
self.knockSound = base.loadSfx('phase_5/audio/sfx/GUI_knock_1.mp3')
self.clickSound = base.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.mp3')
self.moveSound = base.loadSfx('phase_6/audio/sfx/CC_move.mp3')
self.accept('stoppedAsleep', self.handleSleep)
self.fsm = ClassicFSM.ClassicFSM('ChineseCheckers', [State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, ['playing', 'gameOver']), State.State('playing', self.enterPlaying, self.exitPlaying, ['gameOver']), State.State('gameOver', self.enterGameOver, self.exitGameOver, ['waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
x = self.boardNode.find('**/locator*')
self.locatorList = x.getChildren()
tempList = []
for x in range(0, 32):
self.locatorList[x].setTag('GamePeiceLocator', '%d' % x)
tempList.append(self.locatorList[x].attachNewNode(CollisionNode('picker%d' % x)))
tempList[x].node().addSolid(CollisionSphere(0, 0, 0, 0.39))
for z in self.locatorList:
y = loader.loadModel('phase_6/models/golf/regular_checker_piecewhite.bam')
y.find('**/checker_k*').hide()
zz = loader.loadModel('phase_6/models/golf/regular_checker_pieceblack.bam')
zz.find('**/checker_k*').hide()
y.reparentTo(z)
y.hide()
zz.reparentTo(z)
zz.hide()
return
def setName(self, name):
self.name = name
def announceGenerate(self):
DistributedNode.DistributedNode.announceGenerate(self)
if self.table.fsm.getCurrentState().getName() != 'observing':
if base.localAvatar.doId in self.table.tableState:
self.seatPos = self.table.tableState.index(base.localAvatar.doId)
def handleSleep(self, task = None):
if self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.exitButtonPushed()
if task != None:
            return task.done
return
def setTableDoId(self, doId):
self.tableDoId = doId
self.table = self.cr.doId2do[doId]
self.table.setTimerFunc(self.startButtonPushed)
self.fsm.enterInitialState()
self.table.setGameDoId(self.doId)
def disable(self):
DistributedNode.DistributedNode.disable(self)
if self.leaveButton:
self.leaveButton.destroy()
            self.leaveButton = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
return
def delete(self):
DistributedNode.DistributedNode.delete(self)
self.table.gameDoId = None
self.table.game = None
if self.exitButton:
self.exitButton.destroy()
if self.startButton:
self.startButton.destroy()
self.clockNode.stop()
self.clockNode.hide()
self.table.startButtonPushed = None
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
self.table = None
return
def getTimer(self):
self.sendUpdate('requestTimer', [])
def setTimer(self, timerEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'waitingToBegin' and not self.table.fsm.getCurrentState().getName() == 'observing':
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(timerEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0 and timerEnd != 0:
if timeLeft > 60:
timeLeft = 60
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.countdown(timeLeft, self.startButtonPushed)
self.clockNode.show()
else:
self.clockNode.stop()
self.clockNode.hide()
return
def setTurnTimer(self, turnEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'playing':
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(turnEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0:
self.clockNode.setPos(-.74, 0, -0.2)
if self.isMyTurn:
self.clockNode.countdown(timeLeft, self.doNothing)
else:
self.clockNode.countdown(timeLeft, self.doNothing)
self.clockNode.show()
return
def gameStart(self, playerNum):
if playerNum != 255:
self.playerNum = playerNum
if self.playerNum == 1:
self.playerColorString = 'white'
else:
self.playerColorString = 'black'
self.playerColor = self.playerColors[playerNum - 1]
self.moveCameraForGame()
self.fsm.request('playing')
def sendTurn(self, playersTurn):
if self.fsm.getCurrentState().getName() == 'playing':
if playersTurn == self.playerNum:
self.isMyTurn = True
self.enableTurnScreenText(playersTurn)
def illegalMove(self):
self.exitButtonPushed()
def moveCameraForGame(self):
if self.table.cameraBoardTrack.isPlaying():
self.table.cameraBoardTrack.finish()
rotation = 0
if self.seatPos > 2:
if self.playerNum == 1:
rotation = 180
elif self.playerNum == 2:
rotation = 0
for x in self.locatorList:
x.setH(180)
self.isRotated = True
elif self.playerNum == 1:
rotation = 0
elif self.playerNum == 2:
rotation = 180
for x in self.locatorList:
x.setH(180)
self.isRotated = True
int = LerpHprInterval(self.boardNode, 4.2, Vec3(rotation, self.boardNode.getP(), self.boardNode.getR()), self.boardNode.getHpr())
int.start()
def enterWaitingToBegin(self):
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableExitButton()
self.enableStartButton()
def exitWaitingToBegin(self):
if self.exitButton:
self.exitButton.destroy()
self.exitButton = None
if self.startButton:
self.startButton.destroy()
self.exitButton = None
self.clockNode.stop()
self.clockNode.hide()
return
def enterPlaying(self):
self.inGame = True
self.enableScreenText()
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableLeaveButton()
def exitPlaying(self):
self.inGame = False
if self.leaveButton:
self.leaveButton.destroy()
            self.leaveButton = None
self.playerNum = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
return
def enterGameOver(self):
pass
def exitGameOver(self):
pass
def exitWaitCountdown(self):
self.__disableCollisions()
self.ignore('trolleyExitButton')
self.clockNode.reset()
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersGetUpButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.23), text_scale=0.8, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.4), scale=0.15, command=lambda self = self: self.exitButtonPushed())
return
def enableScreenText(self):
defaultPos = (-.8, -0.4)
if self.playerNum == 1:
message = TTLocalizer.CheckersColorWhite
color = Vec4(1, 1, 1, 1)
elif self.playerNum == 2:
message = TTLocalizer.CheckersColorBlack
color = Vec4(0, 0, 0, 1)
else:
message = TTLocalizer.CheckersObserver
color = Vec4(0, 0, 0, 1)
defaultPos = (-.8, -0.4)
self.screenText = OnscreenText(text=message, pos=defaultPos, scale=0.1, fg=color, align=TextNode.ACenter, mayChange=1)
def enableStartButton(self):
self.startButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersStartButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.23), text_scale=0.6, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.1), scale=0.15, command=lambda self = self: self.startButtonPushed())
return
def enableLeaveButton(self):
self.leaveButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersQuitButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.13), text_scale=0.5, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.4), scale=0.15, command=lambda self = self: self.exitButtonPushed())
return
def enableTurnScreenText(self, player):
playerOrder = [1,
4,
2,
5,
3,
6]
message1 = TTLocalizer.CheckersIts
if self.turnText != None:
self.turnText.destroy()
if player == self.playerNum:
message2 = TTLocalizer.ChineseCheckersYourTurn
color = (0, 0, 0, 1)
elif player == 1:
message2 = TTLocalizer.CheckersWhiteTurn
color = (1, 1, 1, 1)
elif player == 2:
message2 = TTLocalizer.CheckersBlackTurn
color = (0, 0, 0, 1)
self.turnText = OnscreenText(text=message1 + message2, pos=(-0.8, -0.5), scale=0.092, fg=color, align=TextNode.ACenter, mayChange=1)
return
def startButtonPushed(self):
self.sendUpdate('requestBegin')
self.startButton.hide()
self.clockNode.stop()
self.clockNode.hide()
def exitButtonPushed(self):
self.fsm.request('gameOver')
self.table.fsm.request('off')
self.clockNode.stop()
self.clockNode.hide()
self.table.sendUpdate('requestExit')
def mouseClick(self):
messenger.send('wakeup')
if self.isMyTurn == True and self.inGame == True:
mpos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
self.traverser.traverse(render)
if self.myHandler.getNumEntries() > 0:
self.myHandler.sortEntries()
pickedObj = self.myHandler.getEntry(0).getIntoNodePath()
pickedObj = pickedObj.getNetTag('GamePeiceLocator')
if pickedObj:
self.handleClicked(int(pickedObj))
def handleClicked(self, index):
self.sound = Sequence(SoundInterval(self.clickSound))
if self.moveList == []:
if index not in self.mySquares and index not in self.myKings:
return
self.moveList.append(index)
type = self.board.squareList[index].getState()
if type == 3 or type == 4:
self.moverType = 'king'
else:
self.moverType = 'normal'
self.blinker = Sequence()
col = self.locatorList[index].getColor()
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, self.tintConstant, col))
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, col, self.tintConstant))
self.blinker.loop()
self.sound.start()
elif index in self.mySquares or index in self.myKings:
for x in self.moveList:
self.locatorList[x].setColor(1, 1, 1, 1)
self.locatorList[x].hide()
self.blinker.finish()
self.blinker = Sequence()
col = self.locatorList[index].getColor()
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, self.tintConstant, col))
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, col, self.tintConstant))
self.blinker.loop()
self.sound.start()
self.locatorList[self.moveList[0]].show()
self.moveList = []
self.moveList.append(index)
type = self.board.squareList[index].getState()
if type == 3 or type == 4:
self.moverType = 'king'
else:
self.moverType = 'normal'
else:
self.currentMove = index
lastItem = self.board.squareList[self.moveList[len(self.moveList) - 1]]
thisItem = self.board.squareList[index]
if self.mustJump == True:
if lastItem.getNum() == index:
self.blinker.finish()
self.d_requestMove(self.moveList)
self.isMyTurn = False
self.moveList = []
return
if self.checkLegalJump(lastItem, thisItem, self.moverType) == True:
col = self.locatorList[index].getColor()
self.locatorList[index].show()
self.sound.start()
if self.existsLegalJumpsFrom(index, self.moverType) == False:
self.moveList.append(index)
self.blinker.finish()
self.d_requestMove(self.moveList)
self.moveList = []
self.isMyTurn = False
else:
self.moveList.append(index)
if self.playerColorString == 'white':
x = self.locatorList[index].getChildren()[1]
x.show()
else:
x = self.locatorList[index].getChildren()[2]
x.show()
if self.moverType == 'king':
x.find('**/checker_k*').show()
self.locatorList[index].setColor(Vec4(0.5, 0.5, 0.5, 0.5))
elif self.checkLegalMove(lastItem, thisItem, self.moverType) == True:
self.moveList.append(index)
col = self.locatorList[index].getColor()
self.locatorList[index].show()
self.sound.start()
self.blinker.finish()
self.d_requestMove(self.moveList)
self.moveList = []
self.isMyTurn = False
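    # Board-state convention used by the legality helpers below (as consumed by
    # setGameState and checkLegalJump): squareList states are 0 = empty,
    # 1/2 = a normal piece of player 1/2, and 3/4 = a king of player 1/2.
    # getAdjacent() returns a square's four diagonal neighbours and getJumps()
    # the squares two diagonals beyond them in the same index order; player 1
    # moves forward along indices [1, 2] and player 2 along [0, 3].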
def existsLegalJumpsFrom(self, index, peice):
if peice == 'king':
for x in range(4):
if self.board.squareList[index].getAdjacent()[x] != None and \
self.board.squareList[index].getJumps()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
jump = self.board.squareList[self.board.squareList[index].getJumps()[x]]
if adj.getState() == 0:
pass
elif adj.getState() == self.playerNum or adj.getState() == self.playerNum + 2:
pass
elif jump.getState() == 0:
if index not in self.moveList and jump.getNum() not in self.moveList:
return True
return False
elif peice == 'normal':
if self.playerNum == 1:
moveForward = [1, 2]
elif self.playerNum == 2:
moveForward = [0, 3]
for x in moveForward:
if self.board.squareList[index].getAdjacent()[x] != None and \
self.board.squareList[index].getJumps()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
jump = self.board.squareList[self.board.squareList[index].getJumps()[x]]
if adj.getState() == 0:
pass
elif adj.getState() == self.playerNum or adj.getState() == self.playerNum + 2:
pass
elif jump.getState() == 0:
if index not in self.moveList:
return True
return False
def existsLegalMovesFrom(self, index, peice):
if peice == 'king':
for x in self.board.squareList[index].getAdjacent():
if x != None:
if self.board.squareList[x].getState() == 0:
return True
return False
elif peice == 'normal':
if self.playerNum == 1:
moveForward = [1, 2]
elif self.playerNum == 2:
moveForward = [0, 3]
for x in moveForward:
if self.board.squareList[index].getAdjacent()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
if adj.getState() == 0:
return True
return False
return
def checkLegalMove(self, firstSquare, secondSquare, peice):
if firstSquare.getNum() not in self.mySquares and firstSquare.getNum() not in self.myKings:
return False
if self.playerNum == 1:
moveForward = [1, 2]
else:
moveForward = [0, 3]
if peice == 'king':
for x in range(4):
if firstSquare.getAdjacent()[x] != None:
if self.board.squareList[firstSquare.getAdjacent()[x]].getState() == 0 and secondSquare.getNum() in firstSquare.getAdjacent():
return True
return False
elif peice == 'normal':
for x in moveForward:
if firstSquare.getAdjacent()[x] != None and secondSquare.getNum() in firstSquare.getAdjacent():
if self.board.squareList[firstSquare.getAdjacent()[x]].getState() == 0 and firstSquare.getAdjacent().index(secondSquare.getNum()) == x:
return True
return False
return
def checkLegalJump(self, firstSquare, secondSquare, peice):
if firstSquare.getNum() not in self.mySquares and firstSquare.getNum() not in self.myKings and len(self.moveList) == 1:
return False
if self.playerNum == 1:
moveForward = [1, 2]
opposingPeices = [2, 4]
else:
moveForward = [0, 3]
opposingPeices = [1, 3]
if peice == 'king':
if secondSquare.getNum() in firstSquare.getJumps():
index = firstSquare.getJumps().index(secondSquare.getNum())
if self.board.squareList[firstSquare.getAdjacent()[index]].getState() in opposingPeices:
return True
else:
return False
elif peice == 'normal':
if secondSquare.getNum() in firstSquare.getJumps():
index = firstSquare.getJumps().index(secondSquare.getNum())
if index in moveForward:
if self.board.squareList[firstSquare.getAdjacent()[index]].getState() in opposingPeices:
return True
else:
return False
else:
return False
else:
return False
def d_requestMove(self, moveList):
self.sendUpdate('requestMove', [moveList])
def setGameState(self, tableState, moveList):
if moveList != []:
if self.board.squareList[moveList[0]].getState() == 1 or self.board.squareList[moveList[0]].getState() == 3:
playerColor = 'white'
else:
playerColor = 'black'
if self.board.squareList[moveList[0]].getState() <= 2:
self.animatePeice(tableState, moveList, 'normal', playerColor)
else:
self.animatePeice(tableState, moveList, 'king', playerColor)
else:
self.updateGameState(tableState)
def updateGameState(self, squares):
self.board.setStates(squares)
self.mySquares = []
self.myKings = []
messenger.send('wakeup')
isObserve = False
if self.playerNum == None:
self.playerNum = 1
self.playerColorString = 'white'
isObserve = True
for xx in range(32):
for blah in self.locatorList[xx].getChildren():
blah.hide()
if self.locatorList[xx].getChildren().index(blah) != 0:
blah1 = blah.find('**/checker_k*')
owner = self.board.squareList[xx].getState()
if owner == self.playerNum:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').hide()
else:
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').hide()
self.mySquares.append(xx)
elif owner == 0:
self.hideChildren(self.locatorList[xx].getChildren())
elif owner == self.playerNum + 2:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').show()
else:
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').show()
self.myKings.append(xx)
elif owner <= 2:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').hide()
else:
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').hide()
elif self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').show()
else:
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').show()
if isObserve == True:
self.playerNum = None
self.playerColorString = None
return
self.mustJump = False
self.hasNormalMoves = False
for x in self.myKings:
if self.existsLegalJumpsFrom(x, 'king') == True:
self.mustJump = True
break
else:
self.mustJump = False
if self.mustJump == False:
for x in self.mySquares:
if self.existsLegalJumpsFrom(x, 'normal') == True:
self.mustJump = True
break
else:
self.mustJump = False
if self.mustJump != True:
for x in self.mySquares:
if self.existsLegalMovesFrom(x, 'normal') == True:
self.hasNormalMoves = True
break
else:
self.hasNormalMoves = False
if self.hasNormalMoves == False:
for x in self.myKings:
if self.existsLegalMovesFrom(x, 'king') == True:
self.hasNormalMoves = True
break
else:
self.hasNormalMoves = False
if self.mustJump == False and self.hasNormalMoves == False:
pass
return
def hideChildren(self, nodeList):
for x in range(1, 2):
nodeList[x].hide()
def animatePeice(self, tableState, moveList, type, playerColor):
messenger.send('wakeup')
if playerColor == 'white':
gamePeiceForAnimation = loader.loadModel('phase_6/models/golf/regular_checker_piecewhite.bam')
else:
gamePeiceForAnimation = loader.loadModel('phase_6/models/golf/regular_checker_pieceblack.bam')
if type == 'king':
gamePeiceForAnimation.find('**/checker_k*').show()
else:
gamePeiceForAnimation.find('**/checker_k*').hide()
gamePeiceForAnimation.reparentTo(self.boardNode)
gamePeiceForAnimation.setPos(self.locatorList[moveList[0]].getPos())
if self.isRotated == True:
gamePeiceForAnimation.setH(180)
for x in self.locatorList[moveList[0]].getChildren():
x.hide()
checkersPeiceTrack = Sequence()
length = len(moveList)
for x in range(length - 1):
checkersPeiceTrack.append(Parallel(SoundInterval(self.moveSound), ProjectileInterval(gamePeiceForAnimation, endPos=self.locatorList[moveList[x + 1]].getPos(), duration=0.5)))
checkersPeiceTrack.append(Func(gamePeiceForAnimation.removeNode))
checkersPeiceTrack.append(Func(self.updateGameState, tableState))
checkersPeiceTrack.append(Func(self.unAlpha, moveList))
checkersPeiceTrack.start()
def announceWin(self, avId):
self.fsm.request('gameOver')
def unAlpha(self, moveList):
for x in moveList:
self.locatorList[x].setColorOff()
def doRandomMove(self):
import random
move = []
foundLegal = False
self.blinker.pause()
self.numRandomMoves += 1
while not foundLegal:
x = random.randint(0, 9)
for y in self.board.getAdjacent(self.mySquares[x]):
if y != None and self.board.getState(y) == 0:
move.append(self.mySquares[x])
move.append(y)
foundLegal = True
break
if move == []:
pass
playSound = Sequence(SoundInterval(self.knockSound))
playSound.start()
self.d_requestMove(move)
self.moveList = []
self.isMyTurn = False
if self.numRandomMoves >= 5:
self.exitButtonPushed()
return
def doNothing(self):
pass
|
mit
| 4,444,623,332,312,362,000
| 39.547425
| 363
| 0.554471
| false
| 3.929097
| false
| false
| false
|
ilona-asa/LDSAproject
|
email_counter.py
|
1
|
2018
|
#!/usr/bin/env python
import os
rootdir ='enron_mail_20110402/maildir'
for user in os.listdir(rootdir):
sent_items = 0
sent = 0
_sent_mail = 0
inbox = 0
total = 0
for folder in os.listdir(rootdir+'/'+user):
# print '%s\t%s' % ((folder, os.path.isdir(folder)), 1)
#if os.path.isdir(folder) == True:
# for mail in os.listdir(rootdir+'/'+user+'/'+folder):
# if os.path.isdir(mail) == False:
# print '%s\t%s' % ('total', 1)
# print folder
if folder == 'sent_items':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
sent_items += 1
total += 1
# print '%s,%s,%s' % (user, folder, sent_items)
elif folder == 'sent':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
sent += 1
total += 1
# print '%s,%s,%s' % (user, folder, sent)
elif folder == '_sent_mail':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
_sent_mail += 1
total += 1
# print '%s,%s,%s' % (user, folder, _sent_mail)
elif folder == 'inbox':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
inbox += 1
total += 1
else:
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
total += 1
print '%s,%s,%s' % (user, 'sent_items', sent_items)
print '%s,%s,%s' % (user, 'sent', sent)
print '%s,%s,%s' % (user, '_sent_mail', _sent_mail)
print '%s,%s,%s' % (user, 'inbox', inbox)
print '%s,%s,%s' % (user, 'all', total)
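# The loop above emits one comma-separated record per mailbox folder plus a
# final per-user total, i.e. lines of the form:
#   <user>,sent_items,<count>
#   <user>,sent,<count>
#   <user>,_sent_mail,<count>
#   <user>,inbox,<count>
#   <user>,all,<count>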
|
mit
| 7,093,846,238,697,374,000
| 41.957447
| 76
| 0.387017
| false
| 4.01992
| false
| false
| false
|
jevinw/rec_utilities
|
babel_util/parsers/tree.py
|
1
|
4791
|
#!/usr/bin/env python
import logging
class TreeFile(object):
"""Handling functions for tree files, as produced by Infomap.
The file should be a plain text file with the following format:
<cluster_id> <score> <paper_id>
1:1:1:1 0.000021 "123456"
1:1:1:2 0.023122 "8675309"
"""
def __init__(self, stream, delimiter=' ', comment='#'):
"""Initializes a TreeFile for reading.
Args:
            stream: An iterable providing a line of input for each iteration.
delimiter: Character tree file is delimited by.
comment: Lines starting with this character should be skipped
"""
self.delimiter = delimiter
self.stream = stream
self.comment = comment
def to_dict(self, on_collide="error", transform=None):
"""Converts a TreeFile to a dictionary. Consumes all of stream.
This might consume all available memory if the input stream is large.
Args:
on_collide: If a value already exists in the dictionary what should
happen. Options are:
error - raise an exception
warn - log a warning
info - log an info
transform: If provided a function that will be applied to the
values prior to storing them. This function should accept
a tuple of (cluster_id, score, paper_id):
("1:2:3:4", 0.12345, "A paper title"). If this function returns
None the paper will not be stored.
Returns:
Returns a dictionary using paper_id as the key and
(cluster_id, score, paper_id) as the value.
Raises:
KeyError: If on_collide="error" this signals a duplicate paper_id
in the tree file.
"""
results = dict()
for cid, score, pid in self:
if pid in results:
if on_collide == "error":
raise KeyError("Duplicate paper_id: {0}".format(pid))
elif on_collide == "warn":
logging.warning("Duplicate paper_id: {0}".format(pid))
elif on_collide == "info":
logging.info("Duplicate paper_id: {0}".format(pid))
if transform:
value = transform((cid, score, pid))
if value is not None:
results[pid] = value
else:
results[pid] = (cid, score)
return results
def __iter__(self):
self._iter = iter(self.stream)
return self
def __next__(self):
line = next(self._iter)
while self.comment and line.startswith(self.comment):
line = next(self._iter)
return self.parse_line(line)
def parse_line(self, line):
try:
v = line.split(self.delimiter)
v[2] = v[2].strip().strip('"')
return TreeRecord(v[0], v[2], v[1])
except ValueError:
print(line)
raise
except AttributeError:
print(line)
raise
except IndexError:
print(line)
raise
class TreeRecord(object):
__slots__ = ("pid", "local", "score", "parent")
def __init__(self, cluster, pid, score, delimiter=':'):
if not pid or pid == "":
raise ValueError("Invalid pid")
if score is None:
raise ValueError("Invalid score")
if cluster is None:
raise ValueError("Invalid cluster")
cluster = cluster.split(delimiter)
try:
cluster.pop() # Remove local order
self.local = delimiter.join(cluster)
if not self.local:
raise ValueError("Invalid cluster")
except IndexError:
self.local = None
try:
cluster.pop() # Remove local-cluster id
if len(cluster):
self.parent = delimiter.join(cluster)
else:
self.parent = None
except IndexError:
self.parent = None
score = float(score)
if score == 0:
score = -1.0 #Dynamo doesn't understand inf
# Strip whitespace and any quotes
self.pid = pid.strip().strip('"')
self.score = score
def __eq__(self, other):
if not isinstance(other, TreeRecord):
return False
return self.pid == other.pid and self.local == other.local and self.parent == other.parent
def __ne__(self, other):
return not self == other
def __str__(self):
return "<TreeRecord: %s %s %s>" % (self.local, self.pid, self.score)
def __repr__(self):
return "<TreeRecord: %s %s %s>" % (self.local, self.pid, self.score)
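# Minimal usage sketch (not part of the parser): iterate an in-memory list of
# lines in the "<cluster_id> <score> <paper_id>" format documented above and
# print the resulting TreeRecord objects. The sample lines mirror the module
# docstring and are illustrative only.
if __name__ == "__main__":
    sample_lines = [
        '# comment lines are skipped',
        '1:1:1:1 0.000021 "123456"',
        '1:1:1:2 0.023122 "8675309"',
    ]
    for record in TreeFile(sample_lines):
        print(record)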
|
agpl-3.0
| 51,877,899,420,969,890
| 31.591837
| 98
| 0.540388
| false
| 4.444341
| false
| false
| false
|
evilchili/shiptrak
|
mmsn/settings/__init__.py
|
1
|
4072
|
"""
Django settings for mmsn project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import socket
import dj_database_url
from django.utils.crypto import get_random_string
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd%ehat=&bb5pr+=unsxmpxq(57@1nx+okkyni3n9lk!a#pduq&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'shiptrak', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#"django.core.context_processors.request",
#"django.contrib.auth.context_processors.auth",
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shiptrak',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mmsn.urls'
WSGI_APPLICATION = 'mmsn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'default': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
}
GOOGLE_MAPS_API_KEY = ''
CACHE_DIR = os.path.abspath(os.path.join(BASE_DIR, 'callsign_data'))
WINLINK_API_URL = "http://cms.winlink.org/"
h = socket.gethostname()
try:
(h, domain) = h.split('.', 2)
print("from mmsn.settings.{0} import *".format(h))
exec(("from mmsn.settings.{0} import *".format(h)), locals())
print("Overriding production configuration with local settings for host {}".format(h))
except Exception as e:
SECRET_KEY = get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
ALLOWED_HOSTS = ['*']
CSRF_TRUSTED_ORIGINS = ['*']
SESSION_COOKIE_SECURE = False
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
DATABASES = {
'default': dj_database_url.config()
}
DEBUG = False
GOOGLE_MAPS_API_KEY = 'AIzaSyDHRIu1CdX0O95_bTdyyiom4Z84uzKG0bw'
GOOGLE_ANALYTICS_ID = 'UA-52163451-1'
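# Deployment note (derived from the try/except above): per-host overrides are
# loaded from mmsn/settings/<short hostname>.py when such a module exists;
# otherwise the except branch applies the production defaults, reading the
# database configuration from the DATABASE_URL environment variable via
# dj_database_url.config().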
|
mit
| -5,726,247,871,517,285,000
| 26.513514
| 100
| 0.658644
| false
| 3.343186
| false
| false
| false
|
igemsoftware/HFUT-China_2015
|
design/search_part.py
|
1
|
5122
|
"""
search_part.py realize the part search
@author: Bowen
"""
from elasticsearch import Elasticsearch
from design.models import parts, teams, team_parts, part_papers, paper
import traceback
def getPart(partName):
"""
find the part with part name
@param partName: name of a part
@type partName: str
@return : part information
@rtype: dict
"""
try:
partObj = parts.objects.get(part_name=partName)
papers = part_papers.objects.filter(part=partObj)
result = {
'isSuccessful': True,
'isEmpty': False,
'part_id': partObj.part_id,
'ok': partObj.ok,
'part_name': partObj.part_name,
'nickname' : partObj.nickname,
'short_desc': partObj.short_desc,
'description': partObj.description,
'part_type': partObj.part_type,
'author': partObj.author,
'status': partObj.status,
'dominant': partObj.dominant,
'discontinued': partObj.discontinued,
'part_status': partObj.part_status,
'sample_status': partObj.sample_status,
'p_status_cache': partObj.p_status_cache,
's_status_cache': partObj.s_status_cache,
'in_stock': partObj.in_stock,
'results': partObj.results,
'favorite': partObj.favorite,
'ps_string': partObj.ps_string,
'scars' : partObj.scars,
'barcode' : partObj.barcode,
'notes' : partObj.notes,
'source' : partObj.source,
'premium' : partObj.premium,
'categories' : partObj.categories,
'sequence' : partObj.sequence,
'sequence_length' : partObj.sequence_length,
'part_url' : partObj.part_url,
'score' : str(partObj.score)
}
paper_list = list()
for paper in papers:
paper_info = {
'name': paper.paper.paper_name,
'url' : paper.paper.paper_url
}
paper_list.append(paper_info)
result['paper'] = paper_list
except:
traceback.print_exc()
result = {
'isSuccessful': False
}
return result
def ambiguousSearch(keyword, funcs):
"""
    Fuzzily search parts by the keyword and adjust the result with the given functions.
    @param keyword: search keyword
    @type keyword: str
    @param funcs: underscore-separated function ids
    @type funcs: str
@return: search result
@rtype: list
"""
es = Elasticsearch()
result = format_fuzzy_result(sort_result(fuzzy_search_parts(es, keyword), funcs))
return result
def fuzzy_search_parts(es, keyword):
"""
fuzzy search part with elasticsearch
@param es: elasticsearch object
@type es: Elasticsearch
@param keyword: search keyword
@type keyword: str
@return: elasticsearch search result
@rtype: dict
"""
query_body = {
"from" : 0,
"size" : 80,
"query" : {
"fuzzy_like_this" : {
"fields" : ["part_name", "part_type", "short_desc"],
"like_text" : keyword,
"max_query_terms" : 80
}
}
}
result = es.search(index="biodesigners", doc_type="parts", body=query_body)
return result
def get_func_parts(func_list):
"""
get parts related to functions
@param func_list: functions
@type func_list: list
@return : parts related to functions
@rtype: list
"""
part_list = list()
for func_id in func_list:
team_list = teams.objects.filter(function_id=func_id)
for team_obj in team_list:
part_list.extend(team_parts.objects.filter(team=team_obj))
result = list()
for part_obj in part_list:
result.append(part_obj.part_id)
return result
def sort_result(es_result, funcs):
"""
sort result according to the functions
@param funcs: functions
@type funcs : list
@return : sorted result
@rtype: list
"""
if funcs == None:
func_parts = list()
else:
if funcs.endswith('_'):
funcs = funcs[:-1]
if funcs.startswith('_'):
funcs = funcs[1:]
func_parts = get_func_parts(funcs.split('_'))
hits = es_result['hits']['hits']
for item in hits:
if item['_source']['part_id'] in func_parts:
item['_score'] += 1.5
hits = sorted(hits, key = lambda x:x['_score'], reverse = True)
return hits[:40]
def format_fuzzy_result(hits):
"""
format search result
@param hits: searched parts
    @type hits: list
    @return: part information list
@rtype: list
"""
part_list = list()
for item in hits:
info = item['_source']
part_info = {
'part_name' : info['part_name'],
'part_id' : info['part_id'],
'part_type' : info['part_type'],
}
part_list.append(part_info)
return part_list
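# Minimal usage sketch (illustrative only): the imports above require a
# configured Django project, and ambiguousSearch() needs a local Elasticsearch
# instance holding the "biodesigners" index. The part name is a placeholder.
if __name__ == '__main__':
    print(getPart('BBa_B0034'))
    for hit in ambiguousSearch('promoter', None):
        print(hit['part_id'], hit['part_name'], hit['part_type'])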
|
apache-2.0
| 1,825,477,763,711,557,600
| 28.606936
| 85
| 0.545881
| false
| 3.889142
| false
| false
| false
|
ruipgil/TrackToTrip
|
scripts/test/test_t_mode_changepoint.py
|
1
|
2236
|
from tracktotrip import Track
import tracktotrip.transportation_mode as tm
from changepy import pelt
from changepy.costs import normal_mean
import numpy as np
import matplotlib.pyplot as plt
temp_trk = [
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_1.gpx')[0],
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_2.gpx')[0],
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_3.gpx')[0]
]
segs = []
for trke in temp_trk:
segs.extend(trke.segments)
trk = Track("", segs)
trk.compute_metrics()
trk.to_trip('', 0, 5.0, 0.15, 80, 0.3, '%Y-%m-%d')
def raw_vel(seg):
return [p.vel for p in seg.points]
def raw_acc(seg):
return [p.acc for p in seg.points]
def abs_vel(seg):
return [abs(p.vel) for p in seg.points]
def square_vel(seg):
return [p.vel**2 for p in seg.points]
def diff_vel(seg):
result = []
last = None
for p in seg.points:
if last is None:
result.append(0)
else:
result.append(last.vel-p.vel)
last = p
return result
def abs_diff_vel(seg):
return [abs(v) for v in diff_vel(seg)]
def square_diff_vel(seg):
return [v**3 for v in diff_vel(seg)]
def compute_metric(metric):
return [metric(seg) for seg in trk.segments]
colors = 'rgby'
def plot(ax, data, changepoints):
index = 0
for i, seg_data in enumerate(data):
ax.plot(range(index, len(seg_data) + index), seg_data, '-')
for changepoint in changepoints[i]:
ax.axvline(changepoint + index, color='k', linestyle='--')
index = index + len(seg_data)
def pelt_(data):
return pelt(normal_mean(data, np.std(data)), len(data))
plot_n = 1
plot_cols = 2
plot_rows = 3
def changepoint_for(metric):
global plot_n
ax = fig.add_subplot(plot_rows, plot_cols, plot_n)
data = compute_metric(metric)
changepoints = [pelt_(d) for d in data]
ax.set_title("%s (%d changepoints)" % (metric.__name__, sum([len(c) for c in changepoints])))
plot(ax, data, changepoints)
plot_n = plot_n + 1
fig = plt.figure()
changepoint_for(raw_vel)
changepoint_for(abs_vel)
changepoint_for(square_vel)
changepoint_for(diff_vel)
changepoint_for(square_diff_vel)
changepoint_for(raw_acc)
plt.show()
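# Each subplot produced above runs PELT changepoint detection (changepy.pelt
# with a normal-mean cost parameterised by the signal's own standard deviation)
# on one velocity- or acceleration-derived metric per segment, so the subplot
# titles compare how many changepoints each transformation yields.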
|
mit
| -9,132,506,782,880,904,000
| 24.123596
| 97
| 0.647138
| false
| 2.833967
| false
| false
| false
|
pybel/pybel-tools
|
src/pybel_tools/analysis/neurommsig/export.py
|
1
|
8861
|
# -*- coding: utf-8 -*-
"""This module contains the functions needed to process the NeuroMMSig excel sheets as well as export as BEL.
To run, type :code:`python3 -m pybel_tools.analysis.neurommsig` in the command line
"""
import itertools as itt
import logging
import os
import re
import time
from functools import partial
from typing import Mapping, TextIO
import pandas as pd
import pybel
from bel_resources import get_bel_resource
from pybel import BELGraph
from pybel.dsl import Abundance, Gene
from pybel.utils import ensure_quotes
logger = logging.getLogger(__name__)
hgnc_symbol_pattern = re.compile(r"^[A-Z0-9-]+$|^C[0-9XY]+orf[0-9]+$")
snp_pattern = re.compile(r"^rs[0-9]+$")
snps_pattern_space = re.compile(r"^(rs[0-9]+)\s((rs[0-9]+)\s)*(rs[0-9]+)$")
snps_pattern_comma = re.compile(r"^(rs[0-9]+),((rs[0-9]+),)*(rs[0-9]+)$")
snps_pattern_space_comma = re.compile(r"^(rs[0-9]+), ((rs[0-9]+), )*(rs[0-9]+)$")
checked_by_anandhi = re.compile(r"No")
mirna_pattern = re.compile(r"^MIR.*$")
mirnas_pattern = re.compile(r"^(MIR.*),((MIR.*$),)*(MIR.*$)$")
def preprocessing_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel sheet.
:param path: filepath of the excel data
:return: df: pandas dataframe with excel data
"""
if not os.path.exists(path):
raise ValueError("Error: %s file not found" % path)
# Import Models from Excel sheet, independent for AD and PD
df = pd.read_excel(path, sheet_name=0, header=0)
# Indexes and column name
# [log.info(str(x)+': '+str((df.columns.values[x]))) for x in range (0,len(df.columns.values))]
# Starting from 4: Pathway Name
# Fill Pathway cells that are merged and are 'NaN' after deleting rows where there is no genes
for column_idx in (0, 1): # identifiers column then names columns
df.iloc[:, column_idx] = pd.Series(df.iloc[:, column_idx]).fillna(method='ffill')
# Number of gaps
# log.info(df.ix[:,6].isnull().sum())
df = df[df.iloc[:, 1].notnull()]
df = df.reset_index(drop=True)
# Fill NaN to zeros in PubMed identifier column
df.iloc[:, 2].fillna(0, inplace=True)
# Number of gaps in the gene column should be already zero
if (df.iloc[:, 1].isnull().sum()) != 0:
raise ValueError("Error: Empty cells in the gene column")
# Check current state
# df.to_csv('out.csv')
return df
def munge_cell(cell, line=None, validators=None):
"""Process a cell from the NeuroMMSig excel sheet."""
if pd.isnull(cell) or isinstance(cell, int):
return None
c = ' '.join(cell.split())
if validators is not None and all(re.match(validator, c) is None for validator in validators):
if line:
            logger.info("Munge cell error: approximately line %s: %s", line, c)
return None
return [x.strip() for x in str(c).strip().split(',')]
def preprocessing_br_projection_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel file."""
if not os.path.exists(path):
raise ValueError(f"Error: {path} file not found")
    return pd.read_excel(path, sheet_name=0, header=0)
munge_snp = partial(munge_cell, validators=[snp_pattern, snps_pattern_space_comma])
mesh_alzheimer = "Alzheimer Disease" # Death to the eponym!
mesh_parkinson = "Parkinson Disease"
CANNED_EVIDENCE = 'Serialized from NeuroMMSigDB'
CANNED_CITATION = '28651363'
PATHWAY_ID_COLUMN_NAME = 'NeuroMMSig identifier'
PATHWAY_COLUMN_NAME = 'Subgraph Name'
GENE_COLUMN_NAME = 'Genes'
pmids_column = 'PMIDs'
snp_from_literature_column = 'SNPs from Literature (Aybuge)'
snp_from_gwas_column = 'Genome wide associated SNPs (Mufassra)'
snp_from_ld_block_column = 'LD block analysis (Mufassra)'
clinical_features_column = 'Imaging Features (Anandhi)'
snp_from_imaging_column = 'SNP_Image Feature (Mufassra & Anandhi)'
columns = [
GENE_COLUMN_NAME,
pmids_column,
snp_from_literature_column,
snp_from_gwas_column,
snp_from_ld_block_column,
clinical_features_column,
snp_from_imaging_column,
]
def preprocess(path: str) -> pd.DataFrame:
"""Preprocess a NeuroMMSig excel sheet, specified by a file path."""
df = preprocessing_excel(path)
df[snp_from_literature_column] = df[snp_from_literature_column].map(munge_snp)
df[snp_from_gwas_column] = df[snp_from_gwas_column].map(munge_snp)
df[snp_from_ld_block_column] = df[snp_from_ld_block_column].map(munge_snp)
df[clinical_features_column] = df[clinical_features_column].map(munge_cell)
df[clinical_features_column] = df[clinical_features_column].map(
lambda c: None
if c is not None and c[0] == 'No' else
c
)
df[snp_from_imaging_column] = df[snp_from_imaging_column].map(munge_snp)
return df
def get_nift_values() -> Mapping[str, str]:
"""Map NIFT names that have been normalized to the original names."""
r = get_bel_resource('https://arty.scai.fraunhofer.de/artifactory/bel/namespace/nift/NIFT.belns')
return {
name.lower(): name
for name in r['Values']
}
def write_neurommsig_bel(
file: TextIO,
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> None:
"""Write the NeuroMMSigDB excel sheet to BEL.
:param file: a file or file-like that can be writen to
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
graph = get_neurommsig_bel(df, disease, nift_values)
pybel.to_bel_script(graph, file)
def get_neurommsig_bel(
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> BELGraph:
"""Generate the NeuroMMSig BEL graph.
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
missing_features = set()
fixed_caps = set()
nift_value_originals = set(nift_values.values())
graph = BELGraph(
name=f'NeuroMMSigDB for {disease}',
description=f'SNP and Clinical Features for Subgraphs in {disease}',
authors='Daniel Domingo-Fernández, Charles Tapley Hoyt, Mufassra Naz, Aybuge Altay, Anandhi Iyappan',
contact='daniel.domingo.fernandez@scai.fraunhofer.de',
version=time.strftime('%Y%m%d'),
)
for pathway, pathway_df in df.groupby(PATHWAY_COLUMN_NAME):
sorted_pathway_df = pathway_df.sort_values(GENE_COLUMN_NAME)
sliced_df = sorted_pathway_df[columns].itertuples()
for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:
gene = ensure_quotes(gene)
for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):
if not snp.strip():
continue
graph.add_association(
Gene('HGNC', gene),
Gene('DBSNP', snp),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
for clinical_feature in clinical_features or []:
if not clinical_feature.strip():
continue
if clinical_feature.lower() not in nift_values:
missing_features.add(clinical_feature)
continue
if clinical_feature not in nift_value_originals:
fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))
clinical_feature = nift_values[clinical_feature.lower()] # fix capitalization
graph.add_association(
Gene('HGNC', gene),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if clinical_snps:
for clinical_snp in clinical_snps:
graph.add_association(
Gene('DBSNP', clinical_snp),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if missing_features:
logger.warning('Missing Features in %s', disease)
for feature in missing_features:
logger.warning(feature)
if fixed_caps:
logger.warning('Fixed capitalization')
for broken, fixed in fixed_caps:
logger.warning('%s -> %s', broken, fixed)
return graph
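# Minimal usage sketch (illustrative only): the excel path and output filename
# are placeholders, and get_nift_values() needs network access to fetch the
# NIFT namespace before the BEL graph can be serialized.
if __name__ == '__main__':
    nift = get_nift_values()
    ad_df = preprocess('neurommsig_ad.xlsx')  # hypothetical input sheet
    with open('neurommsig_ad.bel', 'w') as bel_file:
        write_neurommsig_bel(bel_file, ad_df, mesh_alzheimer, nift)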
|
mit
| 6,785,736,960,564,687,000
| 33.341085
| 112
| 0.611174
| false
| 3.241859
| false
| false
| false
|
ilya-epifanov/ansible
|
lib/ansible/cli/doc.py
|
1
|
11138
|
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-doc displays documentation for modules installed in Ansible's library
# paths. See http://docs.ansible.com/ for more details.
import fcntl
import datetime
import os
import struct
import termios
import traceback
import textwrap
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
class DocCLI(CLI):
    """ Module documentation command line class """
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
def __init__(self, args, display=None):
super(DocCLI, self).__init__(args, display)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
filename = module_loader.find_plugin(module)
if filename is None:
self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
except:
self.display.vvv(traceback.print_exc())
self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in doc['options'].iteritems():
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += DocCLI.get_snippet_text(doc)
else:
text += DocCLI.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception, e:
self.display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in self.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
tty_size = 0
if os.isatty(0):
tty_size = struct.unpack('HHHH',
fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
columns = max(60, tty_size)
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
@staticmethod
def get_snippet_text(doc):
text = []
        desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
if opt.get('required', False):
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, desc))
text.append('')
return "\n".join(text)
@staticmethod
def get_man_text(doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
desc = " ".join(doc['description'])
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
if opt.get('required', False):
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
desc = " ".join(opt['description'])
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt:
default = str(opt['default'])
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=opt_indent,
subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), initial_indent=" ",
subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), initial_indent=" ",
subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
                maintainers.add(doc['maintainers'])
else:
                maintainers.update(doc['maintainers'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
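# Typical invocations handled by this CLI (derived from the options defined in
# parse() above):
#   ansible-doc -l              # list available modules with short descriptions
#   ansible-doc <module> ...    # show full documentation for the named modules
#   ansible-doc -s <module>     # show a playbook snippet for the named modules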
|
gpl-3.0
| -8,255,860,300,840,190,000
| 35.638158
| 158
| 0.532412
| false
| 4.280553
| false
| false
| false
|
linktlh/Toontown-journey
|
otp/uberdog/GlobalOtpObjectUD.py
|
1
|
1956
|
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
MANAGER_CLASS = ''
class GlobalOtpObjectUD(DistributedObjectGlobalUD):
notify = directNotify.newCategory('GlobalOtpObjectUD')
def announceGenerate(self):
DistributedObjectGlobalUD.announceGenerate(self)
self.senders2Mgrs = {}
def __makeAvMsg(self, field, values, recipient):
return self.air.dclassesByName['DistributedToonUD'].getFieldByName(field).aiFormatUpdate(
recipient, recipient, simbase.air.ourChannel, values)
def sendToAvatar(self, avId, field, values):
dg = self.__makeAvMsg(field, values, avId)
self.air.send(dg)
def __makeAIMsg(self, field, values, recipient):
return self.air.dclassesByName[MANAGER_CLASS].getFieldByName(field).aiFormatUpdate(
recipient, recipient, simbase.air.ourChannel, values)
def sendToAI(self, field, values, sender=None):
if not MANAGER_CLASS:
            self.notify.warning('An AI manager class is not implemented!')
return
if not sender:
sender = self.air.getAvatarIdFromSender()
dg = self.__makeAIMsg(field, values, self.senders2Mgrs.get(sender, sender + 8))
self.air.send(dg)
def hello(self, channel):
if not MANAGER_CLASS:
            self.notify.warning('An AI manager class is not implemented!')
return
self.senders2Mgrs[simbase.air.getAvatarIdFromSender()] = channel
# Manager classes must implement their own response to hello's
self.sendToAI('UDResponse', [])
self.air.addPostRemove(self.__makeAIMsg('UDLost', [], channel))
def heartbeat(self, channel):
if simbase.air.getAvatarIdFromSender() not in self.senders2Mgrs:
self.senders2Mgrs[simbase.air.getAvatarIdFromSender()] = channel
self.sendUpdateToChannel(simbase.air.getAvatarIdFromSender(), 'heartbeatResponse', [])
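# Routing note (derived from the methods above): hello() and heartbeat() record
# the calling avatar's AI manager channel in senders2Mgrs, and sendToAI()
# directs later field updates to that channel, falling back to sender + 8 when
# no manager has registered for the sender yet.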
|
apache-2.0
| -1,745,958,492,094,661,600
| 37.352941
| 97
| 0.685072
| false
| 3.761538
| false
| false
| false
|
xiangke/pycopia
|
mibs/pycopia/mibs/UCD_SNMP_MIB.py
|
1
|
28770
|
# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import OBJECT_TYPE, NOTIFICATION_TYPE, MODULE_IDENTITY, Integer32, Opaque, enterprises, Counter32
from SNMPv2_TC import TEXTUAL_CONVENTION, DisplayString, TruthValue
class UCD_SNMP_MIB(ModuleObject):
path = '/usr/share/snmp/mibs/site/UCD-SNMP-MIB'
conformance = 3
name = 'UCD-SNMP-MIB'
language = 2
description = 'Deprecate the non-raw objects.'
# nodes
class ucdavis(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021])
name = 'ucdavis'
class memory(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4])
name = 'memory'
class systemStats(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11])
name = 'systemStats'
class ucdInternal(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 12])
name = 'ucdInternal'
class ucdExperimental(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 13])
name = 'ucdExperimental'
class logMatch(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16])
name = 'logMatch'
class version(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100])
name = 'version'
class snmperrs(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101])
name = 'snmperrs'
class ucdSnmpAgent(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250])
name = 'ucdSnmpAgent'
class hpux9(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 1])
name = 'hpux9'
class sunos4(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 2])
name = 'sunos4'
class solaris(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 3])
name = 'solaris'
class osf(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 4])
name = 'osf'
class ultrix(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 5])
name = 'ultrix'
class hpux10(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 6])
name = 'hpux10'
class netbsd1(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 7])
name = 'netbsd1'
class freebsd(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 8])
name = 'freebsd'
class irix(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 9])
name = 'irix'
class linux(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 10])
name = 'linux'
class bsdi(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 11])
name = 'bsdi'
class openbsd(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 12])
name = 'openbsd'
class win32(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 13])
name = 'win32'
class hpux11(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 14])
name = 'hpux11'
class unknown(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 255])
name = 'unknown'
class ucdTraps(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251])
name = 'ucdTraps'
# macros
# types
class Float(pycopia.SMI.Basetypes.Opaque):
status = 1
ranges = Ranges(Range(7, 7))
# scalars
class memIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memErrorName(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class memTotalSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalReal(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailReal(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalSwapTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailSwapTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalRealTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailRealTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalFree(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memMinimumSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 12])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memShared(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 13])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memBuffer(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 14])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memCached(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 15])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memSwapError(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memSwapErrorMsg(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class ssIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssErrorName(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class ssSwapIn(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSwapOut(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssIOSent(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssIOReceive(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSysInterrupts(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSysContext(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuUser(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuSystem(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuIdle(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuRawUser(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 50])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawNice(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 51])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawSystem(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 52])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawIdle(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 53])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawWait(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 54])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawKernel(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 55])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawInterrupt(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 56])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssIORawSent(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 57])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssIORawReceived(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 58])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssRawInterrupts(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 59])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssRawContexts(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 60])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchMaxEntries(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionTag(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionDate(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionCDate(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionIdent(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 5])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionConfigureOptions(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 6])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionClearCache(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionUpdateConfig(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionRestartAgent(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 12])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionSavePersistentData(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 13])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionDoDebugging(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 20])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrNames(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class snmperrErrorFlag(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrErrMessage(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
# columns
class prIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class prMin(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prMax(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrMessage(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class prErrFix(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 102])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrFixCmd(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 103])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extCommand(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extResult(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extOutput(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extErrFix(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 102])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extErrFixCmd(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 103])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPath(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskDevice(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskMinimum(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskMinPercent(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskTotal(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskAvail(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskUsed(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPercent(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPercentNode(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskErrorMsg(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laLoad(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laConfig(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laLoadInt(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laLoadFloat(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 6])
syntaxobject = Float
class laErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laErrMessage(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class fileIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class fileName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class fileSize(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 4
units = 'kB'
class fileMax(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 4
units = 'kB'
class fileErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class fileErrorMsg(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchFilename(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchRegEx(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchGlobalCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchGlobalCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCurrentCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchCurrentCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCycle(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class logMatchRegExCompilation(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class mrIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.ObjectIdentifier
class mrModuleName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
# rows
class prEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([prIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1])
access = 2
columns = {'prIndex': prIndex, 'prNames': prNames, 'prMin': prMin, 'prMax': prMax, 'prCount': prCount, 'prErrorFlag': prErrorFlag, 'prErrMessage': prErrMessage, 'prErrFix': prErrFix, 'prErrFixCmd': prErrFixCmd}
class extEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([extIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1])
access = 2
columns = {'extIndex': extIndex, 'extNames': extNames, 'extCommand': extCommand, 'extResult': extResult, 'extOutput': extOutput, 'extErrFix': extErrFix, 'extErrFixCmd': extErrFixCmd}
class dskEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([dskIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1])
access = 2
columns = {'dskIndex': dskIndex, 'dskPath': dskPath, 'dskDevice': dskDevice, 'dskMinimum': dskMinimum, 'dskMinPercent': dskMinPercent, 'dskTotal': dskTotal, 'dskAvail': dskAvail, 'dskUsed': dskUsed, 'dskPercent': dskPercent, 'dskPercentNode': dskPercentNode, 'dskErrorFlag': dskErrorFlag, 'dskErrorMsg': dskErrorMsg}
class laEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([laIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1])
access = 2
columns = {'laIndex': laIndex, 'laNames': laNames, 'laLoad': laLoad, 'laConfig': laConfig, 'laLoadInt': laLoadInt, 'laLoadFloat': laLoadFloat, 'laErrorFlag': laErrorFlag, 'laErrMessage': laErrMessage}
class fileEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([fileIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1])
access = 2
columns = {'fileIndex': fileIndex, 'fileName': fileName, 'fileSize': fileSize, 'fileMax': fileMax, 'fileErrorFlag': fileErrorFlag, 'fileErrorMsg': fileErrorMsg}
class logMatchEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([logMatchIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1])
access = 2
columns = {'logMatchIndex': logMatchIndex, 'logMatchName': logMatchName, 'logMatchFilename': logMatchFilename, 'logMatchRegEx': logMatchRegEx, 'logMatchGlobalCounter': logMatchGlobalCounter, 'logMatchGlobalCount': logMatchGlobalCount, 'logMatchCurrentCounter': logMatchCurrentCounter, 'logMatchCurrentCount': logMatchCurrentCount, 'logMatchCounter': logMatchCounter, 'logMatchCount': logMatchCount, 'logMatchCycle': logMatchCycle, 'logMatchErrorFlag': logMatchErrorFlag, 'logMatchRegExCompilation': logMatchRegExCompilation}
class mrEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([mrIndex], True)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1])
access = 2
columns = {'mrIndex': mrIndex, 'mrModuleName': mrModuleName}
# notifications (traps)
class ucdStart(NotificationObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251, 1])
class ucdShutdown(NotificationObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251, 2])
# groups
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
|
lgpl-2.1
| 7,706,995,773,191,167,000
| 27.97281
| 525
| 0.714599
| false
| 2.569668
| false
| false
| false
|
CFDEMproject/LAMMPS
|
tools/moltemplate/src/ltemplify.py
|
1
|
94070
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""
ltemplify.py
The "ltemplify.py" script can be used to convert existing LAMMPS
input script and data files into a single .ttree file
(which includes both topology and force-field information
for a single molecule in your system).
Example:
ltemplify.py -name Mol file.in file.data > mol.ttree
This creates a template for a new type of molecule (named "Mol"),
consisting of all the atoms in the lammps files you included,
and saves this data in a single ttree file ("mol.ttree").
This file can be used with moltemplate/ttree to
define large systems containing this molecule.
"""
import sys
from ttree_lex import *
from lttree_styles import *
def Intify(s):
if s.isdigit():
return int(s)
elif s[0:2] == 'id':
return int(s[2:])
elif s[0:4] == 'type':
return int(s[4:])
else:
return s
def StringToInterval(sel_str, slice_delim='*'):
i_slice = sel_str.find(slice_delim)
if i_slice == -1:
if sel_str.isdigit():
a = int(sel_str)
b = int(sel_str)
else:
a = sel_str
b = sel_str
else:
a = sel_str[:i_slice]
b = sel_str[i_slice+len(slice_delim):]
if (((len(a)>0) and (not a.isdigit())) or
((len(b)>0) and (not b.isdigit()))):
raise InputError('Error: invalid selection string \"'+
sel_str+'\"\n')
if (len(a) > 0):
a = int(a)
else:
a = None
if (len(b) > 0):
b = int(b)
else:
b = None
return a,b
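# Illustrative examples of StringToInterval() (comments only, derived from the
# code above; the slice delimiter defaults to '*'):
#     StringToInterval('3')    -> (3, 3)        a single id
#     StringToInterval('3*7')  -> (3, 7)        a closed range
#     StringToInterval('3*')   -> (3, None)     open-ended above
#     StringToInterval('*')    -> (None, None)  matches everything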
# Selections are simply lists of 2-tuples (pairs)
def LammpsSelectToIntervals(sel_str, slice_delim='*', or_delim=', '):
"""
This function converts a string such as "1*4 6 9*12" into
a list of tuples, for example: [(1,4), (6,6), (9,12)]
    In general, the list of intervals has the form:
[(a1,b1), (a2,b2), (a3,b3), ... ]
An atom is considered to belong to this selection
if it happens to lie within the closed interval [a,b]
for any pair of a,b values in the list of intervals.
If for a given pair a,b, either a or b is "None", then that a or b
value is not used to disqualify membership in the interval.
(Similar to -infinity or +infinity. In other words if a is set to None,
then to belong to the interval it is enough to be less than b.)
"""
selection_list = []
#tokens = sel_str.split(or_delim) <-- Not what we want when len(or_delim)>1
tokens = LineLex.TextBlock2Lines(sel_str, or_delim, keep_delim=False)
for token in tokens:
token = token.strip()
(a,b) = StringToInterval(token, slice_delim)
selection_list.append((a, b))
return selection_list
def IntervalListToMinMax(interval_list):
min_a = None
max_b = None
for (a,b) in interval_list:
if ((not (type(a) is int)) or (not (type(b) is int))):
return None,None #only integer min/max makes sense. otherwise skip
if (min_a == None) or (a < min_a):
min_a = a
if (max_b == None) or (b > max_b):
max_b = b
return min_a, max_b
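# Example of IntervalListToMinMax() (comments only): for purely integer
# intervals such as [(1,4), (6,6), (9,12)] it returns (1, 12); if any endpoint
# is a string or None it returns (None, None), since no integer min/max exists.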
def BelongsToSel(i, sel):
if (i == None) or (sel == None) or (len(sel) == 0):
# If the user has not specified a selection for this category,
# then by default all objects are accepted
return True
elif (type(i) is str):
if i.isdigit():
i = int(i)
else:
return True
belongs = False
for interval in sel:
assert(len(interval) == 2)
if interval[0]:
if i >= interval[0]:
if (interval[1] == None) or (i <= interval[1]):
belongs = True
break
elif interval[1]:
if i <= interval[1]:
belongs = True
break
else:
# In that case, the user entered something like "*"
# which covers all possible numbers
belongs = True
break
return belongs
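# End-to-end example of the selection helpers (comments only):
#     sel = LammpsSelectToIntervals('1*4 6 9*12')  # -> [(1,4), (6,6), (9,12)]
#     BelongsToSel(3,  sel)   # -> True   (inside 1..4)
#     BelongsToSel(7,  sel)   # -> False  (between intervals)
#     BelongsToSel(10, sel)   # -> True   (inside 9..12)
#     BelongsToSel(10, [])    # -> True   (an empty selection accepts everything)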
try:
    g_program_name = __file__.split('/')[-1] # = 'ltemplify.py'
g_version_str = '0.3'
g_date_str = '2012-12-11'
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
non_empty_output = False
no_warnings = True
indent = 2
cindent = 0
atomid_selection = []
atomtype_selection = []
molid_selection = []
mol_name = ''
min_sel_atomid = None
min_sel_atomtype = None
min_sel_bondid = None
min_sel_bondtype = None
min_sel_angleid = None
min_sel_angletype = None
min_sel_dihedralid = None
min_sel_dihedraltype = None
min_sel_improperid = None
min_sel_impropertype = None
max_sel_atomid = None
max_sel_atomtype = None
max_sel_bondid = None
max_sel_bondtype = None
max_sel_angleid = None
max_sel_angletype = None
max_sel_dihedralid = None
max_sel_dihedraltype = None
max_sel_improperid = None
max_sel_impropertype = None
needed_atomids = set([])
needed_atomtypes = set([])
needed_bondids = set([])
needed_bondtypes = set([])
needed_angleids = set([])
needed_angletypes = set([])
needed_dihedralids = set([])
needed_dihedraltypes = set([])
needed_improperids = set([])
needed_impropertypes = set([])
min_needed_atomtype = None
max_needed_atomtype = None
min_needed_bondtype = None
max_needed_bondtype = None
min_needed_angletype = None
max_needed_angletype = None
min_needed_dihedraltype = None
max_needed_dihedraltype = None
# To process the selections, we need to know the atom style:
atom_style_undefined = True
i_atomid = None
i_atomtype = None
i_molid = None
l_in_init = []
l_in_settings = []
l_in_masses = []
l_in_pair_coeffs = []
l_in_bond_coeffs = []
l_in_angle_coeffs = []
l_in_dihedral_coeffs = []
l_in_improper_coeffs = []
l_data_masses = []
l_data_bond_coeffs = []
l_data_angle_coeffs = []
l_data_dihedral_coeffs = []
l_data_improper_coeffs = []
l_data_pair_coeffs = []
l_data_atoms = []
l_data_velocities = []
l_data_bonds = []
l_data_angles = []
l_data_dihedrals = []
l_data_impropers = []
# class2 force fields
l_data_bondbond_coeffs = []
l_data_bondangle_coeffs = []
l_data_middlebondtorsion_coeffs = []
l_data_endbondtorsion_coeffs = []
l_data_angletorsion_coeffs = []
l_data_angleangletorsion_coeffs = []
l_data_bondbond13_coeffs = []
l_data_angleangle_coeffs = []
# non-point-like particles:
l_data_ellipsoids = []
l_data_lines = []
l_data_triangles = []
# automatic generation of bonded interactions by type:
l_data_angles_by_type = []
l_data_dihedrals_by_type = []
l_data_impropers_by_type = []
atoms_already_read = False
some_pair_coeffs_read = False
complained_atom_style_mismatch = False
argv = sys.argv
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-columns':
if i+1 >= len(argv):
raise InputError('Error: the \"'+argv[i]+'\" argument should be followed by a quoted\n'
' string which contains a space-delimited list of the names of\n'
                                 '                  columns in the \"Atoms\" section of the LAMMPS data file.\n'
' If the list contains the symbols:\n'
' \"atom-ID\" or \"atomid\", they are interpreted\n'
' as unique atom ID numbers, and columns named\n'
' \"atom-type\" or \"atomtype\" are interpreted\n'
' as atom types. Finally, columns named\n'
                                 '                  \"molecule-ID\", \"molecule\", \"mol-ID\", or \"mol\"\n'
' are interpreted as unique molecule id numbers.\n'
'Example:\n'
' '+argv[i]+' \'atom-ID atom-type q polarizability molecule-ID x y z\'\n'
' defines a custom atom_style containing the properties\n'
' atom-ID atom-type q polarizability molecule-ID x y z\n'
' Make sure you enclose the entire list in quotes.\n');
column_names = argv[i+1].strip('\"\'').strip().split()
del(argv[i:i+2])
elif ((argv[i] == '-name') or
(argv[i] == '-molname') or
(argv[i] == '-molecule-name') or
(argv[i] == '-molecule_name')):
if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a molecule type name.\n')
cindent = 2
indent += cindent
mol_name = argv[i+1]
del(argv[i:i+2])
elif ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by an atom_style name.\n'
' (or single quoted string which includes a space-separated\n'
' list of column names).\n')
atom_style_undefined = False
column_names = AtomStyle2ColNames(argv[i+1])
if (argv[i+1].strip().split()[0] in g_style_map):
l_in_init.append((' '*indent) + 'atom_style ' + argv[i+1] + '\n')
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n')
del(argv[i:i+2])
elif ((argv[i].lower() == '-id') or
#(argv[i].lower() == '-a') or
#(argv[i].lower() == '-atoms') or
(argv[i].lower() == '-atomid') or
#(argv[i].lower() == '-atomids') or
(argv[i].lower() == '-atom-id')
#(argv[i].lower() == '-atom-ids') or
#(argv[i].lower() == '-$atom') or
#(argv[i].lower() == '-$atoms')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers\n'
                                 '                 (or strings). These identify the group of atoms you want\n'
' to include in the template you are creating.\n')
atomid_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomid, max_sel_atomid = IntervalListToMinMax(atomid_selection)
del(argv[i:i+2])
elif ((argv[i].lower() == '-type') or
#(argv[i].lower() == '-t') or
(argv[i].lower() == '-atomtype') or
(argv[i].lower() == '-atom-type')
#(argv[i].lower() == '-atomtypes') or
#(argv[i].lower() == '-atom-types') or
#(argv[i].lower() == '-@atom') or
#(argv[i].lower() == '-@atoms') or
#(argv[i].lower() == '-@atomtype') or
#(argv[i].lower() == '-@atomtypes')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
                                 '                 (or strings). These identify the group of atom types you want\n'
' to include in the template you are creating.\n')
atomtype_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomtype, max_sel_atomtype = IntervalListToMinMax(atomtype_selection)
del(argv[i:i+2])
elif ((argv[i].lower() == '-mol') or
#(argv[i].lower() == '-m') or
(argv[i].lower() == '-molid') or
#(argv[i].lower() == '-molids') or
(argv[i].lower() == '-mol-id') or
#(argv[i].lower() == '-mol-ids') or
#(argv[i].lower() == '-molecule') or
(argv[i].lower() == '-moleculeid') or
(argv[i].lower() == '-molecule-id')
#(argv[i].lower() == '-molecules') or
#(argv[i].lower() == '-molecule-ids') or
#(argv[i].lower() == '-$mol') or
#(argv[i].lower() == '-$molecule')
):
if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
' (or strings). These identify the group of molecules you want to\n'
' include in the template you are creating.\n')
molid_selection += LammpsSelectToIntervals(argv[i+1])
del(argv[i:i+2])
else:
i += 1
if atom_style_undefined:
# The default atom_style is "full"
column_names = AtomStyle2ColNames('full')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
#---------------------------------------------------------
#-- The remaining arguments are files that the user wants
#-- us to read and convert. It is typical to have
#-- multiple input files, because LAMMPS users often
#-- store their force field parameters in either the LAMMPS
#-- data files and input script files, or both.
#-- We want to search all of the LAMMPS input files in
#-- order to make sure we extracted all the force field
#-- parameters (coeff commands).
#---------------------------------------------------------
for i_arg in range(1,len(argv)):
fname = argv[i_arg]
try:
lammps_file = open(fname, 'r')
except IOError:
raise InputError('Error: unrecognized argument (\"'+fname+'\"),\n'
' OR unable to open file:\n'
'\n'
' \"'+fname+'\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name,\n'
' then there is a problem in your argument list.)\n')
sys.stderr.write('reading file \"'+fname+'\"\n')
atomid2type = {}
atomid2mol = {}
data_file_header_names = set(['LAMMPS Description',
'Atoms', 'Masses', 'Velocities', 'Bonds',
'Angles', 'Dihedrals', 'Impropers',
'Pair Coeffs',
'Bond Coeffs', 'Angle Coeffs',
'Dihedral Coeffs', 'Improper Coeffs',
#class2 force fields:
'BondBond Coeffs', 'BondAngle Coeffs',
'MiddleBondTorsion Coeffs', 'EndBondTorsion Coeffs',
'AngleTorsion Coeffs', 'AngleAngleTorsion Coeffs',
'BondBond13 Coeffs',
'AngleAngle Coeffs',
# non-point-like particles:
'Ellipsoids', 'Triangles', 'Lines',
#specifying bonded interactions by type:
'Angles By Type', 'Dihedrals By Type', 'Impropers By Type'
])
lex=LineLex(lammps_file, fname)
lex.source_triggers = set(['include','import'])
# set up lex to accept most characters in file names:
lex.wordterminators = '(){}' + lex.whitespace
# set up lex to understand the "include" statement:
lex.source = 'include'
lex.escape = '\\'
while lex:
infile = lex.infile
lineno = lex.lineno
line = lex.ReadLine()
if (lex.infile != infile):
infile = lex.infile
lineno = lex.lineno
#sys.stderr.write(' processing \"'+line.strip()+'\", (\"'+infile+'\":'+str(lineno)+')\n')
if line == '':
break
tokens = line.strip().split()
if (len(tokens) > 0):
if ((tokens[0] == 'atom_style') and
atom_style_undefined):
sys.stderr.write(' Atom Style found. Processing: \"'+line.strip()+'\"\n')
if atoms_already_read:
raise InputError('Error: The file containing the \"atom_style\" command must\n'
' come before the data file in the argument list.\n'
                                             '       (The ltemplify.py program needs to know the atom style before reading\n'
' the data file. Either change the order of arguments so that the\n'
' LAMMPS input script file is processed before the data file, or use\n'
' the \"-atom_style\" command line argument to specify the atom_style.)\n')
column_names = AtomStyle2ColNames(line.split()[1])
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n\n')
l_in_init.append((' '*indent)+line.lstrip())
elif (tokens[0] in set(['units',
'angle_style',
'bond_style',
'dihedral_style',
                                    'improper_style',
'min_style',
'pair_style',
'pair_modify',
'special_bonds',
'kspace_style',
'kspace_modify'])):
l_in_init.append((' '*indent)+line.lstrip())
#if (line.strip() == 'LAMMPS Description'):
# sys.stderr.write(' processing \"'+line.strip()+'\"\n')
# # skip over this section
# while lex:
# line = lex.ReadLine()
# if line.strip() in data_file_header_names:
# lex.push_raw_text(line) # <- Save line for later
# break
elif (line.strip() == 'Atoms'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
atoms_already_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if ((len(tokens) <= i_atomid) or
(len(tokens) <= i_atomtype) or
((i_molid != None) and
(len(tokens) <= i_molid))):
raise InputError('Error: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
elif ((len(tokens) != len(column_names)) and
(not complained_atom_style_mismatch)):
complained_atom_style_mismatch = True
sys.stderr.write('Warning: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
# this is not a very serious warning.
#no_warnings = False <--no need. commenting out
atomid = Intify(tokens[i_atomid])
atomtype = Intify(tokens[i_atomtype])
molid = None
if i_molid:
molid = Intify(tokens[i_molid])
atomid2type[atomid] = atomtype
if i_molid:
atomid2mol[atomid] = molid
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[i_atomid] = '$atom:id'+tokens[i_atomid]
tokens[i_atomtype] = '@atom:type'+tokens[i_atomtype]
if i_molid:
tokens[i_molid] = '$mol:id'+tokens[i_molid]
l_data_atoms.append((' '*indent)+(' '.join(tokens)+'\n'))
needed_atomids.add(atomid)
needed_atomtypes.add(int(atomtype))
for atomtype in needed_atomtypes:
if type(atomtype) is int:
if ((min_needed_atomtype == None) or
(min_needed_atomtype > atomtype)):
min_needed_atomtype = atomtype
if ((max_needed_atomtype == None) or
(max_needed_atomtype < atomtype)):
max_needed_atomtype = atomtype
elif (line.strip() == 'Masses'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomtype = Intify(tokens[0])
if BelongsToSel(atomtype, atomtype_selection):
#tokens[0] = '@atom:type'+tokens[0]
l_data_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Velocities'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                            molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_velocities.append((' '*indent)+(' '.join(tokens)+'\n'))
# non-point-like-particles:
elif (line.strip() == 'Ellipsoids'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                            molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_ellipsoids.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Lines'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                            molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_lines.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Triangles'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                            molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_triangles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Bonds'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 4):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Bonds section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$bond:id'+tokens[0]
#tokens[1] = '@bond:type'+tokens[1]
atomids = [None, None]
atomtypes = [None, None]
molids = [None, None]
in_selections = True
some_in_selection = False
for n in range(0,2):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_bonds.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS BONDS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,2):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected are bonded\n'
' to other atoms you didn\'t select.\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Angles'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line == '':
break
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 5):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Angles section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$angle:id'+tokens[0]
#tokens[1] = '@angle:type'+tokens[1]
atomids = [None, None, None]
atomtypes = [None, None, None]
molids = [None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,3):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_angles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS ANGLES\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,3):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 3-body \"Angle\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Dihedrals'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Dihedrals section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$dihedral:id'+tokens[0]
#tokens[1] = '@dihedral:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_dihedrals.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS DIHEDRALS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Dihedral\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Impropers'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Impropers section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$improper:id'+tokens[0]
#tokens[1] = '@improper:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_impropers.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS IMPROPERS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Improper\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Bond Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@bond:type'+tokens[0]
l_data_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedral Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Improper Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Pair Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Pair Coeffs section:\n'
' \"'+line.strip()+'\"\n')
atomtype_i_str = tokens[0]
if '*' in atomtype_i_str:
raise InputError('PROBLEM near or before '+ErrorLeader(infile, lineno)+'\n'
' As of 2012-7, moltemplate forbids use of the "\*\" wildcard\n'
' character in the \"Pair Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
if ((not i) or
BelongsToSel(i, atomtype_selection)):
i_str = '@atom:type'+str(i)
tokens[0] = i_str
l_data_pair_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'pair_coeff'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical pair_coeff command:\n'
' \"'+line.strip()+'\"\n')
l_in_pair_coeffs.append(' '*indent+line.strip())
elif (tokens[0] == 'mass'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical \"mass\" command:\n'
' \"'+line.strip()+'\"\n')
l_in_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'bond_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical bond_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@bond:type'+tokens[1]
l_in_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'angle_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical angle_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@angle:type'+tokens[1]
l_in_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'dihedral_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical dihedral_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@dihedral:type'+tokens[1]
l_in_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'improper_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical improper_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@improper:type'+tokens[1]
l_in_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
# -- class2 force fields --
elif (line.strip() == 'BondBond Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_bondbond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondAngle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_bondangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'MiddleBondTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_middlebondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'EndBondTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_endbondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_angletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngleTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_angleangletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondBond13 Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_bondbond13_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type'+tokens[0]
l_data_angleangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angles By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_angles_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedrals By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedrals_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Impropers By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type'+tokens[0]
l_data_impropers_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
else:
sys.stderr.write(' Ignoring line \"'+line.strip()+'\"\n')
sys.stderr.write('\n\n')
# --- Now delete items that were not selected from the other lists ---
# --- MASSES ---
# delete masses for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_masses):
line = l_data_masses[i_line]
tokens = line.strip().split()
atomtype = Intify(tokens[0])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del(l_data_masses[i_line])
else:
tokens[0] = '@atom:type'+str(atomtype)
l_data_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
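    # For reference, the loop above rewrites each retained "Masses" line by
    # tagging the atom type, e.g. a data-file line such as "1 12.011" (the
    # numbers are only illustrative) becomes "@atom:type1 12.011"; masses for
    # atom types outside the selection are deleted.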
# --- PAIR COEFFS ---
# delete data_pair_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pair_coeffs):
line = l_data_pair_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon = tokens[0].split(':')
assert(len(split_colon) == 2)
atomtype = Intify(split_colon[1])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del(l_data_pair_coeffs[i_line])
else:
i_line += 1
# delete in_pair_coeffs for atom we don't care about anymore:
i_line = 0
while i_line < len(l_in_pair_coeffs):
line = l_in_pair_coeffs[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
atomtype_j_str = tokens[2]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
        #                     '       not to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if ('*' in atomtype_j_str):
atomtype_j_tokens = atomtype_j_str.split('*')
if atomtype_j_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
j_a = min_sel_atomtype
else:
j_a = min_needed_atomtype
else:
j_a = Intify(atomtype_j_tokens[0])
if atomtype_j_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
j_b = max_sel_atomtype
else:
j_b = max_needed_atomtype
else:
j_b = Intify(atomtype_j_tokens[1])
else:
j_a = j_b = Intify(atomtype_j_str)
j_a_final = None
j_b_final = None
for j in range(j_a, j_b+1):
if ((j in needed_atomtypes) or (min_sel_atomtype <= j)):
j_a_final = j
break
for j in reversed(range(j_a, j_b+1)):
if ((j in needed_atomtypes) or (max_sel_atomtype >= j)):
j_b_final = j
break
#if j_a_final and j_b_final:
# if j_a_final == j_b_final:
# j_str = '@atom:type'+str(j_a_final)
# tokens[1] = j_str
# else:
# j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del(l_in_pair_coeffs[i_line])
elif (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
del(l_in_pair_coeffs[i_line])
for i in range(i_a_final, i_b_final+1):
for j in range(j_a_final, j_b_final+1):
if j >= i:
tokens[1] = '@atom:type'+str(i)
tokens[2] = '@atom:type'+str(j)
l_in_pair_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
tokens[1] = '@atom:type'+tokens[1]
tokens[2] = '@atom:type'+tokens[2]
l_in_pair_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete mass commands for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_in_masses):
line = l_in_masses[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
        #                     '       not to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if not (i_a_final and i_b_final):
del(l_in_masses[i_line])
elif ('*' in atomtype_i_str):
del(l_in_masses[i_line])
for i in range(i_a_final, i_b_final+1):
tokens[1] = '@atom:type'+str(i)
l_in_masses.insert(i_line, (' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
tokens[1] = '@atom:type'+str(i_a)
l_in_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- BONDS AND BOND COEFFS ---
# delete lines from data_bonds if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_bonds):
line = l_data_bonds[i_line]
tokens = line.strip().split()
assert(len(tokens) == 4)
bondid = Intify(tokens[0])
bondtype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$bond:id'+str(bondid)
tokens[1] = '@bond:type'+str(bondtype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
needed_bondids.add(bondid)
needed_bondtypes.add(bondtype)
l_data_bonds[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_bonds[i_line])
# delete data_bond_coeffs for bondtypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bond_coeffs):
line = l_data_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype = Intify(tokens[0])
if (not (bondtype in needed_bondtypes)):
del(l_data_bond_coeffs[i_line])
else:
tokens[0] = '@bond:type'+str(bondtype)
l_data_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_bond_coeffs for bondtypes we don't care about anymore:
for bondtype in needed_bondtypes:
if type(bondtype) is int:
if ((min_needed_bondtype == None) or
(min_needed_bondtype > bondtype)):
min_needed_bondtype = bondtype
if ((max_needed_bondtype == None) or
(max_needed_bondtype < bondtype)):
max_needed_bondtype = bondtype
i_line = 0
while i_line < len(l_in_bond_coeffs):
line = l_in_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype_str = tokens[1]
if ('*' in bondtype_str):
bondtype_tokens = bondtype_str.split('*')
if bondtype_tokens[0] == '':
i_a = min_needed_bondtype
else:
i_a = Intify(bondtype_tokens[0])
if bondtype_tokens[1] == '':
i_b = max_needed_bondtype
else:
i_b = Intify(bondtype_tokens[1])
else:
i_a = i_b = Intify(bondtype_str)
if i_a < min_needed_bondtype:
i_a = min_needed_bondtype
if i_b > max_needed_bondtype:
i_b = max_needed_bondtype
#if i_a == i_b:
# i_str = '@bond:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{bond:type'+str(j_a)+'}*@{bond:type'+str(j_b)+'}'
if ('*' in bondtype_str):
del(l_in_bond_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_bondtypes):
tokens[1] = '@bond:type'+str(i)
l_in_bond_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_bondtypes):
tokens[1] = '@bond:type'+str(i_a)
l_in_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_bond_coeffs[i_line])
# --- ANGLES AND ANGLE COEFFS ---
# delete lines from data_angles if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_angles):
line = l_data_angles[i_line]
tokens = line.strip().split()
assert(len(tokens) == 5)
angleid = Intify(tokens[0])
angletype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$angle:id'+str(angleid)
tokens[1] = '@angle:type'+str(angletype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
needed_angleids.add(angleid)
needed_angletypes.add(angletype)
l_data_angles[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_angles[i_line])
# delete data_angle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angle_coeffs):
line = l_data_angle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del(l_data_angle_coeffs[i_line])
else:
tokens[0] = '@angle:type'+str(angletype)
l_data_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_angle_coeffs for angletypes we don't care about anymore:
for angletype in needed_angletypes:
if type(angletype) is int:
if ((min_needed_angletype == None) or
(min_needed_angletype > angletype)):
min_needed_angletype = angletype
if ((max_needed_angletype == None) or
(max_needed_angletype < angletype)):
max_needed_angletype = angletype
i_line = 0
while i_line < len(l_in_angle_coeffs):
line = l_in_angle_coeffs[i_line]
tokens = line.strip().split()
angletype_str = tokens[1]
if ('*' in angletype_str):
angletype_tokens = angletype_str.split('*')
if angletype_tokens[0] == '':
i_a = min_needed_angletype
else:
i_a = Intify(angletype_tokens[0])
if angletype_tokens[1] == '':
i_b = max_needed_angletype
else:
i_b = Intify(angletype_tokens[1])
else:
i_a = i_b = Intify(angletype_str)
if i_a < min_needed_angletype:
i_a = min_needed_angletype
if i_b > max_needed_angletype:
i_b = max_needed_angletype
#if i_a == i_b:
# i_str = '@angle:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{angle:type'+str(j_a)+'}*@{angle:type'+str(j_b)+'}'
if ('*' in angletype_str):
del(l_in_angle_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_angletypes):
tokens[1] = '@angle:type'+str(i)
l_in_angle_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_angletypes):
tokens[1] = '@angle:type'+str(i_a)
l_in_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_angle_coeffs[i_line])
# --- DIHEDRALS AND DIHEDRAL COEFFS ---
# delete lines from data_dihedrals if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_dihedrals):
line = l_data_dihedrals[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
dihedralid = Intify(tokens[0])
dihedraltype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$dihedral:id'+str(dihedralid)
tokens[1] = '@dihedral:type'+str(dihedraltype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
tokens[5] = '$atom:id'+str(atomid4)
needed_dihedralids.add(dihedralid)
needed_dihedraltypes.add(dihedraltype)
l_data_dihedrals[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_dihedrals[i_line])
# delete data_dihedral_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_dihedral_coeffs):
line = l_data_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del(l_data_dihedral_coeffs[i_line])
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_dihedral_coeffs for dihedraltypes we don't care about anymore:
for dihedraltype in needed_dihedraltypes:
if type(dihedraltype) is int:
if ((min_needed_dihedraltype == None) or
(min_needed_dihedraltype > dihedraltype)):
min_needed_dihedraltype = dihedraltype
if ((max_needed_dihedraltype == None) or
(max_needed_dihedraltype < dihedraltype)):
max_needed_dihedraltype = dihedraltype
i_line = 0
while i_line < len(l_in_dihedral_coeffs):
line = l_in_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype_str = tokens[1]
if ('*' in dihedraltype_str):
dihedraltype_tokens = dihedraltype_str.split('*')
if dihedraltype_tokens[0] == '':
i_a = min_needed_dihedraltype
else:
i_a = Intify(dihedraltype_tokens[0])
if dihedraltype_tokens[1] == '':
i_b = max_needed_dihedraltype
else:
i_b = Intify(dihedraltype_tokens[1])
else:
i_a = i_b = Intify(dihedraltype_str)
if i_a < min_needed_dihedraltype:
i_a = min_needed_dihedraltype
if i_b > max_needed_dihedraltype:
i_b = max_needed_dihedraltype
#if i_a == i_b:
# i_str = '@dihedral:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{dihedral:type'+str(j_a)+'}*@{dihedral:type'+str(j_b)+'}'
if ('*' in dihedraltype_str):
del(l_in_dihedral_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i)
l_in_dihedral_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i_a)
l_in_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_dihedral_coeffs[i_line])
# --- IMPROPERS AND IMPROPER COEFFS ---
# delete lines from data_impropers if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_impropers):
line = l_data_impropers[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
improperid = Intify(tokens[0])
impropertype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$improper:id'+str(improperid)
tokens[1] = '@improper:type'+str(impropertype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
tokens[5] = '$atom:id'+str(atomid4)
needed_improperids.add(improperid)
needed_impropertypes.add(impropertype)
l_data_impropers[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_impropers[i_line])
# delete data_improper_coeffs for impropertypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_improper_coeffs):
line = l_data_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del(l_data_improper_coeffs[i_line])
else:
tokens[0] = '@improper:type'+str(impropertype)
l_data_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_improper_coeffs for impropertypes we don't care about anymore:
for impropertype in needed_impropertypes:
if type(impropertype) is int:
if ((min_needed_impropertype == None) or
(min_needed_impropertype > impropertype)):
min_needed_impropertype = impropertype
if ((max_needed_impropertype == None) or
(max_needed_impropertype < impropertype)):
max_needed_impropertype = impropertype
i_line = 0
while i_line < len(l_in_improper_coeffs):
line = l_in_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype_str = tokens[1]
if ('*' in impropertype_str):
impropertype_tokens = impropertype_str.split('*')
if impropertype_tokens[0] == '':
i_a = min_needed_impropertype
else:
i_a = Intify(impropertype_tokens[0])
if impropertype_tokens[1] == '':
i_b = max_needed_impropertype
else:
i_b = Intify(impropertype_tokens[1])
else:
i_a = i_b = Intify(impropertype_str)
if i_a < min_needed_impropertype:
i_a = min_needed_impropertype
if i_b > max_needed_impropertype:
i_b = max_needed_impropertype
#if i_a == i_b:
# i_str = '@improper:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{improper:type'+str(j_a)+'}*@{improper:type'+str(j_b)+'}'
if ('*' in impropertype_str):
del(l_in_improper_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_impropertypes):
tokens[1] = '@improper:type'+str(i)
l_in_improper_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_impropertypes):
tokens[1] = '@improper:type'+str(i_a)
l_in_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_improper_coeffs[i_line])
if not some_pair_coeffs_read:
sys.stderr.write('Warning: No \"pair coeffs\" set.\n'
' (No interactions between non-bonded atoms defined.)\n')
no_warnings = False
#sys.stderr.write('Writing ttree data to standard out.\n'
# ' You can redirect this to a file using:\n'+
# ' '+' '.join(sys.argv)+' > filename.ttree\n'
# ' ----------------------\n')
if mol_name != '':
sys.stdout.write(mol_name + ' {\n')
if len(l_in_init) > 0:
sys.stdout.write('\n### LAMMPS commands for initialization\n'
'### (These can be overridden later.)\n\n')
l_in_init.insert(0, (' '*cindent)+'write_once(\"'+in_init+'\") {\n')
l_in_init.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_init))
if len(l_in_settings) > 0:
sys.stdout.write('\n### LAMMPS commands for settings\n'
'### (These can be overridden later.)\n\n')
l_in_settings.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_settings.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_settings))
non_empty_output = True
if len(l_in_masses) > 0:
l_in_masses.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_masses))
non_empty_output = True
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
non_empty_output = True
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
non_empty_output = True
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
non_empty_output = True
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
non_empty_output = True
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
non_empty_output = True
if non_empty_output:
sys.stdout.write('\n### DATA sections\n\n')
if len(l_data_masses) > 0:
l_data_masses.insert(0, (' '*cindent)+'write_once(\"'+data_masses+'\") {\n')
l_data_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_masses))
non_empty_output = True
if len(l_data_bond_coeffs) > 0:
l_data_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bond_coeffs+'\") {\n')
l_data_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bond_coeffs))
non_empty_output = True
if len(l_data_angle_coeffs) > 0:
l_data_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angle_coeffs+'\") {\n')
l_data_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angle_coeffs))
non_empty_output = True
if len(l_data_dihedral_coeffs) > 0:
l_data_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_dihedral_coeffs+'\") {\n')
l_data_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedral_coeffs))
non_empty_output = True
if len(l_data_improper_coeffs) > 0:
l_data_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_improper_coeffs+'\") {\n')
l_data_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_improper_coeffs))
non_empty_output = True
if len(l_data_pair_coeffs) > 0:
l_data_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_pair_coeffs+'\") {\n')
l_data_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pair_coeffs))
non_empty_output = True
# class2 force fields:
if len(l_data_bondbond_coeffs) > 0:
l_data_bondbond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond_coeffs+'\") {\n')
l_data_bondbond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond_coeffs))
non_empty_output = True
if len(l_data_bondangle_coeffs) > 0:
l_data_bondangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondangle_coeffs+'\") {\n')
l_data_bondangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondangle_coeffs))
non_empty_output = True
if len(l_data_middlebondtorsion_coeffs) > 0:
l_data_middlebondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_middlebondtorsion_coeffs+'\") {\n')
l_data_middlebondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_middlebondtorsion_coeffs))
non_empty_output = True
if len(l_data_endbondtorsion_coeffs) > 0:
l_data_endbondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_endbondtorsion_coeffs+'\") {\n')
l_data_endbondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_endbondtorsion_coeffs))
non_empty_output = True
if len(l_data_angletorsion_coeffs) > 0:
l_data_angletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angletorsion_coeffs+'\") {\n')
l_data_angletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angletorsion_coeffs))
non_empty_output = True
if len(l_data_angleangletorsion_coeffs) > 0:
l_data_angleangletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangletorsion_coeffs+'\") {\n')
l_data_angleangletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangletorsion_coeffs))
non_empty_output = True
if len(l_data_bondbond13_coeffs) > 0:
l_data_bondbond13_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond13_coeffs+'\") {\n')
l_data_bondbond13_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond13_coeffs))
non_empty_output = True
if len(l_data_angleangle_coeffs) > 0:
l_data_angleangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangle_coeffs+'\") {\n')
l_data_angleangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangle_coeffs))
non_empty_output = True
# automatic generation of bonded interactions by type:
if len(l_data_angles_by_type) > 0:
l_data_angles_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_angles_by_type+'\") {\n')
l_data_angles_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles_by_type))
non_empty_output = True
if len(l_data_dihedrals_by_type) > 0:
l_data_dihedrals_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_dihedrals_by_type+'\") {\n')
l_data_dihedrals_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals_by_type))
non_empty_output = True
if len(l_data_impropers_by_type) > 0:
l_data_impropers_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_impropers_by_type+'\") {\n')
l_data_impropers_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers_by_type))
non_empty_output = True
if len(l_data_atoms) > 0:
l_data_atoms.insert(0, (' '*cindent)+'write(\"'+data_atoms+'\") {\n')
l_data_atoms.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_atoms))
non_empty_output = True
else:
sys.stderr.write('Warning: missing \"Atoms\" section.\n'
' (Did you include a LAMMPS data file in your argument list?)\n')
no_warnings = False
# non-point-like particles
if len(l_data_ellipsoids) > 0:
l_data_ellipsoids.insert(0, (' '*cindent)+'write(\"'+data_ellipsoids+'\") {\n')
l_data_ellipsoids.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_ellipsoids))
if len(l_data_lines) > 0:
l_data_lines.insert(0, (' '*cindent)+'write(\"'+data_lines+'\") {\n')
l_data_lines.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_lines))
if len(l_data_triangles) > 0:
l_data_triangles.insert(0, (' '*cindent)+'write(\"'+data_triangles+'\") {\n')
l_data_triangles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_triangles))
if len(l_data_velocities) > 0:
l_data_velocities.insert(0, (' '*cindent)+'write(\"'+data_velocities+'\") {\n')
l_data_velocities.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_velocities))
if len(l_data_bonds) > 0:
l_data_bonds.insert(0, (' '*cindent)+'write(\"'+data_bonds+'\") {\n')
l_data_bonds.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bonds))
non_empty_output = True
if len(l_data_angles) > 0:
l_data_angles.insert(0, (' '*cindent)+'write(\"'+data_angles+'\") {\n')
l_data_angles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles))
non_empty_output = True
if len(l_data_dihedrals) > 0:
l_data_dihedrals.insert(0, (' '*cindent)+'write(\"'+data_dihedrals+'\") {\n')
l_data_dihedrals.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals))
non_empty_output = True
if len(l_data_impropers) > 0:
l_data_impropers.insert(0, (' '*cindent)+'write(\"'+data_impropers+'\") {\n')
l_data_impropers.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers))
non_empty_output = True
if mol_name != '':
sys.stdout.write('\n} # end of \"'+mol_name+'\" type definition\n')
if non_empty_output and no_warnings:
sys.stderr.write('WARNING: The '+g_program_name+' script has not been rigorously tested.\n'
' Exotic (manybody) pair styles (and other force-field styles\n'
' with unusual syntax) are not understood by '+g_program_name+'\n'
' (although they are supported by moltemplate). Please look over\n'
' the resulting LT file and check for errors. Convert any remaining\n'
' atom, bond, angle, dihedral, or improper id or type numbers to the\n'
' corresponding variables. Feel free to report any bugs you find.\n'
' (-Andrew Jewett 2012-12-11)\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)
|
gpl-2.0
| -8,459,022,808,084,960,000
| 45.248771
| 148
| 0.452418
| false
| 3.89121
| false
| false
| false
|
axptwig/CSCI-2963--Intro-to-Open-Source
|
files/Lab7/words5unordered.py
|
1
|
2890
|
"""
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile words_dat.txt.gz. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book [1]_,[2]_.
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg (hagberg@lanl.gov),
# Brendt Wohlberg,
# hughdbrown@yahoo.com
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from itertools import permutations
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
from string import ascii_lowercase as lowercase
G = nx.Graph(name="words")
lookup = dict((c,lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
perms = [''.join(p) for p in permutations(word)]
for p in perms:
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i+1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j+1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
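# Illustrative sketch (not called by this script): the classic Knuth criterion
# links two words when they differ in exactly one position. This variant
# relaxes that by also permuting letters in edit_distance_one() above, so
# letter order does not matter. The helper below only shows the basic
# one-position test; it is an assumption added for clarity, not project code.
def differ_in_one_position(w1, w2):
    """Return True if w1 and w2 have equal length and differ in exactly one position."""
    return len(w1) == len(w2) and sum(a != b for a, b in zip(w1, w2)) == 1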
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
import gzip
fh=gzip.open('words_dat.txt.gz','r')
words=set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w=str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
from networkx import *
G=words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter, order doesn't matter.")
print("Graph has %d nodes with %d edges"
%(number_of_nodes(G),number_of_edges(G)))
print("%d connected components" % number_connected_components(G))
for (source,target) in [('chaos','order'),('nodes','graph'),('moron','smart'),('pound','marks')]:
print("Shortest path between %s and %s is"%(source,target))
try:
sp=shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
mit
| 7,894,361,437,417,402,000
| 35.125
| 100
| 0.582699
| false
| 3.559113
| false
| false
| false
|
ph1l/ocemr
|
ocemr/modelviz.7.py
|
1
|
6810
|
#!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-d] [-i <model_names>] [-e <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-d, --disable_fields
don't show the class member fields.
-i, --include_models=User,Person,Car
only include selected models in graph.
-e, --exclude_models=User,Person,Car
exclude selected models from graph.
"""
__version__ = "0.8"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Christopher Schmidt <crschmidt@metacarta.com>",
]
import getopt, sys
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% for model in models %}
{% for relation in model.relations %}
{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{{ model.name }} -> {{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
{% for model in models %}
{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = kwargs.get('include_models', [])
exclude_models = kwargs.get('exclude_models', [])
dot = head_template
for app_label in app_labels:
app = models.get_app(app_label)
graph = Context({
'name': '"%s"' % app.__name__,
'disable_fields': disable_fields,
'models': []
})
for appmodel in get_models(app):
# consider given model name ?
def consider(model_name):
return (not include_models or model_name in include_models) and (not model_name in exclude_models)
if not consider(appmodel._meta.object_name):
continue
model = {
'name': appmodel.__name__,
'fields': [],
'relations': []
}
# model attributes
def add_attributes():
model['fields'].append({
'name': field.name,
'type': type(field).__name__,
'blank': field.blank
})
for field in appmodel._meta.fields:
add_attributes()
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
add_attributes()
# relations
def add_relation(extras=""):
_rel = {
'target': field.rel.to.__name__,
'type': type(field).__name__,
'name': field.name,
'arrows': extras
}
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
for field in appmodel._meta.fields:
if isinstance(field, ForeignKey):
add_relation()
elif isinstance(field, OneToOneField):
add_relation("[arrowhead=none arrowtail=none]")
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
if isinstance(field, ManyToManyField):
add_relation("[arrowhead=normal arrowtail=normal]")
elif isinstance(field, GenericRelation):
add_relation(
'[style="dotted"] [arrowhead=normal arrowtail=normal]')
graph['models'].append(model)
t = Template(body_template)
dot += '\n' + t.render(graph)
dot += '\n' + tail_template
return dot
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hdi:e:",
["help", "disable_fields", "include_models=", "exclude_models="])
except getopt.GetoptError, error:
print __doc__
sys.exit(error)
else:
if not args:
print __doc__
sys.exit()
kwargs = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
print __doc__
sys.exit()
if opt in ("-d", "--disable_fields"):
kwargs['disable_fields'] = True
if opt in ("-i", "--include_models"):
kwargs['include_models'] = arg.split(',')
if opt in ("-e", "--exclude_models"):
kwargs['exclude_models'] = arg.split(',')
print generate_dot(args, **kwargs)
if __name__ == "__main__":
main()
|
gpl-3.0
| -5,578,806,056,656,196,000
| 29.538117
| 114
| 0.5442
| false
| 3.793872
| false
| false
| false
|
mkrapp/pystable
|
src/pystable.py
|
1
|
11946
|
from ConfigParser import SafeConfigParser
import sys, os
from string import Template
import markdown2
import shutil
import glob
from dateutil.parser import parse
import calendar
def parse_config(posts_directory):
global site_title, site_subtitle, site_author, \
site_aboutme, site_info, site_syntax, \
site_url, site_output, site_theme, style_file, \
contact
parser = SafeConfigParser()
config_file = posts_directory+'/site.config'
parser.read(config_file)
site_title = parser.get("info","title")
site_subtitle = parser.get("info","subtitle")
site_author = parser.get("info","author")
site_aboutme = parser.get("info","aboutme")
site_info = parser.get("info","info")
site_syntax = parser.get("config","syntax")
site_url = parser.get("config","url")
site_output = parser.get("config","output")
site_theme = parser.get("config","theme")
style_file = site_url+'/'+site_output+'/styles.css'
contact = {}
contact["twitter"] = parser.get("contact","twitter")
contact["facebook"] = parser.get("contact","facebook")
contact["email"] = parser.get("contact","email")
contact["google+"] = parser.get("contact","google+")
contact["github"] = parser.get("contact","github")
contact["linkedin"] = parser.get("contact","linkedin")
print contact
def parse_posts(posts_directory):
files = glob.glob(posts_directory+'/*.txt')
posts = []
for p in files:
post = {}
meta = []
content = ""
is_content = False
f = open(p,'r')
lines = f.readlines()
for l in lines:
if is_content == False and l.strip():
meta.append(l.strip())
else:
is_content = True
if is_content:
content += l
f.close()
meta_dict = { k.lower().strip():v.strip() for k, v in dict(s.split(':',1) for s in meta).iteritems()}
post["meta"] = meta_dict
post["content"] = content[1:]
# parse tags from meta
post["tags"] = [tag.strip() for tag in meta_dict["tags"].split(',')]
# parse date into year, month from meta
date = parse(post['meta']['date'])
post["year"] = date.year
post["month"] = date.month
post["date"] = date
posts.append(post)
return posts
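# Sketch of the post layout parse_posts() expects (a hypothetical example, not
# part of the project): "Key: value" meta lines, a blank line, then the body in
# the syntax configured in site.config. Tags are comma-separated and the date
# is parsed with dateutil.
_EXAMPLE_POST = """Title: Hello world
Date: 2016-01-31
Tags: python, blogging

This is the post body, written in markdown.
"""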
def parse_dates(posts):
# parse dates of posts according to years and months
date_list = [parse(p['meta']['date']) for p in posts]
years = [parse(p['meta']['date']).year for p in posts]
years = list(set(years))
dates = {}
for year in years:
dates[year] = []
for date in date_list:
if date.year == year: dates[year].append(date.month)
return dates
def parse_tags(posts):
# parse tags
tags = [p["tags"] for p in posts]
all_tags = [item for sublist in tags for item in sublist]
tags = list(set(all_tags))
tags = [(tag,all_tags.count(tag)) for tag in tags]
return tags
def generate_footer(theme):
footer_tmpl = open(theme+'/footer.tmpl','r')
lines = footer_tmpl.readlines()
footer = ""
disclaimer = 'Created by <a href="https://github.com/mkrapp/pystable" target="_blank">Pystable</a> \
(© 2014-2016 Mario Krapp. All rights reserved.)'
for l in lines:
s = Template(l)
footer += s.safe_substitute(disclaimer=disclaimer)
footer_tmpl.close()
return footer
def generate_header(theme):
header_tmpl = open(theme+'/header.tmpl','r')
lines = header_tmpl.readlines()
header = ""
title = '<a href="'+site_url+'/'+site_output+'/'+'index.html">'+site_title+'</a>'
for l in lines:
s = Template(l)
header += s.safe_substitute(title=title, subtitle=site_subtitle)
header_tmpl.close()
return header
def generate_sidebar(dates,tags,theme):
sidebar_tmpl = open(theme+'/sidebar.tmpl','r')
lines = sidebar_tmpl.readlines()
# contacts
twitter = ""
if contact["twitter"] != "":
twitter = '<a href="https://twitter.com/'+contact["twitter"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/twitter-32-black.png"></img></a>'
email = ""
if contact["email"] != "":
email = '<a href="mailto:'+contact["email"]+'?subject=Mail from '+site_url+'/'+site_output+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/email-32-black.png"></img></a>'
facebook = ""
if contact["facebook"] != "":
facebook = '<a href="https://www.facebook.com/'+contact["facebook"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/facebook-32-black.png"></img></a>'
google = ""
if contact["google+"] != "":
google = '<a href="https://www.plus.google.com/'+contact["google+"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/googleplus-32-black.png"></img></a>'
linkedin = ""
if contact["linkedin"] != "":
linkedin = '<a href="'+contact["linkedin"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/linkedin-32-black.png"></img></a>'
github = ""
if contact["github"] != "":
github = '<a href="https://www.github.com/'+contact["github"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/github-32-black.png"></img></a>'
# archive
archive = "<ul>"
for year in sorted(dates)[::-1]:
archive += "<li>%.4d</li>" % year
archive += "<ul>"
for month in sorted(set(dates[year]))[::-1]:
k = dates[year].count(month)
n = ""
if k>1: n = ' ('+str(k)+')'
link = site_url+'/'+site_output+'/archive/%.4d/%.2d/index.html' % (year,month)
archive += '<li><a href="'+link+'">' + calendar.month_name[month] + n + '</a></li>'
archive += "</ul>"
archive += "</ul>"
# tagcloud
tagcloud = ""
for tag in sorted(tags):
tag_dir = site_output+'/tag/'+tag[0]
tag_dir = tag_dir.replace(' ','%20')
size = 40-25/tag[1]
tagcloud += '<span style="font-size: %.2dpx"><a href=' % size +site_url+'/'+tag_dir+'/index.html>'+tag[0]+'</a></span> '
sidebar = ""
for l in lines:
s = Template(l)
sidebar += s.safe_substitute(author=site_author, aboutme=site_aboutme, info=site_info,
archive=archive, tagcloud=tagcloud[:-2],
twitter=twitter,email=email,facebook=facebook,github=github,google=google,linkedin=linkedin)
sidebar_tmpl.close()
return sidebar
def process_content(content,syntax):
if syntax == 'markdown':
processed_content = markdown2.markdown(content,extras=["tables","fenced-code-blocks"])
if syntax == 'text':
processed_content = content
return processed_content.encode('utf-8')
def generate_main_page(posts,theme):
# create the full main web page
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/index.html','w')
main = ""
for p in posts:
post = generate_post_page(p,theme)
main += post["html_content"]
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, title=site_title, header=header,
sidebar=sidebar, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def generate_tags_page(posts,tags,theme):
# create a full web page for each tag
for tag in tags:
print 'create index.html for tag ' +tag[0]
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/tag/'+tag[0]+'/index.html','w')
main = ""
for p in posts:
if tag[0] in p["tags"]:
main += p["html_content"]
shutil.copy2(site_output+'/'+p["html_file"],site_output+'/tag/'+tag[0]+'/'+p["html_file"])
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, header=header,title=site_title,
sidebar=sidebar, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def generate_post(post,theme):
# read meta and content of post
meta = post["meta"]
content = process_content(post["content"],site_syntax)
# open post template
post_tmpl = open(theme+'/post.tmpl','r')
lines = post_tmpl.readlines()
post_content = ""
post_file = meta["title"].replace(" ","_")+'.html'
tags = ""
# create tags directory according to tag
for t in post["tags"]:
tag_dir = site_output+'/tag/'+t
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
tags += '<a href='+site_url+'/'+tag_dir+'/index.html>'+t+'</a>, '
# create archive directory according to year and month
date_dir = site_output+'/archive/%.4d/%.2d' % (post["year"],post["month"])
if not os.path.exists(date_dir):
os.makedirs(date_dir)
# create post content
for l in lines:
s = Template(l)
post_content += s.safe_substitute(title=meta["title"], date=meta["date"],
content=content, url='./'+post_file,
tags=tags[:-2])
post_tmpl.close()
# append html file name and html-processed content to post
post["html_content"] = post_content
post["html_file"] = post_file
return post
def generate_post_page(post,theme):
# create a full web page for a single post
post_tmpl = open(theme+'/main.html.tmpl','r')
lines = post_tmpl.readlines()
new_post = generate_post(post,theme)
post_file = new_post["html_file"]
html_content = new_post["html_content"]
post_html = open(site_output+'/'+post_file,'w')
for l in lines:
s = Template(l)
post_html.write(s.safe_substitute(style_file=style_file, title=site_title,
header=header, sidebar=sidebar,
footer=footer, main=html_content))
post_html.close()
post_tmpl.close()
return new_post
def generate_archives_page(posts,dates,theme):
# create a full web page for each month of each year (if posts are available for that date)
for year in sorted(dates):
months = list(set(dates[year]))
for month in months:
print 'create index.html for '+calendar.month_name[month]+' %.4d' % year
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/archive/%.4d/%.2d/index.html' % (year,month),'w')
main = ""
for p in posts:
if month == p["month"]:
main += p["html_content"]
shutil.copy2(site_output+'/'+p["html_file"],
site_output+'/archive/%.4d/%.2d/' % (year,month)+p["html_file"])
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, header=header, sidebar=sidebar,
title=site_title, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def create_blog(posts_dir):
global sidebar, footer, header
parse_config(posts_dir)
theme = 'themes/'+site_theme
if not os.path.exists(site_output):
os.makedirs(site_output)
# copy style.css
shutil.copy2(theme+'/styles.css',site_output+'/styles.css')
if not os.path.exists(site_output+'/icons'):
shutil.copytree('themes/icons',site_output+'/icons')
posts = parse_posts(posts_dir)
# sort list of posts in descending order of their date
decorated_posts = [(dict_["date"], dict_) for dict_ in posts]
decorated_posts.sort(reverse=True)
posts = [dict_ for (key, dict_) in decorated_posts]
tags = parse_tags(posts)
dates = parse_dates(posts)
header = generate_header(theme)
sidebar = generate_sidebar(dates,tags,theme)
footer = generate_footer(theme)
generate_main_page(posts,theme)
generate_tags_page(posts,tags,theme)
generate_archives_page(posts,dates,theme)
create_blog(sys.argv[1])
|
gpl-2.0
| 7,100,139,526,058,622,000
| 38.556291
| 194
| 0.597438
| false
| 3.464617
| true
| false
| false
|
pmeier82/django-spikeval
|
djspikeval/views/algorithm.py
|
1
|
2234
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.shortcuts import redirect
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from djspikeval.forms import AlgorithmForm
__all__ = [
"AlgorithmBaseView", "AlgorithmList", "AlgorithmCreate", "AlgorithmDetail", "AlgorithmUpdate", "AlgorithmDelete"]
__author__ = "pmeier82"
Algorithm = apps.get_model("djspikeval", "algorithm")
class AlgorithmBaseView(object):
model = Algorithm
class AlgorithmList(AlgorithmBaseView, ListView):
template_name = "djspikeval/algorithm/list.html"
paginate_by = 10
def get_context_data(self, **kwargs):
cntx = super(AlgorithmList, self).get_context_data(**kwargs)
cntx.update(scope=self.request.GET.get("scope"))
return cntx
def get_queryset(self):
if self.request.GET.get("scope"):
scope = self.request.GET.get("scope")
return Algorithm.objects.filter(
Q(name__icontains=scope) |
Q(kind__name__icontains=scope))
return Algorithm.objects.all()
class AlgorithmCreate(AlgorithmBaseView, CreateView):
template_name = "djspikeval/algorithm/create.html"
form_class = AlgorithmForm
def get_form_kwargs(self):
kwargs = super(AlgorithmCreate, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class AlgorithmDetail(AlgorithmBaseView, DetailView):
template_name = "djspikeval/algorithm/detail.html"
class AlgorithmUpdate(AlgorithmBaseView, UpdateView):
template_name = "djspikeval/algorithm/update.html"
form_class = AlgorithmForm
def get_form_kwargs(self):
kwargs = super(AlgorithmUpdate, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class AlgorithmDelete(AlgorithmBaseView, DeleteView):
template_name = "djspikeval/algorithm/delete.html"
success_url = reverse_lazy("algorithm:list")
if __name__ == "__main__":
pass
|
bsd-3-clause
| -5,051,124,523,755,810,000
| 29.60274
| 117
| 0.703671
| false
| 3.878472
| false
| false
| false
|
FlowBoat/Flow-Tech-NeurAlgae
|
Versions/v2/v2.0/NeurAlgae2.0.py
|
1
|
15860
|
# FlowTech | NeurAlgae
## 2017 CWSF Science Fair | NeurAlgae: HAB Prediction Using Machine Learning Algorithms
#Describes and trains a neural network for the analysis and prediction of algal bloom data
#Copyright (C) 2017 Zachary Trefler and Atif Mahmud
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#If you have comments, questions, concerns, or you just want to say 'hi',
#email Zachary Trefler at zmct99@gmail.com or Atif Mahmud at atifmahmud101@gmail.com
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Dropout, regularizers
from keras.models import Model
#from Data import dataX as X, dataPN as PN, dataCDA as CDA, dataPDA as PDA
import Data
X = Data.dataX[5000:len(Data.dataX) - 5000]
PN = Data.dataPN[5000:len(Data.dataPN) - 5000]
CDA = Data.dataCDA[5000:len(Data.dataCDA) - 5000]
PDA = Data.dataPDA[5000:len(Data.dataPDA) - 5000]
Xr = np.zeros((3, 2))
PNr = np.array
architecture = int(input("Which network architecture to use? "))
if architecture == 0:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
layer2 = Dense(64, activation = "relu")(layer1)
outputs = Dense(1, activation = "sigmoid")(layer2)
epochnum = 256
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 1:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1_l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1_l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 256
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 2:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
layer2 = Dense(64, activation = "relu")(layer1)
outputs = Dense(1, activation = "sigmoid")(layer2)
epochnum = 256
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 3:
#Pretty good
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 4:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.001))(drop1)
drop2 = Dropout(0.5)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 5:
#Surprisingly good underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
outputs = Dense(1, activation = "sigmoid")(layer1)
epochnum = 1
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 6:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 7:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0005))(inputs)
drop1 = Dropout(0.33)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0005))(drop1)
drop2 = Dropout(0.33)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 8:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.20)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.20)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 9:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
layer3 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.25)(layer3)
outputs = Dense(1, activation = "sigmoid")(drop3)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 10:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
layer3 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.25)(layer3)
outputs = Dense(1, activation = "sigmoid")(drop3)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 11:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 12:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 13:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 32
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 14:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 15:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.5)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop3)
drop4 = Dropout(0.5)(layer4)
outputs = Dense(1, activation = "sigmoid")(drop4)
epochnum = 256
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 16:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.5)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop3)
drop4 = Dropout(0.5)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop4)
drop5 = Dropout(0.5)(layer5)
layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop5)
drop6 = Dropout(0.5)(layer6)
layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop6)
drop7 = Dropout(0.5)(layer7)
layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop7)
drop8 = Dropout(0.5)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 17:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(inputs)
drop1 = Dropout(0.05)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop1)
drop2 = Dropout(0.05)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop2)
drop3 = Dropout(0.05)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop3)
drop4 = Dropout(0.05)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop4)
drop5 = Dropout(0.05)(layer5)
layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop5)
drop6 = Dropout(0.05)(layer6)
layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop6)
drop7 = Dropout(0.05)(layer7)
layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop7)
drop8 = Dropout(0.05)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 18:
#Interesting
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(inputs)
drop1 = Dropout(0.2)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop1)
drop2 = Dropout(0.2)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop2)
drop3 = Dropout(0.2)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop3)
drop4 = Dropout(0.2)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop4)
drop5 = Dropout(0.2)(layer5)
layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop5)
drop6 = Dropout(0.2)(layer6)
layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop6)
drop7 = Dropout(0.2)(layer7)
layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop7)
drop8 = Dropout(0.2)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
else:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(16, activation = "sigmoid")(inputs)
outputs = Dense(1, activation = "sigmoid")(layer1)
epochnum = 128
minimizer = "sgd"
cost = "mean_squared_error"
netPN = Model(inputs = inputs, outputs = outputs)
netPN.compile(optimizer = minimizer, loss = cost)
PNh = netPN.fit(x = X, y = PN, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netPN.save_weights("Nets/v2/netPN" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(311)
plt.plot(PNh.history["loss"])
plt.plot(PNh.history["val_loss"])
plt.title("MSE Loss vs. Training Epoch")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.figure(2)
plt.subplot(311)
x = [i for i in range(len(Data.dataX))]
yPNp = [netPN.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yPNo = [Data.dataPN[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yPNp, label = "Predicted")
plt.plot(x, yPNo, label = "Observed")
plt.title("Predicted and Observed Values vs. Time")
plt.xlabel("Time")
plt.ylabel("P(PN > 10Kcells/L)")
plt.legend()
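# Note: netCDA and netPDA below are built from the same inputs/outputs tensors
# as netPN, so all three Models wrap the same underlying layers and share
# weights; each fit() call continues training from the previous one.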
netCDA = Model(inputs = inputs, outputs = outputs)
netCDA.compile(optimizer = minimizer, loss = cost)
CDAh = netCDA.fit(x = X, y = CDA, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netCDA.save_weights("Nets/v2/netCDA" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(312)
plt.plot(CDAh.history["loss"])
plt.plot(CDAh.history["val_loss"])
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.figure(2)
plt.subplot(312)
x = [i for i in range(len(Data.dataX))]
yCDAp = [netCDA.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yCDAo = [Data.dataCDA[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yCDAp, label = "Predicted")
plt.plot(x, yCDAo, label = "Observed")
plt.xlabel("Time")
plt.ylabel("P(CDA > 10pg/cell)")
plt.legend()
netPDA = Model(inputs = inputs, outputs = outputs)
netPDA.compile(optimizer = minimizer, loss = cost)
PDAh = netPDA.fit(x = X, y = PDA, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netPDA.save_weights("Nets/v2/netPDA" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(313)
plt.plot(PDAh.history["loss"])
plt.plot(PDAh.history["val_loss"])
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.savefig("Plots/v2/MSEvE" + str(architecture) + ".png")
plt.figure(2)
plt.subplot(313)
x = [i for i in range(len(Data.dataX))]
yPDAp = [netPDA.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yPDAo = [Data.dataPDA[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yPDAp, label = "Predicted")
plt.plot(x, yPDAo, label = "Observed")
plt.xlabel("Time")
plt.ylabel("P(PDA > 500ng/L)")
plt.legend()
plt.savefig("Plots/v2/POvT" + str(architecture) + ".png")
|
gpl-3.0
| -8,447,979,558,048,298,000
| 41.98103
| 123
| 0.674149
| false
| 3.020377
| false
| false
| false
|
temnoregg/django-muzo
|
muzo/ws.py
|
1
|
7137
|
from SOAPpy import WSDL
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from os.path import dirname
from signature import CSignature
from models import MERCHANT_NUM
MUZO_ORDER_STATES = {
0: _('UNKNOWN'),
1: _('REQUESTED'),
2: _('PENDING'),
3: _('CREATED'),
4: _('APPROVED'),
5: _('APPROVE_REVERSED'),
6: _('UNAPPROVED'),
7: _('DEPOSITED_BATCH_OPENED'),
8: _('DEPOSITED_BATCH_CLOSED'),
9: _('ORDER_CLOSED'),
10: _('DELETED'),
11: _('CREDITED_BATCH_OPENED'),
12: _('CREDITED_BATCH_CLOSED'),
13: _('DECLINED')
}
MUZO_PRCODE = {
0: _('OK'),
1: _('Field too long'),
2: _('Field too short'),
3: _('Incorrect content of field'),
4: _('Field is null'),
5: _('Missing required field'),
11: _('Unknown merchant'),
14: _('Duplicate order number'),
15: _('Object not found'),
17: _('Amount to deposit exceeds approved amount'),
18: _('Total sum of credited amounts exceeded deposited amount'),
20: _('Object not in valid state for operation'),
26: _('Technical problem in connection to authorization center'),
27: _('Incorrect order type'),
28: _('Declined in 3D'),
30: _('Declined in AC'),
31: _('Wrong digest'),
1000: _('Technical problem')
}
MUZO_SRCODE = {
0: _('Empty'),
1: _('ORDERNUMBER'),
2: _('MERCHANTNUMBER'),
6: _('AMOUNT'),
7: _('CURRENCY'),
8: _('DEPOSITFLAG'),
10: _('MERORDERNUM'),
11: _('CREDITNUMBER'),
12: _('OPERATION'),
18: _('BATCH'),
22: _('ORDER'),
24: _('URL'),
25: _('MD'),
26: _('DESC'),
34: _('DIGEST'),
1001: _("""Declined in AC, Card blocked"""),
1002: _("""Declined in AC, Declined"""),
1003: _("""Declined in AC, Card problem"""),
1004: _("""Declined in AC, Technical problem in authorization process"""),
1005: _("""Declined in AC, Account problem"""),
3000: _("""Declined in 3D. Cardholder not authenticated in 3D.
Contact your card issuer. Note: Cardholder authentication failed (wrong
password, transaction canceled, authentication window was closed)
Transaction Declined."""),
3001: _("""Authenticated. Note: Cardholder was successfully
authenticated - transaction continue with
authorization."""),
3002: _("""Not Authenticated in 3D. Issuer or Cardholder not participating in 3D.
Contact your card issuer."""),
3004: _("""Not Authenticated in 3D. Issuer not participating or Cardholder not
enrolled. Contact your card issuer."""),
3005: _("""Declined in 3D. Technical problem during Cardholder authentication.
Contact your card issuer"""),
3006: _("""Declined in 3D. Technical problem during Cardholder authentication."""),
3007: _("""Declined in 3D. Acquirer technical problem. Contact the merchant."""),
3008: _("""Declined in 3D. Unsupported card product. Contact your card issuer""")
}
class MuzoWSError(Exception):
pass
class MuzoWS:
# private key file location
priv_key = settings.MUZO_PRIV_KEY
# password
passwd = settings.MUZO_PASS
# public key file location
pub_key = settings.MUZO_PUB_KEY
# WSDL file
# if settings.DEBUG:
# wsdl_file = dirname(__file__)+'/pgwTest.xml'
# else:
wsdl_file = dirname(__file__)+'/pgw.xml'
#
merchant_num = MERCHANT_NUM
def __init__(self, order_num):
self._server = WSDL.Proxy(self.wsdl_file)
self._order_num = str(order_num)
# sign data routine, returns base64 encoded digest
def _sign(self, data):
CS = CSignature(privkey=self.priv_key, passwd=self.passwd, pubkey=self.pub_key)
return CS.sign(data)
# sends orderQueryState request to WS server and returns WS object response
def queryOrderState(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
return self._server.queryOrderState(str(self.merchant_num), str(self._order_num), digest)
def getOrderState(self):
st = self.queryOrderState()
return '%s - %s' % (st.state, MUZO_ORDER_STATES[st.state])
def getOrderStateId(self):
st = self.queryOrderState().state
return int(st)
def approveReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.approveReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def deposit(self, amount):
d = '%s|%s|%s' % (self.merchant_num, self._order_num, amount)
digest = self._sign(d)
response = self._server.deposit(str(self.merchant_num), str(self._order_num), str(amount), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def depositReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.depositReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def credit(self, amount):
d = '%s|%s|%s' % (self.merchant_num, self._order_num, amount)
digest = self._sign(d)
response = self._server.credit(str(self.merchant_num), str(self._order_num), str(amount), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def creditReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.creditReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def orderClose(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.orderClose(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def delete(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.delete(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def batchClose(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.batchClose(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
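# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the Django settings referenced above (MUZO_PRIV_KEY, MUZO_PASS,
# MUZO_PUB_KEY) are configured; the order number below is hypothetical.
def _example_query_order(order_num='20120001'):
    """Sketch: query an order's state and reverse the approval if it is APPROVED."""
    ws = MuzoWS(order_num)
    state_id = ws.getOrderStateId()
    if state_id == 4:  # 4 = APPROVED, see MUZO_ORDER_STATES above
        return ws.approveReversal()
    return MUZO_ORDER_STATES.get(state_id, 'UNKNOWN')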
|
mit
| -1,142,081,912,268,378,500
| 34.507463
| 136
| 0.676475
| false
| 2.978715
| false
| false
| false
|
IntelLabsEurope/infrastructure-repository
|
monitoring_service/epa_database/hw_reources.py
|
1
|
12910
|
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module to manage hardware information of a node.
It takes as input the files produced by the EPA agents,
parses them, and stores a graph representation of the host
in Neo4j.
"""
__author__ = 'gpetralia'
import xml.etree.ElementTree as Et
import time
import json
import networkx as nx
import common.neo4j_resources as neo_resource
# Map numerical types used by hardware locality to string categories
OSDEVTYPE_CATEGORY_MAP = {
'0': 'storage', # HWLOC_OBJ_OSDEV_BLOCK
'1': 'compute', # HWLOC_OBJ_OSDEV_GPU
'2': 'network', # HWLOC_OBJ_OSDEV_NETWORK
'3': 'network', # HWLOC_OBJ_OSDEV_OPENFABRICS
'4': 'compute', # HWLOC_OBJ_OSDEV_DMA
'5': 'compute', # HWLOC_OBJ_OSDEV_COPROC
}
class HostHwResources(object):
"""
Class to manage hw resources in the Neo4j DB
"""
def __init__(self, hostname, pop_id, graph_db):
self.hostname = hostname
self.graph_db = graph_db
self.pop_id = pop_id
self.label = 'physical_resource'
self.index = 'physical_name'
def store(self, path, hwloc_file, cpu_file=None, sriov_file=None, dpdk_file=None, timestamp=None):
"""
Store information contained in files created by the EPA agents into Neo4j.
using a networkx graph
:param path: Path of the files
:param hwloc_file: Hardware locality file
:param cpu_file: Optional cpu information file
:param sriov_file: Optional SR-IOV information file
:param dpdk_file: Optional DPDK information file
:param timestamp: Optional timestamp in epoch
"""
graph = nx.DiGraph()
xml_root = Et.parse(path + hwloc_file).getroot()
deleted_edges = {}
for child in xml_root:
_parse_object_hwloc(graph, child, self.hostname, deleted_edges, self.pop_id)
if cpu_file is not None:
processors_dict = _parse_cpu_info(path + cpu_file)
_enrich_graph_cpuinfo(graph, processors_dict)
if dpdk_file is not None:
dpdk_dict = _parse_dpdk_info(path + dpdk_file)
_enrich_graph_dpdkinfo(graph, dpdk_dict)
if sriov_file is not None:
sriov_dict = _parse_sriov_info(path + sriov_file)
_enrich_graph_sriovinfo(graph, sriov_dict)
if timestamp is not None:
now = timestamp
else:
now = time.time()
neo_id_nodes = {}
nodes_to_add = []
nodes_stored = []
query_string = 'Match n Where n.hostname = {hostname} ' \
'And n.resource_type = {resource_type} Return n.physical_name'
res = self.graph_db.cypher.execute(query_string, hostname=self.hostname, resource_type='physical')
for item in res:
print str(item)
nodes_stored.append(item['n.physical_name'])
for nx_node in graph.nodes():
nodes_to_add.append(str(nx_node))
neo_id_nodes[nx_node] = neo_resource.add_node(self.graph_db, (self.label, self.index, nx_node), now,
get_node_properties(graph, nx_node))
nodes_to_remove = [item for item in nodes_stored if item not in nodes_to_add]
for node in nodes_to_remove:
neo_resource.delete_node(self.graph_db, (self.label, self.index, node))
for edge in graph.edges():
source = edge[0]
target = edge[1]
edge_label = ''
if 'label' in graph.edge[source][target]:
edge_label = graph.edge[source][target]['label']
db_src = neo_id_nodes[source]
db_target = neo_id_nodes[target]
rel_stored = neo_resource.get_edge(self.graph_db, db_src, db_target)
if rel_stored is None:
neo_resource.add_edge(self.graph_db, db_src, db_target, timestamp, edge_label)
else:
neo_resource.update_edge(self.graph_db, timestamp, edge_label, db_src=db_src, db_target=db_target)
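# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a py2neo graph handle (based on the .cypher.execute call above) and
# EPA agent output files under /tmp/epa/; the path and file names are hypothetical.
def _example_store_host(graph_db):
    """Sketch: persist one host's hwloc/cpuinfo snapshot into Neo4j."""
    host = HostHwResources('compute-node-1', 'pop-1', graph_db)
    host.store('/tmp/epa/', 'hwloc.xml',
               cpu_file='cpuinfo.txt',
               sriov_file='sriov.txt',
               dpdk_file='dpdk.txt')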
def get_node_properties(graph, node_name):
"""
Return a dict containing the node's properties
:param graph: Networkx graph
:param node_name: name of the node
:return dict: Node properties
"""
neo_node = {}
for item in graph.node[str(node_name)]:
if isinstance((graph.node[str(node_name)][item]), dict):
neo_node[item] = json.dumps(graph.node[str(node_name)][item])
else:
neo_node[item] = str(graph.node[str(node_name)][item])
neo_node['physical_name'] = node_name
return neo_node
def _enrich_graph_sriovinfo(graph, sriov_dict):
"""
Enrich the graph with SR-IOV information
:param graph: networkx graph
:param sriov_dict: SR-IOV information
"""
for node, attr in graph.nodes(data=True):
if 'pci_busid' in attr['attributes'].keys() and attr['attributes']['pci_busid'] in sriov_dict.keys():
attr['attributes']['sriov'] = sriov_dict[attr['attributes']['pci_busid']]
def _enrich_graph_dpdkinfo(graph, dpdk_dict):
"""
Enrich the graph with DPDK information
:param graph: networkx graph
:param dpdk_dict: DPDK information
"""
for node, attr in graph.nodes(data=True):
if 'pci_busid' in attr['attributes'].keys() and attr['attributes']['pci_busid'] in dpdk_dict.keys():
attr['attributes']['dpdk'] = True
def _enrich_graph_cpuinfo(graph, processors_dict):
"""
Navigate the graph and
add attributes from processors_dict
to the PU nodes.
The key linking processors_dict and the hwloc graph
is the os_index attribute.
:param graph: the graph that should be enriched
:param processors_dict: a dict of cpu attributes
"""
for node, attr in graph.nodes(data=True):
if '_PU_' in node:
index = int(attr['attributes']['os_index'])
attr['attributes'].update(processors_dict[index])
def _parse_sriov_info(sriov_info_file):
"""
Create a dict containing information extracted from the SR-IOV file
:param sriov_info_file: SR-IOV file
:return dict: SR-IOV information
"""
sriov_dict = {}
with open(sriov_info_file) as f:
for line in f:
line = sanitize_string(line)
attr = line.split(' ')
if len(attr) == 3:
sriov_dict[attr[0]] = {"numvfs": attr[1], "totalvfs": attr[2]}
return sriov_dict
def _parse_dpdk_info(dpdk_info_file):
"""
Create a dict containing information extracted from the DPDK file
:param dpdk_info_file: DPDK file
:return dict: DPDK information
"""
dpdk_dict = {}
with open(dpdk_info_file) as f:
for line in f:
line = sanitize_string(line)
dpdk_dict[line] = {"dpdk": True}
return dpdk_dict
def _parse_cpu_info(cpu_info_file):
"""
Parse the text cpuinfo file
and create a dict of processors.
Each processor is a dict with all the attributes given by cpuinfo.
:param cpu_info_file: Text file with the output of cat /proc/cpuinfo
:return processors_dict: Dictionary containing attributes of each proc
"""
processors_dict = {}
with open(cpu_info_file) as f:
current_id = None
for line in f:
attr = line.split(':')
if len(attr) > 1:
attr[0] = sanitize_string(attr[0])
attr[1] = sanitize_string(attr[1])
if 'processor' in attr[0]:
current_id = int(attr[1])
processors_dict[current_id] = {}
processors_dict[current_id]['id'] = attr[1]
elif current_id is not None and attr[1] is not None and attr[1] is not '':
processors_dict[current_id][attr[0]] = attr[1]
return processors_dict
def _parse_object_hwloc(graph, obj, host_name, deleted_edges, pop_id, parent=None):
"""
Given an xml object extracted from Hardware locality file, create the
corresponding node in the Networkx graph
:param graph: networkx graph
:param obj: xml object
:param host_name: hostname of the host to which the hwloc obj belongs
:param deleted_edges: list of edges to delete
:param pop_id: PoP ID
:param parent: Optional reference to the parent of the current object
"""
object_children = []
new_node_properties = {
'resource_type': 'physical',
'category': _get_category(obj),
'type': obj.attrib['type'],
'hostname': host_name,
'pop': pop_id,
'attributes': _get_attributes(obj)
}
node_name = _get_unique_name(obj, host_name)
attr = obj.attrib.copy()
del attr['type']
# Saving the children to be parsed
for child in obj:
if child.tag == 'object':
object_children.append(child)
graph.add_node(node_name, attr_dict=new_node_properties)
# Adding the edge between current node and the parent
if parent is not None:
graph.add_edge(parent, node_name, label='INTERNAL')
if parent in deleted_edges.keys():
graph.add_edge(deleted_edges[parent], node_name, label='INTERNAL')
# Work around an hwloc quirk that reports the two L1 caches
# (data and instruction) as if one were nested under the other
if parent is not None:
if new_node_properties['type'] == 'Cache':
parent_type = ''
parent_depth = ''
for node, node_attr in graph.nodes(data=True):
if node == parent:
parent_type = node_attr['type']
if parent_type == 'Cache':
parent_depth = node_attr['attributes']['depth']
if parent_type == new_node_properties['type'] and attr['depth'] == parent_depth:
graph.remove_edge(parent, node_name)
deleted_edges[node_name] = parent
parent = graph.pred[parent].keys()[0]
graph.add_edge(parent, node_name, label='INTERNAL')
# Recursively calls the function to parse the child of current node
for obj in object_children:
_parse_object_hwloc(graph, obj, host_name, deleted_edges, pop_id, parent=node_name)
def _get_category(hw_obj):
"""
Given an object from the hwloc xml file
the function return the category of the node
chosen using the OSDEVTYPE_CATEGORY_MAP
:param hw_obj: object extracted from hwloc xml file
:rtype string
:return: category
"""
attrib = hw_obj.attrib
if 'osdev_type' in attrib.keys():
category = OSDEVTYPE_CATEGORY_MAP[attrib['osdev_type']]
else:
category = 'compute'
return category
def _get_attributes(hw_obj):
"""
Return a dict containing the attributes
of an xml object extracted from Hwloc xml file
:param hw_obj: hw object to be parsed
:return dict: attributes of the object
"""
attributes = hw_obj.attrib.copy()
del attributes['type']
for child in hw_obj:
if child.tag == 'info':
name = child.attrib['name']
value = child.attrib['value']
attributes[name] = value
return attributes
def _get_unique_name(hw_obj, hostname):
# UniqueName
# Cache: hostname_Cache_[cpuset]_[depth]_[cache_type]
# OSDev: hostname_OSDev_[name]
# otherwise: hostname_[type]_os_index
obj_type = hw_obj.attrib['type']
if obj_type == 'Cache':
return hostname + '_' + 'Cache' + '_' + hw_obj.attrib['cpuset'] + '_' + hw_obj.attrib['depth'] + '_' + \
hw_obj.attrib['cache_type']
if obj_type == 'OSDev':
return hostname + '_' + 'OSDev' + '_' + hw_obj.attrib['name']
if obj_type == 'Core':
print "Core"
return hostname + '_' + 'Core' + '_' + hw_obj.attrib['cpuset']
return hostname + '_' + hw_obj.attrib['type'] + '_' + hw_obj.attrib['os_index']
def sanitize_string(input_string, space=True):
"""
Sanitize input_string: convert it to lowercase, strip leading and trailing
whitespace, replace '-' with '_' and drop newline characters.
:param input_string:
:param space: if space=False, spaces will be replaced with _
:return:
"""
output_string = input_string.strip().lower().replace('-', '_').replace('\n', '')
if not space:
output_string = output_string.replace(' ', '_')
return output_string
|
apache-2.0
| 8,347,671,430,936,619,000
| 32.273196
| 114
| 0.608521
| false
| 3.645863
| false
| false
| false
|
GrahamDumpleton/ispyd
|
ispyd/manager.py
|
1
|
2308
|
import atexit
import cmd
import ConfigParser
import os
import socket
import threading
import traceback
import sys
from ispyd.shell import RootShell
class ShellManager(object):
def __init__(self, config_file):
self.__config_file = config_file
self.__config_object = ConfigParser.RawConfigParser()
if not self.__config_object.read([config_file]):
raise RuntimeError('Unable to open configuration file %s.' %
config_file)
self.__socket_server = self.__config_object.get('ispyd',
'listen') % {'pid': os.getpid()}
if not os.path.isabs(self.__socket_server):
host, port = self.__socket_server.split(':')
port = int(port)
self.__socket_server = (host, port)
self.__thread = threading.Thread(target=self.__thread_run,
name='ISpyd-Shell-Manager')
self.__thread.setDaemon(True)
self.__thread.start()
def __socket_cleanup(self, path):
try:
os.unlink(path)
except:
pass
def __thread_run(self):
if type(self.__socket_server) == type(()):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(self.__socket_server)
else:
try:
os.unlink(self.__socket_server)
except:
pass
listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
listener.bind(self.__socket_server)
atexit.register(self.__socket_cleanup, self.__socket_server)
os.chmod(self.__socket_server, 0600)
listener.listen(5)
while True:
client, addr = listener.accept()
shell = RootShell(self.__config_object)
shell.stdin = client.makefile('r')
shell.stdout = client.makefile('w')
try:
shell.cmdloop()
except:
print >> shell.stdout, 'Exception in shell "%s".' % shell.name
traceback.print_exception(*sys.exc_info(), file=shell.stdout)
shell.stdin = None
shell.stdout = None
del shell
client.close()
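# --- Illustrative usage sketch (not part of the original module) ---
# The [ispyd] section of the configuration file must provide a 'listen'
# entry, either a UNIX socket path (which may contain %(pid)s, as substituted
# above) or a host:port pair. The paths below are hypothetical.
#
#   [ispyd]
#   listen = /tmp/ispyd-%(pid)s.sock
#
def _example_embed_shell(config_file='/etc/ispyd.ini'):
    """Sketch: embed the introspection shell manager inside an application process."""
    return ShellManager(config_file)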
|
apache-2.0
| -6,454,305,951,312,017,000
| 27.85
| 78
| 0.555893
| false
| 4.289963
| true
| false
| false
|
jart/tensorflow
|
tensorflow/python/eager/backprop.py
|
1
|
29803
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
pywrap_tensorflow.TFE_Py_RegisterGradientFunction(_gradient_function)
_tracing = False
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
def _record_gradient(op_name, inputs, attrs, results, name):
return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs,
results, name)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
grad = imperative_grad.imperative_grad(_default_vspace,
this_tape,
nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns: function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and its vjp w.r.t. params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
vjp()  # the vjp function returns 6.0
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(args[i])
if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
_default_vspace, this_tape, nest.flatten(result), sources,
output_gradients=dy)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all([isinstance(g, ops.Tensor) for g in gradients]):
return math_ops.add_n(gradients)
else:
assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients])
indexed_slices_list = []
for grad in gradients:
# TODO(xpan): Support nested IndexedSlices and core IndexedSlices
if isinstance(grad, ops.Tensor):
indexed_slices = ops.IndexedSlices(
grad,
math_ops.range(grad.shape[0]),
constant_op.constant(grad.shape.as_list()))
indexed_slices_list.append(indexed_slices)
else:
indexed_slices_list.append(grad)
# Dense shapes from all gradients should be the same.
dense_shape = indexed_slices_list[0].dense_shape
# For simplicity now, always cast to int64.
indices = array_ops.concat([math_ops.cast(x.indices, dtypes.int64)
for x in indexed_slices_list], 0)
values = array_ops.concat([x.values for x in indexed_slices_list], 0)
return ops.IndexedSlices(values, indices, dense_shape)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
return functools.reduce(operator.mul, grad._shape_tuple(), 1) # pylint: disable=protected-access
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
_zeros_cache = context._TensorCache() # pylint: disable=protected-access
def _fast_fill(value, shape, dtype):
return array_ops.fill(shape, constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Wraps array_ops.zeros to cache last zero for a given shape and dtype."""
device = context.context().device_name
if dtype == dtypes.variant:
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
# pylint: disable=protected-access
cache_key = shape, dtype, device, context.context()._eager_context.mode
# pylint: enable=protected-access
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
_zeros_cache.put(cache_key, cached)
return cached
def _ones(shape, dtype):
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(1, dtype=dtype)
return _fast_fill(1, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
tensor_id=ops.tensor_id,
zeros=_zeros,
ones=_ones)
def _handle_or_self(x):
"""If x is ResourceVariable, return its handle, else x."""
if isinstance(x, resource_variable_ops.ResourceVariable):
x = x.handle
return x
@tf_export("GradientTape")
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.contrib.eager.Variable` or
@{tf.get_variable}, trainable=True is default in both cases) are automatically
watched. Tensors can be manually watched by invoking the `watch` method on
this context manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
dy_dx = g.gradient(y, x) # Will compute to 6.0
```
GradientTapes can be nested to compute higher-order derivatives. For example,
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
with tf.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, x) # Will compute to 6.0
d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0
```
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
```python
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = g.gradient(y, x) # 6.0
del g # Drop the reference to the tape
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
"""
self._tape = None
self._persistent = persistent
self._recording = False
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self, existing_tape=False):
if self._recording:
raise ValueError("Tape is already recording.")
if existing_tape:
if self._tape is None:
raise ValueError("There is no existing tape.")
tape.push_tape(self._tape)
else:
self._tape = tape.push_new_tape(persistent=self._persistent)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
"""
for t in nest.flatten(tensor):
tape.watch(_handle_or_self(t))
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
```
with tf.GradientTape(persistent=True) as t:
loss = compute_loss(model)
with t.stop_recording():
# The gradient computation below is not traced, saving memory.
grads = t.gradient(loss, model.variables)
```
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape(existing_tape=True)
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
return self._tape.watched_variables()
def gradient(self, target, sources, output_gradients=None):
"""Computes the gradient using operations recorded in context of this tape.
Args:
target: Tensor (or list of tensors) to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
output_gradients: a list of gradients, one for each element of
target. Defaults to None.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None),
one for each element in `sources`. Returned structure is the same as
the structure of `sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once on a non-persistent tape.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once on "
"non-persistent tapes.")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(logging.WARN,
"Calling GradientTape.gradient on a persistent "
"tape inside its context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derivatives.", 1)
flat_sources = nest.flatten(sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in nest.flatten(output_gradients)]
flat_grad = imperative_grad.imperative_grad(
_default_vspace, self._tape, nest.flatten(target), flat_sources,
output_gradients=output_gradients)
if not self._persistent:
self._tape = None
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
|
apache-2.0
| 6,596,691,934,725,755,000
| 33.494213
| 108
| 0.664497
| false
| 3.782107
| false
| false
| false
|
waytai/pytracemalloctext
|
tests/python_memleak.py
|
1
|
3305
|
"""
Memory usage of Python < 3.3 grows between some function calls, randomly,
whereas it should stay stable. The final memory usage should be close to the
initial memory usage.
Example with Python 2.6:
Initial memory:
VmRSS: 3176 kB
After call #1:
VmRSS: 4996 kB
After call #2:
VmRSS: 4996 kB
After call #3:
VmRSS: 14704 kB
Finally memory
VmRSS: 14704 kB
Example with Python 3.3 (compiled in debug mode):
Initial memory:
VmRSS: 6048 kB
After call #1:
VmRSS: 6732 kB
After call #2:
VmRSS: 6732 kB
After call #3:
VmRSS: 6732 kB
Finally memory
VmRSS: 6732 kB
The Python memory allocator of Python 3.3 uses mmap(), when available, instead
of malloc(). munmap() releases system memory immediately because it can punch
holes in the memory space of the process, whereas malloc() uses brk() and
sbrk() which use a contiguous address range for the heap memory.
The Python memory allocator allocates chunks of memory of 256 KB (see
ARENA_SIZE in Objects/obmalloc.c). A chunk cannot be released to the system
before all objects stored in the chunk are released.
The Python memory allocator is only used for allocations smaller than 256 bytes
in Python <= 3.2, or allocations smaller than 512 bytes in Python 3.3.
Otherwise, malloc() and free() are used. The GNU libc uses brk() or mmap()
depending on a threshold: 128 KB by default. The threshold is dynamic nowadays.
Use mallopt(M_MMAP_THRESHOLD, nbytes) to change this threshold.
See also:
* http://pushingtheweb.com/2010/06/python-and-tcmalloc/
* http://sourceware.org/ml/libc-alpha/2006-03/msg00033.html
* http://www.linuxdevcenter.com/pub/a/linux/2006/11/30/linux-out-of-memory.html?page=2
* http://cloudfundoo.wordpress.com/2012/05/18/minor-page-faults-and-dynamic-memory-allocation-in-linux/
"""
import gc
import sys
import tracemalloc
import tracemalloctext
tracemalloc.add_exclusive_filter(tracemalloctext.__file__)
tracemalloc.enable()
task = tracemalloctext.DisplayTopTask(10)
#task.start(60)
def dump_memory():
print("*FORCE DISPLAY*")
task.display()
return
with open("/proc/self/status") as fp:
for line in fp:
if "VmRSS" not in line:
continue
print(line.rstrip())
break
#with open("/proc/self/maps") as fp:
# for line in fp:
# print(line.rstrip())
def func():
ns = {}
codeobj = compile(codestr, 'wastememory.py', "exec")
exec(codeobj, ns, ns)
ns.clear()
codeobj = None
ns = None
gc.collect()
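# --- Illustrative sketch (not part of the original test) ---
# The module docstring above mentions mallopt(M_MMAP_THRESHOLD, nbytes). On
# Linux/glibc this can be reached from Python through ctypes; the
# M_MMAP_THRESHOLD constant value (-3) comes from glibc's <malloc.h>
# (assumption: glibc malloc is in use). Defined but never called here, so the
# behaviour of the test itself is unchanged.
def set_mmap_threshold(nbytes=64 * 1024):
    import ctypes
    libc = ctypes.CDLL("libc.so.6", use_errno=True)
    M_MMAP_THRESHOLD = -3  # from glibc <malloc.h>
    return libc.mallopt(M_MMAP_THRESHOLD, nbytes)  # use mmap() for allocations >= nbytes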
codestr = ["""class SuperClass:"""]
for index in range(2000):
codestr.append("""
classattr%s = 2
def methdod%s(self, arg):
"docstring"
x = len(arg)
return x""" % (index, index))
codestr = ''.join(codestr)
print("Initial memory: ")
dump_memory()
for loop in range(1, 4):
func()
print("After call #%s:" % loop)
dump_memory()
print("Finally memory")
dump_memory()
|
mit
| -3,558,270,381,872,102,400
| 27.247863
| 103
| 0.645386
| false
| 3.35874
| false
| false
| false
|
cburschka/NBT
|
examples/block_analysis.py
|
1
|
8106
|
#!/usr/bin/env python
"""
Finds the contents of the different blocks in a level, taking different data values (sub block types) into account.
"""
import locale, os, sys
import glob
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile
from nbt.chunk import McRegionChunk
def stats_per_chunk(chunk, block_data_totals):
"""Given a chunk, increment the block types with the number of blocks found"""
for block_id, data_id in chunk.blocks.get_all_blocks_and_data():
block_data_totals[block_id][data_id] += 1
def bounded_stats_per_chunk(chunk, block_data_totals, start, stop):
"""Given a chunk, return the number of blocks types within the specified selection"""
chunk_z, chunk_x = chunk.get_coords()
for z in range(16):
world_z = z + chunk_z*16
if ( (start != None and world_z < int(start[2])) or (stop != None and world_z > int(stop[2])) ):
# Outside the bounding box; skip to next iteration
#print("Z break: %d,%d,%d" % (world_z,start[2],stop[2]))
break
for x in range(16):
world_x = x + chunk_x*16
if ( (start != None and world_x < int(start[0])) or (stop != None and world_x > int(stop[0])) ):
# Outside the bounding box; skip to next iteration
#print("X break: %d,%d,%d" % (world_x,start[0],stop[0]))
break
for y in range(128):
if ( (start != None and y < int(start[1])) or (stop != None and y > int(stop[1])) ):
# Outside the bounding box; skip to next iteration
#print("Y break: %d,%d,%d" % (y,start[1],stop[1]))
break
#print("Chunk: %d,%d Coord: %d,%d,%d" % (c['x'], c['z'],x,y,z))
block_id,block_data = chunk.blocks.get_block_and_data(x,y,z)
block_data_totals[block_id][block_data] += 1
def process_region_file(filename, start, stop):
"""Given a region filename, return the number of blocks of each ID in that file"""
pieces = filename.split('.')
rx = int(pieces[-3])
rz = int(pieces[-2])
block_data_totals = [[0]*16 for i in range(256)] # up to 16 data numbers in 256 block IDs
# Does the region overlap the bounding box at all?
if (start != None):
if ( (rx+1)*512-1 < int(start[0]) or (rz+1)*512-1 < int(start[2]) ):
return block_data_totals
elif (stop != None):
if ( rx*512-1 > int(stop[0]) or rz*512-1 > int(stop[2]) ):
return block_data_totals
file = RegionFile(filename)
# Get all chunks
chunks = file.get_chunks()
print("Parsing %s... %d chunks" % (os.path.basename(filename),len(chunks)))
for c in chunks:
# Does the chunk overlap the bounding box at all?
if (start != None):
if ( (c['x']+1)*16 + rx*512 - 1 < int(start[0]) or (c['z']+1)*16 + rz*512 - 1 < int(start[2]) ):
continue
elif (stop != None):
if ( c['x']*16 + rx*512 - 1 > int(stop[0]) or c['z']*16 + rz*512 - 1 > int(stop[2]) ):
continue
chunk = McRegionChunk(file.get_chunk(c['x'], c['z']))
assert chunk.get_coords() == (c['x'] + rx*32, c['z'] + rz*32)
#print("Parsing chunk ("+str(c['x'])+", "+str(c['z'])+")")
# Parse the blocks
# Fast code if no start or stop coordinates are specified
# TODO: also use this code if start/stop is specified, but the complete chunk is included
if (start == None and stop == None):
stats_per_chunk(chunk, block_data_totals)
else:
# Slow code that iterates through each coordinate
bounded_stats_per_chunk(chunk, block_data_totals, start, stop)
return block_data_totals
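# --- Illustrative sketch (not part of the original example) ---
# A McRegion region file covers 32x32 chunks of 16x16 block columns, i.e. a
# 512x512 block area, which is where the rx*512 / rz*512 bounds above come from.
def region_world_bounds(rx, rz):
    """Sketch: ((min_x, min_z), (max_x, max_z)) world coordinates covered by region (rx, rz)."""
    return (rx * 512, rz * 512), ((rx + 1) * 512 - 1, (rz + 1) * 512 - 1)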
def print_results(block_data_totals):
locale.setlocale(locale.LC_ALL, '')
# Analyze blocks
for block_id,data in enumerate(block_data_totals):
if sum(data) > 0:
datastr = ", ".join([locale.format_string("%d: %d", (i,c), grouping=True) for (i,c) in enumerate(data) if c > 0])
print(locale.format_string("block id %3d: %12d (data id %s)", (block_id,sum(data),datastr), grouping=True))
block_totals = [sum(data_totals) for data_totals in block_data_totals]
total_blocks = sum(block_totals)
solid_blocks = total_blocks - block_totals[0]
solid_ratio = (solid_blocks+0.0)/total_blocks if (total_blocks > 0) else 0
print(locale.format_string("%d total blocks in region, %d are non-air (%0.4f", (total_blocks, solid_blocks, 100.0*solid_ratio), grouping=True)+"%)")
# Find valuable blocks
print(locale.format_string("Diamond Ore: %8d", block_totals[56], grouping=True))
print(locale.format_string("Gold Ore: %8d", block_totals[14], grouping=True))
print(locale.format_string("Redstone Ore: %8d", block_totals[73], grouping=True))
print(locale.format_string("Iron Ore: %8d", block_totals[15], grouping=True))
print(locale.format_string("Coal Ore: %8d", block_totals[16], grouping=True))
print(locale.format_string("Lapis Lazuli Ore: %8d", block_totals[21], grouping=True))
print(locale.format_string("Dungeons: %8d", block_totals[52], grouping=True))
print(locale.format_string("Clay: %8d", block_totals[82], grouping=True))
print(locale.format_string("Sugar Cane: %8d", block_totals[83], grouping=True))
print(locale.format_string("Cacti: %8d", block_totals[81], grouping=True))
print(locale.format_string("Pumpkin: %8d", block_totals[86], grouping=True))
print(locale.format_string("Dandelion: %8d", block_totals[37], grouping=True))
print(locale.format_string("Rose: %8d", block_totals[38], grouping=True))
print(locale.format_string("Brown Mushroom: %8d", block_totals[39], grouping=True))
print(locale.format_string("Red Mushroom: %8d", block_totals[40], grouping=True))
print(locale.format_string("Lava Springs: %8d", block_totals[11], grouping=True))
def main(world_folder, start=None, stop=None):
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
return 2 # ENOENT
regions = glob.glob(os.path.join(world_folder,'region','*.mcr'))
block_data_totals = [[0]*16 for i in range(256)] # up to 16 data numbers in 256 block IDs
try:
for filename in regions:
region_totals = process_region_file(filename, start, stop)
for i, data in enumerate(region_totals):
for j, total in enumerate(data):
block_data_totals[i][j] += total
except KeyboardInterrupt:
print_results(block_data_totals)
return 75 # EX_TEMPFAIL
print_results(block_data_totals)
return 0 # EX_OK
if __name__ == '__main__':
if (len(sys.argv) == 1):
print("No world folder specified! Usage: %s <world folder> [minx,miny,minz maxx,maxy,maxz]" % sys.argv[0])
sys.exit(64) # EX_USAGE
world_folder = sys.argv[1]
# clean path name, eliminate trailing slashes. required for os.path.basename()
world_folder = os.path.normpath(world_folder)
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
sys.exit(72) # EX_IOERR
start,stop = None,None
if (len(sys.argv) == 4):
# A min/max corner was specified
start_str = sys.argv[2][1:-1] # Strip parenthesis...
start = tuple(start_str.split(',')) # and convert to tuple
stop_str = sys.argv[3][1:-1] # Strip parenthesis...
stop = tuple(stop_str.split(',')) # and convert to tuple
sys.exit(main(world_folder, start, stop))
|
mit
| -2,987,137,642,103,406,600
| 45.855491
| 152
| 0.596348
| false
| 3.370478
| false
| false
| false
|
rockfruit/bika.lims
|
bika/lims/browser/batch/analysisrequests.py
|
1
|
2111
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from operator import itemgetter
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.analysisrequest import AnalysisRequestAddView as _ARAV
from bika.lims.browser.analysisrequest import AnalysisRequestsView as _ARV
from bika.lims.permissions import *
from plone.app.layout.globals.interfaces import IViewView
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
class AnalysisRequestsView(_ARV, _ARAV):
template = ViewPageTemplateFile(
"../analysisrequest/templates/analysisrequests.pt")
ar_add = ViewPageTemplateFile("../analysisrequest/templates/ar_add.pt")
implements(IViewView)
def __init__(self, context, request):
super(AnalysisRequestsView, self).__init__(context, request)
def contentsMethod(self, contentFilter):
return self.context.getAnalysisRequests(**contentFilter)
def __call__(self):
self.context_actions = {}
mtool = getToolByName(self.context, 'portal_membership')
if mtool.checkPermission(AddAnalysisRequest, self.portal):
self.context_actions[self.context.translate(_('Add new'))] = {
'url': self.context.absolute_url() + \
"/portal_factory/"
"AnalysisRequest/Request new analyses/ar_add?ar_count=1",
'icon': '++resource++bika.lims.images/add.png'}
return super(AnalysisRequestsView, self).__call__()
def getMemberDiscountApplies(self):
client = self.context.getClient()
return client and client.getMemberDiscountApplies() or False
def getRestrictedCategories(self):
client = self.context.getClient()
return client and client.getRestrictedCategories() or []
def getDefaultCategories(self):
client = self.context.getClient()
return client and client.getDefaultCategories() or []
|
agpl-3.0
| 2,410,436,482,305,657,300
| 39.596154
| 77
| 0.71009
| false
| 4.083172
| false
| false
| false
|
09zwcbupt/ryu
|
ryu/lib/packet/udp.py
|
1
|
2010
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
from . import packet_base
from . import packet_utils
import ipv4
class udp(packet_base.PacketBase):
_PACK_STR = '!HHHH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, src_port, dst_port, total_length=0, csum=0):
super(udp, self).__init__()
self.src_port = src_port
self.dst_port = dst_port
self.total_length = total_length
self.csum = csum
self.length = udp._MIN_LEN
@classmethod
def parser(cls, buf):
(src_port, dst_port, total_length, csum) = struct.unpack_from(
cls._PACK_STR, buf)
msg = cls(src_port, dst_port, total_length, csum)
return msg, None
def serialize(self, payload, prev):
if self.total_length == 0:
self.total_length = udp._MIN_LEN + len(payload)
h = struct.pack(udp._PACK_STR, self.src_port, self.dst_port,
self.total_length, self.csum)
if self.csum == 0:
ph = struct.pack('!IIBBH', prev.src, prev.dst, 0, 17,
self.total_length)
f = ph + h + payload
if len(f) % 2:
f += '\x00'
self.csum = socket.htons(packet_utils.checksum(f))
h = struct.pack(udp._PACK_STR, self.src_port, self.dst_port,
self.total_length, self.csum)
return h
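# Minimal round-trip sketch (not part of the original module): the parser
# unpacks the fixed 8-byte header described by _PACK_STR.
#
#   buf = struct.pack(udp._PACK_STR, 12345, 53, 8, 0)
#   msg, _ = udp.parser(buf)
#   assert (msg.src_port, msg.dst_port, msg.total_length) == (12345, 53, 8)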
|
apache-2.0
| -6,886,356,583,741,292,000
| 34.892857
| 72
| 0.612438
| false
| 3.595707
| false
| false
| false
|
iandees/all-the-places
|
locations/spiders/ljsilvers.py
|
1
|
1524
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class LjsilversSpider(scrapy.Spider):
name = "ljsilvers"
allowed_domains = ["ljsilvers.com"]
start_urls = (
'http://www.ljsilvers.com/locator?postalcode=76010',
)
def parse(self, response):
data = response.body_as_unicode()
        base_data = re.search(r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
        detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)
        for detail_match in detail_matches:
            key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
props = {}
for key_value in key_values:
key = key_value[0].strip()
value = key_value[1].strip()
if key == '[storeID]':
props['ref'] = value
if key == '[address]':
props['addr_full'] = value
if key == '[city]':
props['city'] = value
if key == '[state]':
props['state'] = value
if key == '[zip]':
props['postcode'] = value
if key == '[phone_number]':
props['phone'] = value
if key == '[latitude]':
props['lat'] = value
if key == '[longitude]':
props['lon'] = value
yield GeojsonPointItem(**props)
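# Rough shape of the payload the regexes above expect (a hedged reconstruction
# from the patterns, not captured from the live site):
#
#   dataout --Array (
#       ( [storeID] => 1234
#         [address] => 100 Main St
#         [city] => Arlington
#         [latitude] => 32.7357 )
#   ) --><style type="text/css">
#
# Each parenthesised group is turned into one GeojsonPointItem above.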
|
mit
| -8,044,704,276,494,561,000
| 32.130435
| 115
| 0.467192
| false
| 4.04244
| false
| false
| false
|
twhyntie/tasl-data-management
|
wrappers/test_nod.py
|
1
|
1166
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
# The wrapper class to test.
from nod import NOD
class TestNOD(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nod(self):
## The annotation CSV file.
nod = NOD("testdata/NOD/000000_00_00_00.csv")
# The tests.
# The headers.
self.assertEqual(nod.get_number_of_headers(), 2)
self.assertEqual(nod.get_header(0), "annotation_id")
self.assertEqual(nod.get_header(1), "n_oddities_identified")
# The annotations.
# Test the number of annotations found.
self.assertEqual(nod.get_number_of_annotations(), 88)
if __name__ == "__main__":
lg.basicConfig(filename='log_test_nod.log', filemode='w', level=lg.DEBUG)
lg.info(" *")
lg.info(" *=========================================")
lg.info(" * Logger output from wrappers/test_nod.py ")
lg.info(" *=========================================")
lg.info(" *")
unittest.main()
|
mit
| -4,170,661,468,903,382,500
| 21
| 77
| 0.559177
| false
| 3.491018
| true
| false
| false
|
certik/hermes-gui
|
hermesgui/core/handle_hermes.py
|
1
|
1543
|
from hermes2d import Mesh, H1Shapeset, PrecalcShapeset, H1Space, \
WeakForm, Solution, ScalarView, LinSystem, DummySolver, \
MeshView, set_verbose, plot_mesh_mpl_simple
from hermes2d.forms import set_forms
from hermes2d.mesh import read_hermes_format
def read_mesh(filename):
nodes, elements, boundary, nurbs = read_hermes_format(filename)
return nodes, elements, boundary, nurbs
def plot_mesh(mesh, axes=None, plot_nodes=True):
nodes, elements, boundary, nurbs = mesh
# remove the element markers
elements = [x[:-1] for x in elements]
return plot_mesh_mpl_simple(nodes, elements, axes=axes,
plot_nodes=plot_nodes)
def poisson_solver(mesh_tuple):
"""
Poisson solver.
mesh_tuple ... a tuple of (nodes, elements, boundary, nurbs)
"""
set_verbose(False)
mesh = Mesh()
mesh.create(*mesh_tuple)
mesh.refine_element(0)
shapeset = H1Shapeset()
pss = PrecalcShapeset(shapeset)
# create an H1 space
space = H1Space(mesh, shapeset)
space.set_uniform_order(5)
space.assign_dofs()
# initialize the discrete problem
wf = WeakForm(1)
set_forms(wf)
solver = DummySolver()
sys = LinSystem(wf, solver)
sys.set_spaces(space)
sys.set_pss(pss)
# assemble the stiffness matrix and solve the system
sys.assemble()
A = sys.get_matrix()
b = sys.get_rhs()
from scipy.sparse.linalg import cg
x, res = cg(A, b)
sln = Solution()
sln.set_fe_solution(space, pss, x)
return sln
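# Usage sketch ("domain.mesh" is a placeholder filename, not part of this
# module):
#
#   mesh_tuple = read_mesh("domain.mesh")   # (nodes, elements, boundary, nurbs)
#   plot_mesh(mesh_tuple)                   # matplotlib plot of the mesh
#   sln = poisson_solver(mesh_tuple)        # hermes2d Solution of the model problem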
|
bsd-3-clause
| 3,263,927,270,197,432,300
| 28.113208
| 73
| 0.657161
| false
| 3.276008
| false
| false
| false
|
nkoep/blaplay
|
blaplay/blagui/blaqueue.py
|
1
|
13341
|
# blaplay, Copyright (C) 2014 Niklas Koep
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import os
import cPickle as pickle
import re
import gobject
import gtk
import blaplay
ui_manager = blaplay.bla.ui_manager
from blaplay.blacore import blaconst, blacfg
from blaplay import blagui
from blaplay.formats._identifiers import *
from blawindows import BlaScrolledWindow
from blatracklist import (
COLUMN_ARTIST, COLUMN_ALBUM, COLUMN_ALBUM_ARTIST, COLUMN_GENRE, popup,
update_columns, parse_track_list_stats, BlaTreeView, BlaTrackListItem)
from blastatusbar import BlaStatusbar
from blaview import BlaViewMeta
from blaplaylist import playlist_manager
class BlaQueue(BlaScrolledWindow):
__metaclass__ = BlaViewMeta("Queue")
__layout = (
gobject.TYPE_PYOBJECT, # An instance of BlaTrackListItem
gobject.TYPE_STRING # Position in the queue
)
def __init__(self):
super(BlaQueue, self).__init__()
self.__size = 0
self.__length = 0
self.clipboard = []
self.__treeview = BlaTreeView(view_id=blaconst.VIEW_QUEUE)
self.__treeview.set_model(gtk.ListStore(*self.__layout))
self.__treeview.set_enable_search(False)
self.__treeview.set_property("rules_hint", True)
self.set_shadow_type(gtk.SHADOW_IN)
self.add(self.__treeview)
self.__treeview.enable_model_drag_dest(
[("queue", gtk.TARGET_SAME_WIDGET, 3)], gtk.gdk.ACTION_COPY)
self.__treeview.enable_model_drag_source(
gtk.gdk.BUTTON1_MASK,
[("queue", gtk.TARGET_SAME_WIDGET, 3)],
gtk.gdk.ACTION_COPY)
self.__treeview.connect("popup", popup, blaconst.VIEW_QUEUE, self)
self.__treeview.connect("row_activated", self.play_item)
self.__treeview.connect(
"button_press_event", self.__button_press_event)
self.__treeview.connect("key_press_event", self.__key_press_event)
self.__treeview.connect("drag_data_get", self.__drag_data_get)
self.__treeview.connect("drag_data_received", self.__drag_data_recv)
update_columns(self.__treeview, view_id=blaconst.VIEW_QUEUE)
self.show_all()
def __button_press_event(self, treeview, event):
if (event.button == 2 and
event.type not in [gtk.gdk._2BUTTON_PRESS,
gtk.gdk._3BUTTON_PRESS]):
self.paste()
return True
def __key_press_event(self, treeview, event):
if blagui.is_accel(event, "<Ctrl>X"):
self.cut()
elif blagui.is_accel(event, "<Ctrl>C"):
self.copy()
elif blagui.is_accel(event, "<Ctrl>V"):
self.paste()
elif blagui.is_accel(event, "Delete"):
self.remove()
return False
def __drag_data_get(self, treeview, drag_context, selection_data, info,
time):
data = pickle.dumps(treeview.get_selection().get_selected_rows()[-1],
pickle.HIGHEST_PROTOCOL)
selection_data.set("", 8, data)
def __drag_data_recv(self, treeview, drag_context, x, y, selection_data,
info, time):
drop_info = treeview.get_dest_row_at_pos(x, y)
model = self.__treeview.get_model()
paths = pickle.loads(selection_data.data)
# TODO: factor this out so we can use the same for the playlist
if drop_info:
path, pos = drop_info
iterator = model.get_iter(path)
if (pos == gtk.TREE_VIEW_DROP_BEFORE or
pos == gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
move_before = model.move_before
def move_func(it):
move_before(it, iterator)
else:
move_after = model.move_after
def move_func(it):
move_after(it, iterator)
paths.reverse()
else:
iterator = None
move_before = model.move_before
def move_func(it):
move_before(it, iterator)
get_iter = model.get_iter
iterators = map(get_iter, paths)
map(move_func, iterators)
self.update_queue_positions()
def __add_items(self, items, path=None, select_rows=False):
treeview = self.__treeview
model = treeview.get_model()
iterator = None
try:
if (not treeview.get_selection().get_selected_rows()[-1] or
path == -1):
raise TypeError
if not path:
path, column = treeview.get_cursor()
except TypeError:
path = (len(model),)
append = model.append
def insert_func(iterator, item):
append(item)
else:
iterator = model.get_iter(path)
insert_func = model.insert_before
items.reverse()
for item in items:
iterator = insert_func(iterator, [item, None])
if select_rows:
treeview.freeze_notify()
selection = treeview.get_selection()
selection.unselect_all()
select_path = selection.select_path
map(select_path, xrange(path[0], path[0] + len(items)))
treeview.thaw_notify()
self.update_queue_positions()
def __get_items(self, remove=True):
treeview = self.__treeview
model, selections = treeview.get_selection().get_selected_rows()
if selections:
get_iter = model.get_iter
iterators = map(get_iter, selections)
items = [model[iterator][0] for iterator in iterators]
if remove:
remove = model.remove
map(remove, iterators)
self.update_queue_positions()
return items
return []
def play_item(self, treeview, path, column=None):
model = treeview.get_model()
iterator = model.get_iter(path)
model[iterator][0].play()
if blacfg.getboolean("general", "queue.remove.when.activated"):
model.remove(iterator)
self.update_queue_positions()
def update_statusbar(self):
model = self.__treeview.get_model()
count = len(model)
if count == 0:
info = ""
else:
info = parse_track_list_stats(count, self.__size, self.__length)
BlaStatusbar.set_view_info(blaconst.VIEW_QUEUE, info)
def select(self, type_):
treeview = self.__treeview
selection = treeview.get_selection()
model, selected_paths = selection.get_selected_rows()
if type_ == blaconst.SELECT_ALL:
selection.select_all()
return
elif type_ == blaconst.SELECT_COMPLEMENT:
selected_paths = set(selected_paths)
paths = set([(p,) for p in xrange(len(model))])
paths.difference_update(selected_paths)
selection.unselect_all()
select_path = selection.select_path
map(select_path, paths)
return
elif type_ == blaconst.SELECT_BY_ARTISTS:
column_id = COLUMN_ARTIST
elif type_ == blaconst.SELECT_BY_ALBUMS:
column_id = COLUMN_ALBUM
elif type_ == blaconst.SELECT_BY_ALBUM_ARTISTS:
column_id = COLUMN_ALBUM_ARTIST
else:
column_id = COLUMN_GENRE
items = [model[path][0] for path in selected_paths]
eval_ = BlaEval(column_id).eval
values = set()
for item in items:
values.add(eval_(item.track).lower())
if not values:
return
r = re.compile(
r"^(%s)$" % "|".join(values), re.UNICODE | re.IGNORECASE)
items = [row[0] for row in model if r.match(eval_(row[0].track))]
paths = [row.path for row in model if row[0] in items]
selection.unselect_all()
select_path = selection.select_path
map(select_path, paths)
def update_queue_positions(self):
model = self.__treeview.get_model()
# Update the position labels for our own treeview.
for idx, row in enumerate(model):
model[row.path][1] = idx+1
# Invalidate the visible rows of the current playlists so the
# position labels also get updated in playlists.
playlist = playlist_manager.get_current_playlist()
playlist.invalidate_visible_rows()
# Calculate size and length of the queue and update the statusbar.
size = length = 0
for row in model:
track = row[0].track
size += track[FILESIZE]
length += track[LENGTH]
self.__size, self.__length = size, length
self.emit("count_changed", blaconst.VIEW_QUEUE, self.n_items)
self.update_statusbar()
def get_queue_positions(self, item):
model = self.__treeview.get_model()
return [row[1] for row in model if row[0] == item]
def queue_items(self, items):
if not items:
return
# If any of the items is not an instance of BlaTrackListItem it means
# all of the items are actually just URIs which stem from the library
# browser and are not part of a playlist.
if not isinstance(items[0], BlaTrackListItem):
items = map(BlaTrackListItem, items)
count = blaconst.QUEUE_MAX_ITEMS - self.n_items
self.__add_items(items[:count], path=-1)
def remove_items(self, items):
# This is invoked by playlists who want to remove tracks from the
# queue.
model = self.__treeview.get_model()
for row in model:
if row[0] in items:
model.remove(row.iter)
self.update_queue_positions()
def get_queue(self):
queue = []
playlists = playlist_manager.get_playlists()
for row in self.__treeview.get_model():
item = row[0]
playlist = item.playlist
try:
playlist_idx = playlists.index(playlist)
except ValueError:
item = (item.uri,)
else:
item = (playlist_idx,
playlist.get_path_from_item(item, all_=True))
queue.append(item)
return queue
def restore(self, items):
print_i("Restoring the play queue")
if not items:
return
playlists = playlist_manager.get_playlists()
for idx, item in enumerate(items):
try:
playlist_idx, path = item
except ValueError:
# Library tracks that are not part of a playlist.
item = BlaTrackListItem(item)
else:
item = playlists[playlist_idx].get_item_from_path(path)
items[idx] = item
self.queue_items(items)
def cut(self, *args):
self.clipboard = self.__get_items(remove=True)
ui_manager.update_menu(blaconst.VIEW_QUEUE)
def copy(self, *args):
# We specifically don't create actual copies of items here as it's not
# desired to have unique ones in the queue. Copied and pasted tracks
# should still refer to the same BlaTrackListItem instances which are
# possibly part of a playlist.
self.clipboard = self.__get_items(remove=False)
ui_manager.update_menu(blaconst.VIEW_QUEUE)
def paste(self, *args, **kwargs):
self.__add_items(items=self.clipboard, select_rows=True)
def remove(self, *args):
self.__get_items(remove=True)
def remove_duplicates(self):
unique = set()
model = self.__treeview.get_model()
for row in model:
uri = row[0].uri
if uri not in unique:
unique.add(uri)
else:
model.remove(row.iter)
self.update_queue_positions()
def remove_invalid_tracks(self):
model = self.__treeview.get_model()
isfile = os.path.isfile
for row in model:
uri = row[0].uri
if not isfile(uri):
model.remove(row.iter)
self.update_queue_positions()
def clear(self):
self.__treeview.get_model().clear()
self.update_queue_positions()
def get_item(self):
model = self.__treeview.get_model()
iterator = model.get_iter_first()
if iterator:
item = model[iterator][0]
model.remove(iterator)
self.update_queue_positions()
return item
return None
@property
def n_items(self):
return len(self.__treeview.get_model())
queue = BlaQueue()
|
gpl-2.0
| 1,768,491,662,655,170,000
| 33.562176
| 78
| 0.58174
| false
| 3.972901
| false
| false
| false
|
juliantaylor/scipy
|
scipy/optimize/cobyla.py
|
2
|
9434
|
"""
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
from scipy.optimize import _cobyla
from .optimize import Result, _check_unknown_options
__all__ = ['fmin_cobyla']
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
rhoend=1e-4, iprint=1, maxfun=1000, disp=None, catol=1e-6):
"""
Minimize a function using the Constrained Optimization BY Linear
Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.
Parameters
----------
func : callable
Function to minimize. In the form func(x, \\*args).
x0 : ndarray
Initial guess.
cons : sequence
Constraint functions; must all be ``>=0`` (a single function
if only 1 constraint). Each function takes the parameters `x`
as its first argument.
args : tuple
Extra arguments to pass to function.
consargs : tuple
Extra arguments to pass to constraint functions (default of None means
use same extra arguments as those passed to func).
Use ``()`` for no extra arguments.
rhobeg :
Reasonable initial changes to the variables.
rhoend :
Final accuracy in the optimization (not precisely guaranteed). This
is a lower bound on the size of the trust region.
iprint : {0, 1, 2, 3}
Controls the frequency of output; 0 implies no output. Deprecated.
disp : {0, 1, 2, 3}
Over-rides the iprint interface. Preferred.
maxfun : int
Maximum number of function evaluations.
catol : float
Absolute tolerance for constraint violations.
Returns
-------
x : ndarray
The argument that minimises `f`.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'COBYLA' `method` in particular.
Notes
-----
This algorithm is based on linear approximations to the objective
function and each constraint. We briefly describe the algorithm.
Suppose the function is being minimized over k variables. At the
jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
an approximate solution x_j, and a radius RHO_j.
    The algorithm constructs linear (i.e. linear plus a constant)
    approximations to the objective
function and constraint functions such that their function values
agree with the linear approximation on the k+1 points v_1,.., v_(k+1).
This gives a linear program to solve (where the linear approximations
of the constraint functions are constrained to be non-negative).
However the linear approximations are likely only good
approximations near the current simplex, so the linear program is
given the further requirement that the solution, which
will become x_(j+1), must be within RHO_j from x_j. RHO_j only
decreases, never increases. The initial RHO_j is rhobeg and the
final RHO_j is rhoend. In this way COBYLA's iterations behave
like a trust region algorithm.
Additionally, the linear program may be inconsistent, or the
approximation may give poor improvement. For details about
how these issues are resolved, as well as how the points v_i are
updated, refer to the source code or the references below.
References
----------
Powell M.J.D. (1994), "A direct search optimization method that models
the objective and constraint functions by linear interpolation.", in
Advances in Optimization and Numerical Analysis, eds. S. Gomez and
J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
Powell M.J.D. (1998), "Direct search algorithms for optimization
calculations", Acta Numerica 7, 287-336
Powell M.J.D. (2007), "A view of algorithms for optimization without
derivatives", Cambridge University Technical Report DAMTP 2007/NA03
Examples
--------
Minimize the objective function f(x,y) = x*y subject
to the constraints x**2 + y**2 < 1 and y > 0::
>>> def objective(x):
... return x[0]*x[1]
...
>>> def constr1(x):
... return 1 - (x[0]**2 + x[1]**2)
...
>>> def constr2(x):
... return x[1]
...
>>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
Normal return from subroutine COBYLA
NFVALS = 64 F =-5.000000E-01 MAXCV = 1.998401E-14
X =-7.071069E-01 7.071067E-01
array([-0.70710685, 0.70710671])
The exact solution is (-sqrt(2)/2, sqrt(2)/2).
"""
err = "cons must be a sequence of callable functions or a single"\
" callable function."
try:
len(cons)
except TypeError:
if callable(cons):
cons = [cons]
else:
raise TypeError(err)
else:
for thisfunc in cons:
if not callable(thisfunc):
raise TypeError(err)
if consargs is None:
consargs = args
# build constraints
con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
# options
if disp is not None:
iprint = disp
opts = {'rhobeg': rhobeg,
'tol': rhoend,
'iprint': iprint,
'disp': iprint != 0,
'maxiter': maxfun,
'catol': catol}
sol = _minimize_cobyla(func, x0, args, constraints=con,
**opts)
if iprint > 0 and not sol['success']:
print("COBYLA failed to find a solution: %s" % (sol.message,))
return sol['x']
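# The same toy problem as in the docstring example, expressed through the
# generic interface (a sketch; `tol` plays the role of `rhoend` here):
#
#   from scipy.optimize import minimize
#   cons = ({'type': 'ineq', 'fun': lambda x: 1 - x[0]**2 - x[1]**2},
#           {'type': 'ineq', 'fun': lambda x: x[1]})
#   res = minimize(lambda x: x[0]*x[1], [0.0, 0.1], method='COBYLA',
#                  constraints=cons, tol=1e-7)
#   # res.x is approximately (-sqrt(2)/2, sqrt(2)/2)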
def _minimize_cobyla(fun, x0, args=(), constraints=(),
rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
disp=False, catol=1e-6, **unknown_options):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
Options for the COBYLA algorithm are:
rhobeg : float
Reasonable initial changes to the variables.
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
disp : bool
Set to True to print convergence messages. If False,
        `verbosity` is ignored and set to 0.
maxiter : int
Maximum number of function evaluations.
catol : float
Tolerance (absolute) for constraint violations
This function is called by the `minimize` function with
`method=COBYLA`. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
rhoend = tol
if not disp:
iprint = 0
# check constraints
if isinstance(constraints, dict):
constraints = (constraints, )
for ic, con in enumerate(constraints):
# check type
try:
ctype = con['type'].lower()
except KeyError:
raise KeyError('Constraint %d has no type defined.' % ic)
except TypeError:
raise TypeError('Constraints must be defined using a '
'dictionary.')
except AttributeError:
raise TypeError("Constraint's type must be a string.")
else:
if ctype != 'ineq':
raise ValueError("Constraints of type '%s' not handled by "
"COBYLA." % con['type'])
# check function
if 'fun' not in con:
raise KeyError('Constraint %d has no function defined.' % ic)
# check extra arguments
if 'args' not in con:
con['args'] = ()
m = len(constraints)
def calcfc(x, con):
f = fun(x, *args)
for k, c in enumerate(constraints):
con[k] = c['fun'](x, *c['args'])
return f
info = np.zeros(4, np.float64)
xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun,
dinfo=info)
if info[3] > catol:
# Check constraint violation
info[0] = 4
return Result(x=xopt,
status=int(info[0]),
success=info[0] == 1,
message={1: 'Optimization terminated successfully.',
2: 'Maximum number of function evaluations has '
'been exceeded.',
3: 'Rounding errors are becoming damaging in '
'COBYLA subroutine.',
4: 'Did not converge to a solution satisfying '
'the constraints. See `maxcv` for magnitude '
'of violation.'
}.get(info[0], 'Unknown exit status.'),
nfev=int(info[1]),
fun=info[2],
maxcv=info[3])
if __name__ == '__main__':
from math import sqrt
def fun(x):
return x[0] * x[1]
def cons(x):
return 1 - x[0]**2 - x[1]**2
x = fmin_cobyla(fun, [1., 1.], cons, iprint=3, disp=1)
print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.)))
|
bsd-3-clause
| -2,557,425,230,123,296,300
| 32.572954
| 79
| 0.587662
| false
| 4.024744
| false
| false
| false
|
dcsch/pyif
|
pyif/util.py
|
1
|
1607
|
def is_whitespace(c):
if c == " " or c == "\t" or c == "\n":
return True
return False
def compress_whitespace(s):
"""
Remove extraneous whitespace from the string, that being all whitespace at the beginning
and end of the string and anything beyond a single space within the string.
"""
new_str = ""
in_text = False
for i in range(len(s)):
c = s[i]
if is_whitespace(c):
if not in_text:
# Before any text, so ignore
pass
else:
# We're leaving text, so we allow one space and ignore all others
new_str += " "
in_text = False
else:
# Handling text
new_str += c
in_text = True
if new_str[-1:] == " ":
new_str = new_str[:-1]
return new_str
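# A couple of illustrative calls (not part of the original module):
#
#   compress_whitespace("  hello   world \n")  ->  "hello world"
#   compress_whitespace("a\tb")                ->  "a b"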
def cw(s):
return compress_whitespace(s)
def insert_newlines(s, width):
"""
Insert newlines into the string so words don't wrap at the end of lines.
"""
new_str = ""
# Jump to the end of a line and scan backwards for whitespace
start = 0
pos = width
while pos < len(s):
for i in range(pos, pos - width, -1):
if is_whitespace(s[i]):
                # Skip back over a run of whitespace: stop at the first
                # non-whitespace character so the line breaks just after it.
                for j in range(i - 1, pos - width, -1):
                    if not is_whitespace(s[j]):
                        i = j + 1
                        break
new_str += s[start:i + 1] + "\n"
start = i + 1
pos += width
break
if start < len(s):
new_str += s[start:]
return new_str
|
mit
| -8,638,453,769,667,749,000
| 25.783333
| 92
| 0.481643
| false
| 3.958128
| false
| false
| false
|
mz314/django-sass-processor
|
sass_processor/management/commands/compilescss.py
|
1
|
7090
|
# -*- coding: utf-8 -*-
import os
import sass
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.utils.importlib import import_module
from django.utils.encoding import force_bytes
from compressor.offline.django import DjangoParser
from compressor.exceptions import TemplateDoesNotExist, TemplateSyntaxError
from sass_processor.templatetags.sass_tags import SassSrcNode
from sass_processor.storage import find_file
class Command(BaseCommand):
help = "Compile SASS/SCSS into CSS outside of the request/response cycle"
option_list = BaseCommand.option_list + (make_option('--delete-files', action='store_true',
dest='delete_files', default=False, help='Delete generated `*.css` files instead of creating them.'),)
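    # Typical invocations (a sketch; the manage.py path is an assumption):
    #
    #   ./manage.py compilescss                 # compile every referred SASS/SCSS file
    #   ./manage.py compilescss --delete-files  # delete the generated *.css files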
def __init__(self):
self.parser = DjangoParser(charset=settings.FILE_CHARSET)
self.template_exts = getattr(settings, 'SASS_TEMPLATE_EXTS', ['.html'])
self.output_style = getattr(settings, 'SASS_OUTPUT_STYLE', 'compact')
super(Command, self).__init__()
def handle(self, *args, **options):
self.verbosity = int(options['verbosity'])
self.delete_files = options['delete_files']
self.compiled_files = []
templates = self.find_templates()
for template_name in templates:
self.parse_template(template_name)
if self.verbosity > 0:
if self.delete_files:
self.stdout.write('Successfully deleted {0} previously generated `*.css` files.'.format(len(self.compiled_files)))
else:
self.stdout.write('Successfully compiled {0} referred SASS/SCSS files.'.format(len(self.compiled_files)))
def find_templates(self):
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module, 'get_template_sources', loader.get_template_sources)
paths.update(list(get_template_sources('')))
except (ImportError, AttributeError):
pass
if not paths:
raise CommandError("No template paths found. None of the configured template loaders provided template paths")
templates = set()
for path in paths:
for root, _, files in os.walk(path):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(name.endswith(ext) for ext in self.template_exts))
if not templates:
raise CommandError("No templates found. Make sure your TEMPLATE_LOADERS and TEMPLATE_DIRS settings are correct.")
return templates
def get_loaders(self):
try:
from django.template.loader import (
find_template as finder_func)
except ImportError:
from django.template.loader import (find_template_source as finder_func)
try:
# Force Django to calculate template_source_loaders from
# TEMPLATE_LOADERS settings, by asking to find a dummy template
finder_func('test')
            # Catch a broad Exception here: even with a narrower try/except,
            # the dummy lookup could raise errors that crashed the
            # command-line execution, so any failure is simply ignored.
except Exception:
pass
loaders = []
        # template_source_loaders is None when the module is first imported
        # (that is Django's default); running finder_func above populates it.
        # Re-import it here, after that call, so we read the populated value
        # rather than a stale None.
from django.template.loader import template_source_loaders
for loader in template_source_loaders:
if hasattr(loader, 'loaders'):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def parse_template(self, template_name):
try:
template = self.parser.parse(template_name)
except IOError: # unreadable file -> ignore
self.stdout.write("Unreadable template at: %s\n" % template_name)
return
except TemplateSyntaxError as e: # broken template -> ignore
self.stdout.write("Invalid template %s: %s\n" % (template_name, e))
return
except TemplateDoesNotExist: # non existent template -> ignore
self.stdout.write("Non-existent template at: %s\n" % template_name)
return
except UnicodeDecodeError:
self.stdout.write("UnicodeDecodeError while trying to read template %s\n" % template_name)
try:
nodes = list(self.walk_nodes(template))
except Exception as e:
# Could be an error in some base template
self.stdout.write("Error parsing template %s: %s\n" % (template_name, e))
else:
for node in nodes:
if self.delete_files:
self.delete_file(node)
else:
self.compile(node)
def compile(self, node):
sass_filename = find_file(node.path)
if not sass_filename or sass_filename in self.compiled_files:
return
content = sass.compile(include_paths=node.include_paths, filename=sass_filename, output_style=self.output_style)
basename, _ = os.path.splitext(sass_filename)
destpath = basename + '.css'
with open(destpath, 'wb') as fh:
fh.write(force_bytes(content))
self.compiled_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Compiled SASS/SCSS file: '{0}'\n".format(node.path))
def delete_file(self, node):
"""
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
"""
sass_filename = find_file(node.path)
if not sass_filename:
return
basename, _ = os.path.splitext(sass_filename)
destpath = basename + '.css'
if os.path.isfile(destpath):
os.remove(destpath)
self.compiled_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Deleted '{0}'\n".format(destpath))
def walk_nodes(self, node):
"""
Iterate over the nodes recursively yielding the templatetag 'sass_src'
"""
for node in self.parser.get_nodelist(node):
if isinstance(node, SassSrcNode):
if node.is_sass:
yield node
else:
for node in self.walk_nodes(node):
yield node
|
mit
| -9,080,522,471,985,560,000
| 44.448718
| 130
| 0.621157
| false
| 4.379246
| false
| false
| false
|
Nithanaroy/random_scripts
|
CreateNeoDB.py
|
1
|
1357
|
from py2neo import Graph
graph = Graph("http://neo4j:1234@localhost:7474/db/data/")
# Insert data
insert_query = '''
UNWIND {pairs} as pair
MERGE (p1:Person {name:pair[0]})
MERGE (p2:Person {name:pair[1]})
MERGE (p1)-[:KNOWS]-(p2);
'''
data = [["Jim", "Mike"], ["Jim", "Billy"], ["Anna", "Jim"],
["Anna", "Mike"], ["Sally", "Anna"], ["Joe", "Sally"],
["Joe", "Bob"], ["Bob", "Sally"]]
graph.cypher.execute(insert_query, {"pairs": data})
# Friends of a friend
foaf_query = '''
MATCH (person:Person {name: {name}})-[:KNOWS*2]-(foaf)
WHERE NOT (person)-[:KNOWS]-(foaf)
RETURN foaf.name AS name
'''
results = graph.cypher.execute(foaf_query, {"name": "Joe"})
print 'FOF'
for record in results:
print(record)
# Common friends
common_friends_query = """
MATCH (user:Person)-[:KNOWS]-(friend)-[:KNOWS]-(foaf:Person)
WHERE user.name = {user} AND foaf.name = {foaf}
RETURN friend.name AS friend
"""
results = graph.cypher.execute(common_friends_query, {"user": "Joe", "foaf": "Sally"})
for record in results:
print(record)
# Connecting paths
connecting_paths_query = """
MATCH path = shortestPath((p1:Person)-[:KNOWS*..6]-(p2:Person))
WHERE p1.name = {name1} AND p2.name = {name2}
RETURN path
"""
results = graph.cypher.execute(connecting_paths_query, {"name1": "Joe", "name2": "Billy"})
for record in results:
print(record)
|
mit
| 8,349,724,321,448,204,000
| 24.12963
| 90
| 0.640383
| false
| 2.640078
| false
| false
| false
|
aularon/meld
|
setup_win32.py
|
1
|
3455
|
#!/usr/bin/env python
import glob
import os
import site
import sys
from cx_Freeze import setup, Executable
import meld.build_helpers
import meld.conf
site_dir = site.getsitepackages()[1]
include_dll_path = os.path.join(site_dir, "gnome")
missing_dll = [
'libgtk-3-0.dll',
'libgdk-3-0.dll',
'libatk-1.0-0.dll',
'libintl-8.dll',
'libzzz.dll',
'libpyglib-gi-2.0-python27-0.dll',
'libwinpthread-1.dll',
'libcairo-gobject-2.dll',
'libgdk_pixbuf-2.0-0.dll',
'libpango-1.0-0.dll',
'libpangocairo-1.0-0.dll',
'libpangoft2-1.0-0.dll',
'libpangowin32-1.0-0.dll',
'libffi-6.dll',
'libfontconfig-1.dll',
'libfreetype-6.dll',
'libgio-2.0-0.dll',
'libglib-2.0-0.dll',
'libgmodule-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgirepository-1.0-1.dll',
'libgtksourceview-3.0-1.dll',
'libjpeg-8.dll',
'libpng16-16.dll',
'libgnutls-26.dll',
'libxml2-2.dll',
'librsvg-2-2.dll',
'libharfbuzz-gobject-0.dll',
'libwebp-4.dll',
]
gtk_libs = [
'lib/gdk-pixbuf-2.0',
'lib/girepository-1.0',
'share/glib-2.0',
'share/icons',
]
include_files = [(os.path.join(include_dll_path, path), path) for path in
missing_dll + gtk_libs]
build_exe_options = {
"compressed": False,
"icon": "data/icons/meld.ico",
"includes": ["gi"],
"packages": ["gi", "weakref"],
"include_files": include_files,
}
bdist_msi_options = {
"upgrade_code": "{1d303789-b4e2-4d6e-9515-c301e155cd50}",
}
setup(
name="Meld",
version=meld.conf.__version__,
description='Visual diff and merge tool',
author='Kai Willadsen',
author_email='kai.willadsen@gmail.com',
url='http://meldmerge.org',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Software Development',
'Topic :: Software Development :: Version Control',
],
options = {
"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options,
},
executables = [
Executable(
"bin/meld",
base="Win32GUI",
targetName="Meld.exe",
shortcutName="Meld",
shortcutDir="ProgramMenuFolder",
),
],
packages=[
'meld',
'meld.ui',
'meld.util',
'meld.vc',
],
package_data={
'meld': ['README', 'COPYING', 'NEWS']
},
scripts=['bin/meld'],
data_files=[
('share/man/man1',
['meld.1']
),
('share/doc/meld-' + meld.conf.__version__,
['COPYING', 'NEWS']
),
('share/meld',
['data/meld.css']
),
('share/meld/icons',
glob.glob("data/icons/*.png") +
glob.glob("data/icons/COPYING*")
),
('share/meld/ui',
glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
),
],
cmdclass={
"build_i18n": meld.build_helpers.build_i18n,
"build_help": meld.build_helpers.build_help,
"build_icons": meld.build_helpers.build_icons,
"build_data": meld.build_helpers.build_data,
}
)
|
gpl-2.0
| 3,119,004,850,918,130,700
| 24.404412
| 85
| 0.557453
| false
| 2.952991
| false
| false
| false
|
lypnol/graph-theory
|
problem-02/submissions/coco-backtracking-improved.py
|
1
|
2420
|
from submission import Submission
def calculate_length(permutation, matrix):
n = len(permutation)
length = 0
for i in range(n-1):
length += matrix[permutation[i]][permutation[i+1]]
length += matrix[permutation[-1]][permutation[0]]
return length
def glouton(graphe, depart=None):
sommets = list(graphe.keys())
if depart is None:
depart = sommets.pop()
else:
sommets.remove(depart)
circuit = [depart]
position = depart
while sommets:
        # select the closest remaining vertex
min_l = float("inf")
closest_s = None
for s in sommets:
if graphe[position][s] < min_l:
closest_s = s
min_l = graphe[position][s]
sommets.remove(closest_s)
circuit.append(closest_s)
position = closest_s
return circuit, calculate_length(circuit, graphe)
def glouton_all_starts(graphe):
sommets = list(graphe.keys())
best_s = min([glouton(graphe, depart=s) for s in sommets], key=lambda x: x[1])
return best_s
def actual_length(path, matrix):
return sum((matrix[path[i]][path[i+1]] for i in range(len(path) - 1)))
def tsp_backtracking_closest_neighbours_rec(path, restant, max_length, matrix):
if not restant:
return path, calculate_length(path, matrix)
if actual_length(path, matrix) > max_length:
return (None, None)
best_length = max_length
best_path = None
for p in restant: #sorted(restant, key=lambda x: matrix[path[-1]][x]):
final_path, length = tsp_backtracking_closest_neighbours_rec(path + [p], restant - {p}, max_length, matrix)
if final_path is not None and length <= best_length:
max_length = length
best_length = length
best_path = final_path
if best_path is not None:
return best_path, best_length
else:
return (None, None)
def tsp_backtracking_closest_neighbours(matrix):
sommets = list(matrix.keys())
_, best_length = glouton_all_starts(matrix)
s = sommets.pop()
return tsp_backtracking_closest_neighbours_rec([s], set(sommets), best_length, matrix)
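# Tiny illustrative input (not part of the submission): the matrix is a dict
# of dicts of pairwise distances keyed by vertex.
#
#   matrix = {'a': {'a': 0, 'b': 1, 'c': 2},
#             'b': {'a': 1, 'b': 0, 'c': 1},
#             'c': {'a': 2, 'b': 1, 'c': 0}}
#   path, length = tsp_backtracking_closest_neighbours(matrix)
#   # length == 4: with three vertices every tour closes the same cycle.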
class CocoBacktrackingImproved(Submission):
def author(self):
return "coco-backtracking-improved"
def run(self, input):
matrix = input
path, length = tsp_backtracking_closest_neighbours(matrix)
return path + [path[0]]
|
mit
| -6,264,765,515,762,897,000
| 32.611111
| 115
| 0.62686
| false
| 3.333333
| false
| false
| false
|
z3r0zh0u/pyutls
|
MyProcLib.py
|
1
|
4286
|
"""
My Process Execution Library
"""
import os
import time
import Queue
import platform
import threading
import subprocess
NewLine = '\n'
if platform.system() == 'Windows':
NewLine = '\r\n'
def queue_output(out, queue):
"""Queue output"""
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def retrieve_output(queue):
"""Retrieve output"""
output = ''
try:
data = queue.get_nowait()
while data != '':
output += data
data = queue.get_nowait()
except Queue.Empty:
pass
return output
class MyProc:
def __init__(self, proc_name, debug = False):
self.proc_name = proc_name
self.debug = debug
self.interactive = False
self.proc = None
self.out_queue = None
self.err_queue = None
self.__debug_print('[*] Process: ' + proc_name)
def run_proc(self, param = None, no_wait = False):
"""Run process only"""
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
if no_wait:
subprocess.Popen(cmd)
else:
subprocess.call(cmd)
def run_proc_output(self, param = None):
"""Run process and return the output"""
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
output = subprocess.check_output(cmd)
self.__debug_print('[*] Output:' + NewLine + output)
return output
def run_proc_interactive(self, param = None):
"""Interactive with process"""
self.interactive = True
cmd = [self.proc_name]
if param is not None:
cmd += param.split()
self.__debug_print('[*] Run: ' + str(cmd))
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.out_queue = Queue.Queue()
self.err_queue = Queue.Queue()
out_thread = threading.Thread(target=queue_output, args=(self.proc.stdout, self.out_queue))
err_thread = threading.Thread(target=queue_output, args=(self.proc.stderr, self.err_queue))
out_thread.daemon = True
err_thread.daemon = True
out_thread.start()
err_thread.start()
time.sleep(0.1)
def send_input(self, input):
"""Send input to process"""
if self.interactive:
self.__debug_print('[*] Stdin: ' + input)
self.proc.stdin.write(input + NewLine)
time.sleep(0.1)
def get_output(self):
"""Get output"""
out_stdout = ''
out_stderr = ''
if self.interactive:
out_stdout = retrieve_output(self.out_queue)
out_stderr = retrieve_output(self.err_queue)
if len(out_stdout) > 0:
self.__debug_print('[*] Stdout: ' + NewLine + out_stdout)
self.__debug_print('-' * 40)
if len(out_stderr) > 0:
self.__debug_print('[*] Stderr: ' + NewLine + out_stderr)
self.__debug_print('-' * 40)
return out_stdout, out_stderr
def __debug_print(self, message):
"""Print debug info"""
if self.debug:
print message
def run_process():
"""Run process"""
proc_name = 'c:\\Windows\\System32\\cmd.exe'
proc = MyProc(proc_name, debug = True)
param = ' /c notepad test.txt'
proc.run_proc(param, no_wait = True)
param = ' /c ping 127.0.0.1'
output = proc.run_proc_output(param)
print output
proc.run_proc_interactive()
while True:
try:
input = raw_input("Input: ")
proc.send_input(input)
out_stdout, out_stderr = proc.get_output()
if out_stdout != '':
print out_stdout
if out_stderr != '':
print out_stderr
except Exception as e:
print '[!] Error: ' + str(e)
break
if __name__ == '__main__':
run_process()
|
gpl-2.0
| -2,528,289,493,108,418,600
| 20.984615
| 112
| 0.519365
| false
| 3.953875
| false
| false
| false
|
GammaC0de/pyload
|
src/pyload/plugins/downloaders/ZippyshareCom.py
|
1
|
4071
|
# -*- coding: utf-8 -*-
import re
import urllib.parse
from bs4 import BeautifulSoup
from pyload.core.utils.misc import eval_js
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ZippyshareCom(SimpleDownloader):
__name__ = "ZippyshareCom"
__type__ = "downloader"
__version__ = "0.98"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?P<HOST>www\d{0,3}\.zippyshare\.com)/(?:[vd]/|view\.jsp.*key=)(?P<KEY>[\w^_]+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Zippyshare.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "vuolter@gmail.com"),
("sebdelsol", "seb.morin@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
COOKIES = [("zippyshare.com", "ziplocale", "en")]
URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://\g<HOST>/v/\g<KEY>/file.html")]
NAME_PATTERN = r'(?:<title>Zippyshare.com - |"/)(?P<N>[^/]+)(?:</title>|";)'
SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r"does not exist (anymore )?on this server<"
TEMP_OFFLINE_PATTERN = r"^unmatchable$"
LINK_PATTERN = r"document.location = '(.+?)'"
def setup(self):
self.chunk_limit = -1
self.multi_dl = True
self.resume_download = True
def handle_free(self, pyfile):
self.captcha = ReCaptcha(pyfile)
captcha_key = self.captcha.detect_key()
if captcha_key:
try:
                m = re.search(self.LINK_PATTERN, self.data)
                #: re.search returns a match object; keep only the captured URL
                self.link = m.group(1) if m else None
self.captcha.challenge()
except Exception as exc:
self.error(exc)
else:
self.link = self.fixurl(self.get_link())
if ".com/pd/" in self.link:
self.load(self.link)
self.link = self.link.replace(".com/pd/", ".com/d/")
if self.link and pyfile.name == "file.html":
pyfile.name = urllib.parse.unquote(self.link.split("/")[-1])
def get_link(self):
#: Get all the scripts inside the html body
soup = BeautifulSoup(self.data)
scripts = [
s.getText()
for s in soup.body.findAll("script", type="text/javascript")
if "('dlbutton').href =" in s.getText()
]
#: Emulate a document in JS
inits = [
"""
var document = {}
document.getElementById = function(x) {
if (!this.hasOwnProperty(x)) {
this[x] = {getAttribute : function(x) { return this[x] } }
}
return this[x]
}
"""
]
#: inits is meant to be populated with the initialization of all the DOM elements found in the scripts
eltRE = r'getElementById\([\'"](.+?)[\'"]\)(\.)?(getAttribute\([\'"])?(\w+)?([\'"]\))?'
for m in re.findall(eltRE, " ".join(scripts)):
JSid, JSattr = m[0], m[3]
values = [
f for f in (elt.get(JSattr, None) for elt in soup.findAll(id=JSid)) if f
]
if values:
inits.append(
'document.getElementById("{}")["{}"] = "{}"'.format(
JSid, JSattr, values[-1]
)
)
#: Add try/catch in JS to handle deliberate errors
scripts = ["\n".join(("try{", script, "} catch(err){}")) for script in scripts]
#: Get the file's url by evaluating all the scripts
scripts = inits + scripts + ["document.dlbutton.href"]
return eval_js("\n".join(scripts))
|
agpl-3.0
| -8,509,155,195,058,406,000
| 33.5
| 110
| 0.519774
| false
| 3.59629
| false
| false
| false
|
mbelmadani/motifgp
|
motifgp/hammingregex.py
|
1
|
6109
|
import re
import numpy
def sxor(s1,s2):
    # zip the two strings into character pairs, convert each character to its
    # ASCII code (ord) and XOR the codes pairwise, returning a list of
    # integers (0 where the characters match, non-zero where they differ)
return [ord(a) ^ ord(b) for a,b in zip(s1,s2)]
def hamming_pre_string(regex, sequence):
"""
    To compute the hamming distance, we need to match the regex on the sequence and then replace the match with "1"s.
"""
match = re.search(regex, sequence)
if match:
match = match.group(0)
else:
#match = ""
#"0" * len(sequence)
return None
placeholder = "1" * len(match)
pre_string = list(sequence.replace(match, placeholder))
for i in range(len(pre_string)):
if pre_string[i] != '1':
pre_string[i] = '0'
return "".join(pre_string)
def compute_hamming(list_of_regex, template, sequence):
"""
    Build a weighted average over the list of regexes given.
    Each regex is matched against the sequence, its hamming distance to the
    template match is computed, and the result is weighted by
    1/len(list_of_regex).
    The weighted hamming strings are then summed.
    Returns the overall performance of list_of_regex vs. template on
    sequence.
"""
hamming_template = hamming_pre_string(template, sequence)
regexs = None
if type(list_of_regex) == str:
regexs = list(list_of_regex)
else:
regexs = list_of_regex
output = None
for regex in regexs:
hamming_bs = hamming_pre_string(regex, sequence)
#print bs1+"$", "\n", bs2+"$"
#print "".join([str(x) for x in sxor(bs1, bs2)])
if hamming_bs == None:
xor_string = [float(x) for x in hamming_template]
#"".join([str(x) for x in sxor(hamming_template, str("0"*len(hamming_template)))]) # Invert template because no match was found. So match == everything but the template motif.
else:
#print hamming_bs, hamming_template
xor_string = sxor(hamming_bs, hamming_template)
xor_string = [x/float(len(regexs)) for x in xor_string]
"""
print ">"
print xor_string
print "< "
"""
if output:
output = [x + y for x,y in zip(output, xor_string)]
else:
output = xor_string
return output
def score_hamming(floatstring):
"""
    Converts the weighted hamming distance string to a numerical value
"""
return sum( floatstring ) / float(len(floatstring))
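# Worked example (the motif and sequence are made up for illustration):
# the template "AAA" and the candidate "AA" matched on "GAAAG" give the
# pre-strings "01110" and "01100", which differ at exactly one position, so
#
#   floats = compute_hamming(["AA"], "AAA", "GAAAG")  # [0.0, 0.0, 0.0, 1.0, 0.0]
#   score_hamming(floats)                             # 1.0 / 5 = 0.2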
class HammingBenchmark():
"""
Class to contain a benchmark of hamming distance against a synthetic dataset
"""
def __init__(self):
self.scores = {}
"""
self.max = -1
self.min = -1
self.mean = -1
self.std = -1
self.rmax = -1
self.rmin = -1
self.rmean = -1
self.rstd = -1
"""
def __repr__(self):
return "HammingBenchmark()"
def __str__(self):
output=""
for each in self.scores:
output += each+":\n"
benchmark_str = [
#self.scoxres, "\n",
"max:",self.scores[each]["max"],
"min:",self.scores[each]["min"],
"mean:",self.scores[each]["mean"],
"std:",self.scores[each]["std"],
]
output += ",".join([str(x) for x in benchmark_str]) + "\n"
#print benchmark_str
#output = ",".join(str(x) for x in benchmark_str)
return output
def compile(self, candidates, sequence_tuples):
"""
candidates; a batch of regular expression that are to be evaluated
sequence_tuples: a list of pairs of templates-sequences
"""
bins = {}
for each in candidates: # Slice candidates one by one. This can be changes to have a real bin behavior
sequence_scores = []
candidate = [each] #TODO:CHANGEME; Quick hack to evaluate each candidate on its own versus the sequence set
bins[each] = {}
bins[each]["score"] = []
bins[each]["max"] = -1
bins[each]["min"] = -1
bins[each]["std"] = -1
bins[each]["mean"] = -1
for template, sequence in sequence_tuples:
hamming_str_score = compute_hamming(candidate, template, sequence)
candidates_score = tuple((sum(hamming_str_score), score_hamming(hamming_str_score) , hamming_str_score ))
bins[each]["score"].append(candidates_score)
self.scores = bins
self.update()
def update(self):
for each in self.scores.keys():
numeric_scores = [x[0] for x in self.scores[each]["score"]]
#if not numeric_scores:
# numeric_scores.append(0)
self.scores[each]["max"] = max(numeric_scores)
self.scores[each]["min"] = min(numeric_scores)
self.scores[each]["std"] = numpy.std(numeric_scores)
self.scores[each]["mean"] = numpy.mean(numeric_scores)
def flush_data_points(self, ks, xs, outpath, seed, CLEAR=True):
"""
Prints a data point y such that k[x] = y
k is an individual. x is the mapping value
seed will be used to color the datapoint
outpath is where to append the datapoint. CLEAR overwrites instead of appending.
"""
if CLEAR:
f = open(outpath, 'w')
else:
f = open(outpath, 'a')
for idx in range(len(ks)):
each = ks[idx]
x = xs[idx]
scores = self.scores[each]
y = scores["mean"],scores["std"],
output = [str(x) , str(y) , str(seed)]
output = "\t".join(output)
output += "\n"
print output
f.write(output)
|
lgpl-3.0
| -8,711,762,516,253,431,000
| 32.201087
| 187
| 0.549026
| false
| 3.926093
| false
| false
| false
|
johnshiver/football_tools
|
football/core/models/weekly_stats.py
|
1
|
1824
|
from django.db import models
from django.conf import settings
from model_utils.models import TimeStampedModel
class WeeklyStats(TimeStampedModel):
player = models.ForeignKey('core.Player', related_name='player_stats')
season = models.ForeignKey('core.Season')
week = models.ForeignKey('core.Week', related_name='weekly_stats')
# rb stats
rushing_atts = models.SmallIntegerField(default=0)
rushing_yds = models.IntegerField(default=0)
rushing_tds = models.IntegerField(default=0)
# qb stats
passing_atts = models.SmallIntegerField(default=0)
passing_cmps = models.IntegerField(default=0)
passing_yds = models.IntegerField(default=0)
passing_tds = models.IntegerField(default=0)
passing_ints = models.SmallIntegerField(default=0)
# wr stats
receiving_rec = models.SmallIntegerField(default=0)
receiving_yds = models.IntegerField(default=0)
receiving_tds = models.IntegerField(default=0)
total_score = models.IntegerField(default=0)
def __str__(self):
return "{} for {}: score -> {}".format(self.week,
self.player,
self.total_score)
def calc_total_score(self):
total = 0
total += (settings.RUSHING_TD_POINTS * self.rushing_tds)
total += (settings.RUSHING_YD_POINTS * self.rushing_yds)
total += (settings.PASSING_YD_POINTS * self.passing_yds)
total += (settings.PASSING_TD_POINTS * self.passing_tds)
total += (settings.PASSING_INT_POINTS * self.passing_ints)
total += (settings.RECEIVING_YD_POINTS * self.receiving_yds)
total += (settings.RECEIVING_TD_POINTS * self.receiving_tds)
total += (settings.RECEIVING_REC_POINTS * self.receiving_rec)
return total
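    # Example of how the multipliers combine (the point values are assumptions,
    # not the project's actual settings): with RUSHING_TD_POINTS = 6 and
    # RUSHING_YD_POINTS = 0.1, a 100-yard, one-touchdown rushing week
    # contributes 6 + 100 * 0.1 = 16 before the passing and receiving terms.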
|
mit
| 6,390,556,806,062,743,000
| 37.808511
| 74
| 0.655702
| false
| 3.640719
| false
| false
| false
|
our-city-app/oca-backend
|
src/rogerthat/migrations/delete_all_models_by_kind.py
|
1
|
1605
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db, ndb
from google.appengine.ext.ndb.query import QueryOptions
from rogerthat.bizz.job import run_job, MODE_BATCH
from rogerthat.consts import MIGRATION_QUEUE
def job(cls, namespace=None, batch_size=50):
if issubclass(cls, db.Model):
run_job(_qry_db, [cls, namespace], _worker_db, [], worker_queue=MIGRATION_QUEUE, mode=MODE_BATCH,
batch_size=batch_size)
elif issubclass(cls, ndb.Model):
run_job(_qry_ndb, [cls, namespace], _worker_ndb, [], worker_queue=MIGRATION_QUEUE, mode=MODE_BATCH,
batch_size=batch_size)
def _qry_db(cls, namespace=None):
return cls.all(keys_only=True,
namespace=namespace)
def _worker_db(keys):
db.delete(db.get(keys))
def _qry_ndb(cls, namespace=None):
return cls.query(default_options=QueryOptions(keys_only=True),
namespace=namespace)
def _worker_ndb(keys):
ndb.delete_multi(keys)
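# Usage sketch (SomeModel is a placeholder for any db.Model or ndb.Model kind):
#
#   from mymodule.models import SomeModel
#   job(SomeModel)                                       # delete every entity of this kind
#   job(SomeModel, namespace='tenant-1', batch_size=200)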
|
apache-2.0
| 3,319,584,261,818,788,000
| 31.755102
| 107
| 0.697196
| false
| 3.429487
| false
| false
| false
|
JohnyEngine/CNC
|
opencamlib/scripts/drop-cutter/drop_cutter_one-triangle_2.py
|
1
|
2299
|
import ocl
import pyocl
import camvtk
import vtk
import math
def drawPoints(myscreen, clpoints, ccpoints):
c=camvtk.PointCloud( pointlist=clpoints, collist=ccpoints)
c.SetPoints()
myscreen.addActor(c )
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
# triangle
a=ocl.Point(1,0,0.4)
b=ocl.Point(0,1,0)
c=ocl.Point(0,0,-0.2)
t = ocl.Triangle(b,c,a)
# draw the triangle with VTK
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
# cutter
radius1=1
length=5
angle = math.pi/4
#cutter = ocl.ConeCutter(0.37, angle)
cutter = ocl.BallCutter(0.532, length)
#cutter = ocl.CylCutter(0.3, length)
#cutter = ocl.BullCutter(0.5,0.123, length)
print cutter
# grid on which we run drop-cutter
minx=-0.5
dx=0.0051
maxx=1.5
miny=-0.7
dy=dx
maxy=1.5
z=-0.7
clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
print len(clpoints), "cl-points to evaluate"
n=0
for cl in clpoints:
#cutter.vertexDrop(cl,t)
#cutter.edgeDrop(cl,t)
#cutter.facetDrop(cl,t)
cutter.dropCutter(cl,t) # this calls all three above: vertex,facet,edge
n=n+1
if (n % int(len(clpoints)/10)) == 0:
print n/int(len(clpoints)/10), " ",
print "done."
print "rendering..."
print " len(clpoints)=", len(clpoints)
camvtk.drawCLPointCloud(myscreen, clpoints)
print "done."
# draw a sphere, just for fun
origo = camvtk.Sphere(center=(0,0,0) , radius=0.1, color=camvtk.blue)
origo.SetOpacity(0.2)
myscreen.addActor( origo )
myscreen.camera.SetPosition(0.5, 3, 2)
myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
myscreen.camera.SetClippingRange(-20,20)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
apache-2.0
| 785,468,906,210,808,400
| 27.7375
| 79
| 0.606351
| false
| 2.554444
| false
| false
| false
|
bnoi/scikit-tracker
|
sktracker/io/tests/test_metadataio.py
|
1
|
2350
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
from sktracker import data
from sktracker.io import get_metadata
from sktracker.io import validate_metadata
def test_get_metadata():
fname = data.sample_ome()
real_metadata = {'PhysicalSizeY': 0.065,
'SizeC': 2,
'SizeZ': 8,
'SizeT': 20,
'PhysicalSizeX': 0.065,
'SizeY': 20,
'SizeX': 50,
'PhysicalSizeZ': 0.8,
'DimensionOrder': ['T', 'Z', 'C', 'Y', 'X'],
'AcquisitionDate': '2014-02-24T15:29:53',
'Shape': (20, 8, 2, 20, 50)}
guessed_metadata = get_metadata(fname, json_discovery=True)
guessed_metadata.pop("FileName", None)
assert real_metadata == guessed_metadata
def test_invalidate_metadata():
bad_metadata = {'SizeC': 2, 'SizeZ': 8}
assert_raises(ValueError, validate_metadata, bad_metadata, ['DimensionOrder'])
def test_validate_metadata():
good_metadata = {'PhysicalSizeY': 0.065,
'SizeC': 2,
'SizeZ': 8,
'SizeT': 20,
'PhysicalSizeX': 0.065,
'SizeY': 20,
'SizeX': 50,
'PhysicalSizeZ': 0.8,
'DimensionOrder': ['T', 'Z', 'C', 'Y', 'X'],
'AcquisitionDate': '2014-02-24T15:29:53',
'Shape': (20, 8, 2, 20, 50),
'FileName': '../../data/sample.ome.tif'}
default_good = validate_metadata(good_metadata)
extra_good = validate_metadata(good_metadata,
keys=['PhysicalSizeZ',
'DimensionOrder',
'AcquisitionDate'])
assert default_good and extra_good
def test_get_from_metadata_json():
from sktracker.io.metadataio import _get_from_metadata_json
assert _get_from_metadata_json(data.metadata_json()) == {'PhysicalSizeZ': 0.8}
store_path = data.sample_h5_temp()
assert _get_from_metadata_json(store_path) == {}
|
bsd-3-clause
| 3,016,241,812,330,578,000
| 31.191781
| 82
| 0.513191
| false
| 3.962901
| false
| false
| false
|
iABC2XYZ/abc
|
Scripts/RFQVane/VaneStructure2.py
|
1
|
7067
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 03 11:11:22 2017
@author: A
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import fsolve
from scipy.special import iv
#from scipy.signal import find_peaks_cwt
plt.close('all')
zStart=0+0.67
zEnd=230.045+0.67
zStep=0.005  # keep the step size no smaller than 0.005, otherwise the Bessel-function solve goes wrong
Freq=162.5
cLight=299792458
lambda_m=cLight/Freq/1.e6
cell_Beta_A_a_m_Z_L=np.loadtxt('pariout_python.txt')
cell=cell_Beta_A_a_m_Z_L[:,0]
Beta=cell_Beta_A_a_m_Z_L[:,3]
A=cell_Beta_A_a_m_Z_L[:,5]
a=cell_Beta_A_a_m_Z_L[:,7]
m=cell_Beta_A_a_m_Z_L[:,8]
Z=cell_Beta_A_a_m_Z_L[:,-3]
L=cell_Beta_A_a_m_Z_L[:,-4]
numCell=len(cell)
nREC=int((zEnd-zStart)/zStep)+1
xREC=np.zeros((nREC,2))
xREC_2=np.zeros((nREC,2))
zREC=np.zeros(nREC)
cellREC=np.zeros(nREC)
cellFlagREC=np.zeros(nREC)
RhoREC=np.zeros(nREC)
LREC=np.zeros(nREC)
Lcal=np.zeros(nREC)
iCellFlag=1
zRec=zStart
def RFQVane(x,a,k,z,m):  # defines the RFQ vane-tip profile function
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
return x**2/a**2-(1-A*iv(0,k*x)*np.cos(k*z))/(1-A*iv(0,k*a))
def Rho(a,k,m):
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
Rho=0.75*a/np.sqrt(1-A*iv(0,k*a))
return Rho
iREC=0;
while (zRec<zEnd):
print(zRec)
diff_RecCell=zRec-Z
    iCell=len(diff_RecCell[diff_RecCell>0]) - 1  # determine which cell the current point lies in
iCellFlag=(-1)**iCell
if (iCellFlag>0):
zCal=zRec-Z[iCell]
zCal_2=Z[iCell]-zRec
else:
zCal=Z[iCell+1]-zRec
zCal_2=zRec-Z[iCell-1]
# zCal=zRec-Z[iCell]
#k=np.pi/L[iCell]
betaK=np.interp(zRec,Z,Beta)
k=np.pi/betaK/lambda_m/100*2
    #k=np.pi/np.interp(zRec,Z,L)  # computing the cell length from the L data gives the same result as computing it from beta
aInterP=np.interp(zRec,Z,a)
mInterP=np.interp(zRec,Z,m)
xRecTmp = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal,mInterP))
xRecTmp_2 = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal_2,mInterP))
RhoREC[iREC]=Rho(aInterP,k,mInterP)
xREC[iREC,:]=xRecTmp
xREC_2[iREC,:]=xRecTmp_2
zREC[iREC]=zRec
cellREC[iREC]=iCell
cellFlagREC[iREC]=iCellFlag
LREC[iREC]=np.interp(zRec,Z,L)
Lcal[iREC]=betaK*lambda_m/2*100
iREC+=1
zRec+=zStep
plt.figure('calculating result')
plt.plot(zREC,xREC[:,0],'b')
plt.hold
plt.savefig('result.png')
#plt.plot(zREC,xREC_2[:,0],'r')
###################### comparison with the reference data ######################
z_HV_REF=np.loadtxt('RFQ H DATA.txt')
Z_REF=z_HV_REF[:,0]/10.
X_REF=z_HV_REF[:,1]/10
Rho_REF=z_HV_REF[:,2]/10
plt.figure('Comp')
plt.plot(zREC-0.67,xREC,'b')
plt.hold
#plt.plot(zREC,xREC_2[:,0],'g')
plt.hold
plt.plot(Z_REF,X_REF,'r')
xRECInterP=np.interp(Z_REF,zREC-0.67,xREC[:,0])
plt.figure('Diff')
plt.plot(Z_REF,X_REF-xRECInterP,'r')
plt.hold
#plt.savefig('comp.png')
#plt.plot(zREC,cellFlagREC,'g')
###################### compare the Rho function ######################
'''
plt.figure('Rho')
plt.plot(zREC,RhoREC,'b')
plt.hold
plt.plot(Z_REF,Rho_REF,'r')
plt.hold
plt.plot(Z_REF,Rho_REF-np.interp(Z_REF,zREC,RhoREC),'g')
plt.plot(zREC,np.interp(zREC,Z_REF,Rho_REF),'g')
'''
###################### compare the cell length read from file with the calculated one ######################
'''
plt.figure('L_COMP')
plt.plot(zREC,LREC,'r')
plt.hold
plt.plot(zREC,Lcal,'b')
plt.hold
plt.figure('L_Ratio')
plt.plot(zREC,((LREC-Lcal)/LREC))
'''
###################### analyse the number of cells ######################
def Smooth(x):
x[0]=x[0]
x[1]=np.average(x[0:2])
x[2:-3]=(x[0:-5]+x[1:-4]+x[2:-3]+x[3:-2]+x[4:-1])/5.
x[-2]=np.average(x[-3:-1])
x[-1]=x[-1]
return x
def FindPeaks(x):
xLeft=x[1:-2]> x[0:-3]
xRight=x[1:-2]> x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
def FindValley(x):
xLeft=x[1:-2]< x[0:-3]
xRight=x[1:-2]< x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
indexPeak=((Z_REF>4.) * (Z_REF<221.5))  # define the peak-search range
ZREFPeak=Z_REF[indexPeak]
xREFPeak=X_REF[indexPeak]
xREFPeak=Smooth(xREFPeak)
xREFPeak=Smooth(xREFPeak)
xRECPeak=xRECInterP[indexPeak]
ZRECPeak=ZREFPeak
xRECPeak=Smooth(xRECPeak)
xRECPeak=Smooth(xRECPeak)
index_xRECPeakTuple=FindPeaks(xRECPeak)
index_xREFPeakTuple=FindPeaks(xREFPeak)
index_xRECPeak=index_xRECPeakTuple[0]
index_xREFPeak=index_xREFPeakTuple[0]
print(' xRECPeak:',len(index_xRECPeak),'\n','xREFPeak:',len(index_xREFPeak))
index_xREFValleyTuple=FindValley(xREFPeak)
index_xREFValley=index_xREFValleyTuple[0]
if len(index_xREFPeak)==len(index_xREFValley):
if ((Z_REF[index_xREFPeak[0]])<(Z_REF[index_xREFValley[0]])):
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak]
P_cell_PV=Z_REF[index_xREFValley]
else:
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley]
P_cell_PV=Z_REF[index_xREFPeak]
elif len(index_xREFPeak)<len(index_xREFValley):
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley[:-1]]
P_cell_PV=Z_REF[index_xREFPeak]
else:
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak[-1]]
P_cell_PV=Z_REF[index_xREFValley]
pariout=np.loadtxt('pariout_python.txt')
Cell_pariout=pariout[:,0]
Z_pariout=pariout[:,-3]
L_pariout=pariout[:,-4]
r0_pariout=pariout[:,9]
ncell_pariout=len(Z_pariout[(Z_pariout>4.)*(Z_pariout<221.5)])
'''
plt.figure('Length(HV_P-V)_comp_priout')
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],P_cell_PV,Lcell_HV),'b')
plt.hold
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],Z_pariout,L_pariout),'r')
print(' HV:',((len(index_xREFPeak))+len(index_xREFValley)),'\n','parioutcell:',ncell_pariout)
'''
'''
plt.figure('Peak')
plt.plot(ZRECPeak,xRECPeak,'b')
plt.hold
plt.plot(ZRECPeak,xREFPeak,'r')
plt.plot(ZRECPeak[index_xRECPeak],xRECPeak[index_xRECPeak],'bo')
plt.plot(ZRECPeak[index_xREFPeak],xREFPeak[index_xREFPeak],'r*')
plt.plot(ZRECPeak[index_xREFValley],xREFPeak[index_xREFValley],'r*')
'''
###################### calculate the fixed vane-tip radius ######################
r0_cal_rho=r0_pariout[4:]
L_cal_rho=L_pariout[4:]
r0_sum=0
for i in range(0,len(L_cal_rho)):
r0_sum=r0_sum+r0_cal_rho[i]*L_cal_rho[i]
r0_rho=r0_sum/Z_pariout[-1]
rho_constant=0.75*r0_rho
print(' CST_RHO_constant=',rho_constant,'cm')
##############################################################################
plt.show()
|
gpl-3.0
| -2,863,287,004,672,663,600
| 19.009146
| 93
| 0.554636
| false
| 2.199489
| false
| false
| false
|
sepeth/relationships
|
relationships/relationship.py
|
1
|
4426
|
import redis
from keys import key_list as default_key_list
class Relationship(object):
def __init__(self, redis_connection=None, key_list=None, actor=None):
if key_list:
default_key_list.update(key_list)
self.key_list = default_key_list
if redis_connection:
self.redis_connection = redis_connection
else:
self.redis_connection = redis.StrictRedis(
host='localhost',
port=6379,
db=0
)
self.actor = actor
def __call__(self, *args, **kwargs):
self.actor = args[0]
return self
def _action_call(self, command, from_id, to_id, operation_key):
command_values = ':'.join(('user', str(from_id), operation_key)), to_id
return getattr(self.redis_connection, command)(*command_values)
def _list_call(self, operation_key):
return self.redis_connection.smembers(
'user:{}:{}'.format(self._get_actor(), operation_key)
)
def _count_call(self, operation_key):
return self.redis_connection.scard(
'user:{}:{}'.format(
self._get_actor(),
operation_key
)
)
def _get_actor(self):
if hasattr(self, 'actor'):
return self.actor
raise ValueError("actor is not defined")
def block(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["blocked_by"])
def unblock(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["blocked_by"])
def follow(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["following"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["followers"])
def unfollow(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["following"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["followers"])
def friends(self):
return self.redis_connection.sinter(
"user:{}:{}".format(self._get_actor(), self.key_list["following"]),
"user:{}:{}".format(self._get_actor(), self.key_list["followers"]),
)
def followers(self):
return self._list_call(self.key_list["followers"])
def following(self):
return self._list_call(self.key_list["following"])
def blocks(self):
return self._list_call(self.key_list["blocked"])
def blocked(self):
return self._list_call(self.key_list["blocked_by"])
def follower_count(self):
return self._count_call(self.key_list["followers"])
def following_count(self):
return self._count_call(self.key_list["following"])
def block_count(self):
return self._count_call(self.key_list["blocked"])
def blocked_count(self):
return self._count_call(self.key_list["blocked_by"])
def is_follower(self, follower_id):
return self._action_call('sismember', self._get_actor(), follower_id, self.key_list["followers"])
def is_following(self, following_id):
return self._action_call('sismember', self._get_actor(), following_id, self.key_list["following"])
def is_blocked(self, blocked_id):
return self._action_call('sismember', self._get_actor(), blocked_id, self.key_list["blocked"])
def is_blocked_by(self, blocked_by_id):
        return self._action_call('sismember', self._get_actor(), blocked_by_id, self.key_list["blocked_by"])
def get_network(self, output):
user_id = self._get_actor()
try:
import pydot
except ImportError:
raise ImportError("You need pydot library to get network functionality.")
graph = pydot.Dot('network_of_user_{}'.format(user_id), graph_type='digraph')
target_node = pydot.Node(user_id)
for _id in self(user_id).following():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(target_node, user_node))
for _id in self(user_id).followers():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(user_node, target_node))
graph.write_png(output)
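# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# The instance is callable, so the acting user can be set fluently per call:
# relationship(actor_id).follow(other_id) updates both the actor's "following"
# set and the target's "followers" set. The user ids below are made up, and
# redis-py returns set members as bytes unless decode_responses is enabled.
if __name__ == '__main__':
    relationship = Relationship()            # defaults to redis at localhost:6379, db 0
    relationship('alice').follow('bob')
    relationship('bob').follow('alice')
    print(relationship('alice').friends())            # mutual follows, e.g. {b'bob'}
    print(relationship('alice').is_following('bob'))  # True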
|
mit
| 7,119,113,668,999,396,000
| 30.614286
| 108
| 0.592634
| false
| 3.613061
| false
| false
| false
|
martinburchell/crossword_collective
|
crossword.py
|
1
|
11875
|
import os.path
import urllib
import smtplib
import string
import StringIO
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from lxml import etree
from lxml.html.soupparser import fromstring
from lxml.cssselect import CSSSelector
from PIL import Image
from parser import MyHTMLParser
from line import Line
class Crossword(object):
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
def __init__(self, home_page, cross_type, data_dir, prefix, serial_number, density, border, border_color, smtp_server = None, from_email_address = None, to_email_address = None):
self.home_page = home_page
self.cross_type = cross_type
dir = os.path.join(data_dir,str(serial_number))
self.dir = dir
self.mkdir(dir)
self.prefix = prefix
self.serial_number = serial_number
self.density = density
self.border = border
self.border_color = border_color
self.basename = self.prefix + "_" + self.serial_number
self.smtp_server = smtp_server
self.from_email_address = from_email_address
self.to_email_address = to_email_address
def mkdir(self, dir):
if not os.path.isdir(dir):
os.mkdir(dir)
def download_pdf(self):
url = self.home_page + self.cross_type + "/" + self.serial_number
content = urllib.urlopen(url).read()
root = fromstring(content)
selector = CSSSelector('p#stand-first a')
pdf_url = False
for element in selector(root):
href = element.get("href")
if href != None and href[-4:] == ".pdf":
pdf_url = href
if pdf_url:
pdf_stream = urllib.urlopen(pdf_url)
pdf_basename = pdf_url[pdf_url.rfind("/") + 1:]
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
pdf_basename = ''.join(c for c in pdf_basename if c in valid_chars)
self.basename = pdf_basename[:-4]
self.pdf_filename = os.path.join(self.dir, pdf_basename)
self.mkdir(self.dir)
pdf_file = open(self.pdf_filename, "w")
while True:
buffer = pdf_stream.read(1024)
if buffer == "":
break
pdf_file.write(buffer)
pdf_file.close()
pdf_stream.close()
return True
return False
def tag_matches(self, element, tag):
return element.tag == tag or element.tag == "{%s}%s" % (self.XHTML_NAMESPACE, tag)
def convert_to_png(self):
# Hmmm...
png_basename = self.basename + ".png"
self.png_filename = os.path.join(self.dir, png_basename)
command = "convert -alpha off -density %s %s[0] -trim +repage -format png32 -depth 3 -define png:color-type=2 %s" % (self.density, self.pdf_filename, self.png_filename)
ok = os.system(command)
if ok == 0:
image = Image.open(self.png_filename)
self.image_width = image.size[0]
self.image_height = image.size[1]
return (ok == 0)
def find_grid(self):
image = Image.open(self.png_filename)
pixels = image.load()
threshold = 300
x_lines = []
for y in range (0, self.image_height):
x_count = 0
for x in range (0, self.image_width):
if (pixels[x, y] == (255,255,255) or x + 1 == self.image_width):
if x_count > threshold:
x_lines.append(Line(x - x_count, y, x, y))
x_count = 0
else:
x_count += 1
freq = {}
for line in x_lines:
width = line.end_x - line.start_x
n = freq.get(width, 0)
freq[width] = n + 1
max_count = 0
mode_width = None
for k, v in freq.iteritems():
if v > max_count:
max_count = v
mode_width = k
first_y = None
last_y = None
num_grid_lines = 0
previous_y = None
for line in x_lines:
if line.end_x - line.start_x == mode_width:
# only count non-adjacent lines
if previous_y == None or line.start_y - previous_y > 1:
num_grid_lines += 1
previous_y = line.start_y
if first_y == None:
first_y = line
last_y = line
self.grid_x = first_y.start_x
self.grid_y = first_y.start_y
self.grid_width = mode_width
self.grid_height = mode_width
if num_grid_lines < 2:
print "Not enough grid lines"
return False
self.grid_size = num_grid_lines - 1
self.square_size = mode_width / self.grid_size
return True
def reformat(self):
image_in = Image.open(self.png_filename)
if self.image_width - self.grid_width < 50:
# move the clues right of the grid
width_out = self.image_width * 2 + self.border * 3
grid_height = self.grid_y + self.grid_height
clues_height = self.image_height - self.grid_height
if clues_height > self.grid_height:
height_out = clues_height
else:
height_out = self.grid_height + self.border * 2
image_out = Image.new(image_in.mode,
(width_out, height_out),
self.border_color)
grid_box = (0, 0, self.image_width, grid_height)
grid = image_in.crop(grid_box)
image_out.paste(grid, (self.border, self.border))
clues = image_in.crop((0, grid_height + 1,
self.image_width, self.image_height))
image_out.paste(clues, (self.image_width + self.border * 2 + 1,
self.border))
else:
width_out = self.image_width + self.border * 2
height_out = self.image_height + self.border * 2
image_out = Image.new(image_in.mode,
(width_out, height_out),
self.border_color)
image_out.paste(image_in, (self.border, self.border))
self.image_width = width_out
self.image_height = height_out
self.grid_x += self.border
self.grid_y += self.border
image_out.save(self.png_filename);
return True
def create_pdf_html(self):
html_basename = self.basename + "_pdf.html"
self.html_filename = os.path.join(self.dir, html_basename)
html_file = open(self.html_filename, "w")
image = Image.open(self.png_filename).convert("1")
pixels = image.load()
html_file.write("<div id=\"v6vf\" style=\"text-align: left;\">\n")
html_file.write("\t<img src=\"\" width=\"%d\" height=\"%d\">\n" % (self.image_width, self.image_height))
html_file.write("\t<div>\n")
html_file.write("\t\t<table>\n")
html_file.write("\t\t\t<tbody>\n")
# make the array one square bigger to cope with the edge pixels
squares = [[0 for i in range(self.grid_size + 1)] for j in range(self.grid_size + 1)]
for y in range (0, self.grid_height):
square_y = y / self.square_size
for x in range (0, self.grid_width):
square_x = x / self.square_size
n = squares[square_x][square_y]
if pixels[x + self.grid_x, y + self.grid_y] == 0:
# black
n = n - 1
else:
# white
n = n + 1
squares[square_x][square_y] = n
for square_y in range (0, self.grid_size):
html_file.write("\t\t\t\t<tr>\n")
for square_x in range (0, self.grid_size):
if squares[square_x][square_y] > 0:
cell_class = "white"
else:
cell_class = "black"
html_file.write("\t\t\t\t\t<td class=\"%s\"><br></td>\n" % cell_class)
html_file.write("\t\t\t\t</tr>\n")
html_file.write("\t\t\t</tbody>\n")
html_file.write("\t\t</table>\n")
html_file.write("\t</div>\n")
html_file.write("</div>\n")
html_file.close()
return True
def create_pdf_css(self):
css_basename = self.basename + "_pdf.css"
self.css_filename = os.path.join(self.dir, css_basename)
css_file = open(self.css_filename, "w")
css_file.write("img\n")
css_file.write("{\n")
css_file.write("\tposition: absolute;\n")
css_file.write("\tleft: 0;\n")
css_file.write("\ttop: 0;\n")
css_file.write("\tz-index: -1;\n")
css_file.write("}\n\n")
css_file.write("table\n")
css_file.write("{\n")
css_file.write("\tposition: absolute;\n")
css_file.write("\tleft: %dpx;\n" % self.grid_x)
css_file.write("\ttop: %dpx;\n" % self.grid_y)
css_file.write("\twidth: %dpx;\n" % self.grid_width)
css_file.write("\theight: %dpx;\n" % self.grid_height)
css_file.write("\tborder: thin solid black;\n")
css_file.write("}\n\n")
css_file.write("td\n")
css_file.write("{\n")
css_file.write("\twidth:%dpx;\n" % (self.square_size -4))
css_file.write("\theight:%dpx;\n" % (self.square_size -4))
css_file.write("\ttext-align: center;\n")
css_file.write("\tvertical-align: middle;\n")
css_file.write("}\n\n")
css_file.write(".black\n")
css_file.write("{\n")
css_file.write("\tbackground-color:#000;\n")
css_file.write("}\n")
css_file.close()
return True
def send_email(self):
message = MIMEMultipart()
message["Subject"] = "%s Crossword Number %s " % (self.cross_type.capitalize(), self.serial_number)
message["From"] = self.from_email_address
message["To"] = self.to_email_address
message.preamble = message["Subject"]
f = open(self.html_filename)
text = MIMEText(f.read(), "html")
f.close()
text.add_header("Content-Disposition", "attachment", filename=self.basename + ".html")
message.attach(text)
f = open(self.css_filename)
text = MIMEText(f.read(), "css")
f.close()
text.add_header("Content-Disposition", "attachment", filename=self.basename + ".css")
message.attach(text)
server = smtplib.SMTP(self.smtp_server)
# server.set_debuglevel(1)
server.sendmail(self.from_email_address, self.to_email_address, message.as_string())
        server.quit()
return True
def create(self):
ok = self.download_pdf()
if not ok:
print "Failed to download PDF"
return False
ok = self.convert_to_png()
if not ok:
print "Failed to convert PDF to PNG"
return False
ok = self.find_grid()
if not ok:
print "Failed to find grid"
return False
ok = self.reformat()
if not ok:
print "Failed to reformat"
return False
ok = self.create_pdf_html()
if not ok:
print "Failed to create HTML"
return False
ok = self.create_pdf_css()
if not ok:
print "Failed to create CSS"
return False
if not self.smtp_server is None:
ok = self.send_email()
if not ok:
print "Failed to send email"
return False
return True
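# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# create() runs the whole pipeline: download the puzzle PDF, rasterise it with
# ImageMagick's `convert`, locate the grid, reformat the image and write the
# HTML/CSS pair (optionally emailing them). All argument values below are made
# up, and the sketch assumes the data directory exists and `convert` is installed.
if __name__ == '__main__':
    crossword = Crossword(home_page='https://example.org/crosswords/',
                          cross_type='cryptic',
                          data_dir='data',
                          prefix='cryptic',
                          serial_number='12345',
                          density=150,
                          border=20,
                          border_color='white')
    if not crossword.create():
        print "Crossword could not be created"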
|
gpl-3.0
| -6,237,270,378,994,270,000
| 29.924479
| 182
| 0.527663
| false
| 3.66852
| false
| false
| false
|
martinloland/rov
|
pc/func.py
|
1
|
3401
|
'''
func.py
 - General classes
 - Handling events initiated by user input
'''
# Imports required by the functions below. Interface-level globals such as
# surface, WHITE, ui, sens, act, uiTest, actFile and fullscreen are expected
# to be provided by the module that loads this file.
import datetime
import math
import os
import socket
import sys
import pygame
class SENS:
def __init__(self):
self.press = 1.0
self.temp = 0
self.volt = 0
self.curr = 0
self.roll = 0
self.yaw = 0
self.pitch = 0
self.ax = 0
self.ay = 0
self.az = 0
self.compass = 0
self.depth = 0
class ACT:
def __init__(self):
self.led = 0
self.pan = 90
self.tilt = 90
self.lf = 0
self.rf = 0
self.lb = 0
self.cb = 0
self.rb = 0
self.pwr = 100
class SOCKET:
def __init__(self, ip, port):
self.socket = socket.socket()
self.socket.bind((ip, port))
self.socket.listen(5)
self.conn, self.addr = self.socket.accept()
def close(self):
        pass
def closeProgram():
if not uiTest:
actFile.close()
pygame.quit()
sys.exit()
def snapshot():
filename = str(datetime.datetime.now().date()) + '_' + str(datetime.datetime.now().time()) + '.jpg'
filename = filename.replace(':','.')
print filename
path = os.path.join('snapshots', filename)
pygame.image.save(pygame.transform.rotate(ui.video.img, 180), path)
#Flash
surface.fill(WHITE)
pygame.display.flip()
def motor(buttons):
moving = False
pwrIncrement = 6
max = 190
min = 0
thresholdU = max-pwrIncrement
thresholdL = pwrIncrement
dev = math.sin(math.radians(sens.pitch))
#Power
if any("mDecrease" in s for s in buttons) and act.pwr >= thresholdL:
act.pwr -= pwrIncrement
if any("mIncrease" in s for s in buttons) and act.pwr <= thresholdU:
act.pwr += pwrIncrement
# Turning
if any("mForward" in s for s in buttons): #forward
moving = True
act.lb = act.pwr
act.rb = act.pwr
if any("mBack" in s for s in buttons): #backward
moving = True
act.lb = -act.pwr
act.rb = -act.pwr
if any("mLeft" in s for s in buttons):
moving = True
act.lb = -act.pwr
act.rb = act.pwr
if any("mRight" in s for s in buttons):
moving = True
act.lb = act.pwr
act.rb = -act.pwr
#up/down
if any("mUp" in s for s in buttons):
moving = True
act.lf = act.rf = sorted([-max, int(act.pwr*(1-dev)), max])[1]
act.cb = sorted([-max, int(act.pwr*(1+dev)), max])[1]
if any("mDown" in s for s in buttons):
moving = True
act.lf = act.rf = sorted([-max, int(-act.pwr*(1+dev)), max])[1]
act.cb = sorted([-max, int(-act.pwr*(1-dev)), max])[1]
if not moving:
act.lf = act.rf = act.lb = act.cb = act.rb = 0
def toggle_fullscreen():
global fullscreen
[SCREEN_WIDTH, SCREEN_HEIGHT] = [1296,730]
if fullscreen == False:
pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT),pygame.FULLSCREEN)
fullscreen = True
else:
pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
fullscreen = False
def gimbal(buttons):
increment = 6
panMax = 30
tiltMax = 55
zeroPoint = 90
threshP = [-panMax+increment, panMax-increment]
threshT = [-tiltMax+increment, tiltMax-increment]
if any("gRight" in s for s in buttons) and act.pan-zeroPoint > threshP[0]:
act.pan -= increment
if any("gLeft" in s for s in buttons) and act.pan-zeroPoint < threshP[1]:
act.pan += increment
if any("gDown" in s for s in buttons) and act.tilt-zeroPoint > threshT[0]:
act.tilt -= increment
if any("gUp" in s for s in buttons) and act.tilt-zeroPoint < threshT[1]:
act.tilt += increment
if any("resetGimbal" in s for s in buttons):
act.pan = act.tilt = 90
|
mit
| -3,157,018,102,161,540,600
| 23.780303
| 100
| 0.626286
| false
| 2.707803
| false
| false
| false
|
prateek-1708/pg-aws-python
|
src/ecs-deploy.py
|
1
|
4415
|
#!/usr/bin/python3
import boto3
import argparse
import pprint
import sys
##############################################################################
def debug(args):
print('Cluster Name: {}'.format(args.cluster))
print('Service Name: {}'.format(args.service))
print('Image Version: {}'.format(args.image_version))
##############################################################################
def die(message='I am dying...'):
print("Error: {}".format(message))
sys.exit(1)
##############################################################################
def debug_and_die(args, message='I am dying...'):
    debug(args)
    die(message)
##############################################################################
def get_client(client_type):
try:
return boto3.client(client_type)
except:
die('Cannot call boto3.client...')
##############################################################################
def search_and_return_arn_from_haystack(haystack, key, needle):
arns = haystack[key]
match = [arn for arn in arns if needle in arn]
arn = match.pop()
return arn
##############################################################################
def read_arguments():
parser = argparse.ArgumentParser("Deploy Docker image to ecs cluster")
parser.add_argument(
"-c",
"--cluster",
required=True,
dest="cluster",
help="Cluster name where this docker image needs to be deployed"
)
parser.add_argument(
"-s",
"--service",
required=True,
dest="service",
help="Service name where this docker image needs to be deployed"
)
parser.add_argument(
"-i",
"--image-version",
required=True,
dest="image_version",
help="Version of the image to be deployed"
)
args = parser.parse_args()
if not args.cluster:
parser.error("Cluster name is required in order for this to work")
if not args.service:
parser.error("Service name is required in order for this to work")
if not args.image_version:
parser.error("Image version is required in order to do the deploy")
return parser.parse_args()
##############################################################################
def main():
args = read_arguments()
cluster_name_to_search = args.cluster
service_name_to_search = args.service
debug(args)
# create the kms client to do the decrypttion
ecs_client = get_client('ecs')
# Getting the cluster
clusters = ecs_client.list_clusters()
cluster_arn = search_and_return_arn_from_haystack(clusters, 'clusterArns', cluster_name_to_search)
# Getting the services
services = ecs_client.list_services(cluster=cluster_arn)
service_arn = search_and_return_arn_from_haystack(services, 'serviceArns', service_name_to_search)
# describing the service
service_details = ecs_client.describe_services(cluster=cluster_arn, services=[service_arn])
task_definition_arn = ((service_details['services']).pop())['taskDefinition']
task_def_details = ecs_client.describe_task_definition(taskDefinition=task_definition_arn)
task_definition = task_def_details['taskDefinition']
print(task_definition)
family = task_definition['family']
print(family)
volumes = task_definition['volumes']
container_definition = task_definition['containerDefinitions'][0]
print(container_definition)
image = container_definition['image']
print(image)
split_array = image.split("/")
image_name_and_tag = split_array[1].split(":")
new_image_name_and_tag = image_name_and_tag[0] + ":" + args.image_version
repo_and_image_name_with_tag = split_array[0] + "/" + new_image_name_and_tag
container_definition['image'] = repo_and_image_name_with_tag
response = ecs_client.register_task_definition(
family=family,
containerDefinitions=[container_definition],
volumes=volumes
)
pprint.pprint(response)
pprint.pprint(response['taskDefinition']['taskDefinitionArn'])
deployed = ecs_client.update_service(
cluster=cluster_arn,
service=service_arn,
taskDefinition=response['taskDefinition']['taskDefinitionArn']
)
pprint.pprint(deployed)
##############################################################################
if __name__ == '__main__':
main()
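# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# The script is driven entirely by argparse, so a deploy is a single command.
# Cluster, service and tag names below are made up:
#
#   python3 ecs-deploy.py --cluster prod-cluster --service orders-service --image-version 1.4.2
#
# boto3 resolves AWS credentials and region from the environment or the usual
# ~/.aws configuration files, as for any other boto3 client.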
|
mit
| 2,897,100,967,628,455,400
| 28.637584
| 102
| 0.560589
| false
| 4.332679
| false
| false
| false
|
UdK-VPT/Open_eQuarter
|
mole3/qgisinteraction/plugin_interaction.py
|
1
|
13083
|
from qgis.PyQt import QtCore
from qgis.core import QgsProject, QgsCoordinateReferenceSystem, QgsMapLayer, QgsRasterLayer, QgsVectorLayer
from qgis.core import QgsField, QgsFeature, QgsDistanceArea, QgsPoint
from qgis import utils
from os import path
import sys
from mole3.qgisinteraction.layer_interaction import find_layer_by_name, add_attributes_if_not_exists, delete_layer_files
from mole3.qgisinteraction import legend
from mole3.project import config
def get_plugin_ifexists(plugin_name):
"""
Check if a plugin with the given name exists.
:param plugin_name: Name of the plugin to check existence of.
:type plugin_name: str
:return plugin: Return the plugin if it was found or None otherwise
:rtype: plugin instance
"""
from mole3 import oeq_global
try:
plugin = utils.plugins[plugin_name]
return plugin
except KeyError:
oeq_global.OeQ_push_warning(title="Mandatory Plugins: ", message="Please install Plugin '" + plugin_name + "' ")
return None
class PstInteraction(object):
def __init__(self, iface, plugin_name='pointsamplingtool'):
if isinstance(plugin_name, str):
try:
self.plugin_folder = path.dirname(sys.modules[plugin_name].__file__)
# if the pst is not part of the path, add it to the path, so the modules can be imported
if self.plugin_folder not in sys.path:
sys.path.insert(0, self.plugin_folder)
except KeyError:
print((KeyError, plugin_name))
from doPointSamplingTool import Dialog
self.pst_dialog = Dialog(iface)
self.path_to_output_layer = ''
def set_input_layer(self, layer_name):
layernode = legend.nodeByName(layer_name,'layer')
if len(layernode) == 0:
return None
in_layer = self.pst_dialog.inSample
#print in_layer
index = in_layer.findText(layer_name)
in_layer.setCurrentIndex(index)
#if layer_name is not None and not layer_name.isspace():
# layer_registry = QgsProject.instance()
# layer_available = layer_registry.mapLayersByName(layer_name)
# if layer_available:
# drop down menu, listing all available layers
def select_and_rename_files_for_sampling(self,sample_fields):
"""
Select all available layers for the point sampling and rename multiple occurrences of the same name.
Prepend an index, to separate the layers and append the information, which color value is displayed.
:return plugin: Return the plugin if it was found or None otherwise
:rtype: plugin instance
"""
import mole3.extensions as extensions
sample_list = self.pst_dialog.inData
table = self.pst_dialog.fieldsTable
number_of_samples = len(sample_list)
RGBa_appendices = ['R', 'G', 'B', 'a']
RGBa_index = 0
last_name = ''
prefix = 0
replacement_map = {}
for i in range(number_of_samples):
# select all fields via the inData-view,
# so the point sampling tool can manage its model accordingly/appropriately
sample_list.setItemSelected(sample_list.item(i), True)
# Get the source-name (as displayed in the field-table) and check if it was used already
# (the name has to be split, since it is displayed in the form 'layer_name : Band x' to get the layer_name)
table_index = table.rowCount()-1
table_text = table.item(table_index, 0).text().split(' : ')
layer_name = table_text[0]
band_name = table_text[1]
layer = find_layer_by_name(layer_name)
ext=extensions.by_layername(layer_name, 'Import')
#if ext:
# print "Test: "+ext[0].layer_name
# Check if the layer was already used
if last_name != layer_name:
last_name = layer_name
prefix += 1
RGBa_index = 0
if (layer.name() == config.building_outline_layer_name and
(band_name.startswith('AREA') or band_name.startswith('PERIMETER') or band_name.startswith(config.building_id_key))):
continue
elif (layer.type() == QgsMapLayer.RasterLayer and
layer.rasterType() == QgsRasterLayer.Multiband and
layer.bandCount() == 4
):
# Truncate the name to a maximum of 6 characters, since QGIS limits the length of a feature's name to 10
# prepend prefix (with leading zero), truncated name and RGBa-appendix
try:
rgba = RGBa_appendices[RGBa_index]
RGBa_index += 1
except IndexError as IError:
RGBa_index = 0
print((self.__module__, 'IndexError when appending the RGBa-Appendix: {}'.format(IError)))
if ext:
export_name = ext[0].field_id + '_' + rgba
else:
export_name = '{:02d}{}_{}'.format(prefix, layer_name[0:6], rgba)
replacement_map[layer_name] = export_name[:-2]
# Change the text in the table, so the pst can manage its model accordingly/appropriately
table.item(table_index, 1).setText(export_name)
continue
elif ext:
                # NEW: fieldname is not clear
if ext[0].field_rename is not None:
if band_name.startswith(tuple(ext[0].field_rename.keys())):
if ext[0].field_rename[band_name]:
table.item(table_index, 1).setText(ext[0].field_rename[band_name])
continue
elif band_name.startswith(tuple(ext[0].par_in)):
continue
sample_list.setItemSelected(sample_list.item(i), False)
return replacement_map
def start_sampling(self, path_to_layer, layer_name):
if not path_to_layer or path_to_layer.isspace() or not layer_name or layer_name.isspace():
return ''
else:
delete_layer_files(layer_name)
full_path = path.join(path_to_layer, layer_name + '.shp')
self.set_input_layer(config.building_coordinate_layer_name)
self.pst_dialog.sampling(full_path)
return full_path
class OlInteraction(object):
def __init__(self, plugin_name = 'openlayers_plugin'):
"""
Make the plugin accessible by looking it up in the plugin-dictionary
:param plugin_name: Name of the open-layers-plugin (as stored in utils.plugins)
:type plugin_name: str
:return:
:rtype:
"""
self.plugin = None
try:
plugin = utils.plugins[plugin_name]
except KeyError as ke:
print("The open layers plugin has not been found under the given name " + plugin_name)
return None
if plugin is not None:
self.plugin = plugin
def open_osm_layer(self, layer_type_id):
"""
Interact with the Open-Street-Map plugin and open an open street map according to open_layer_type_id
:param open_layer_type_id: ID of the open-layer type
:type open_layer_type_id: int
:return:
:rtype:
"""
open_layer = self.plugin._olLayerTypeRegistry.getById(layer_type_id)
number_of_layers = len(QgsProject.instance().mapLayers())
self.plugin.addLayer(open_layer)
return (number_of_layers+1) == len(QgsProject.instance().mapLayers())
def set_map_crs(self, crs_string):
"""
Use the openlayer-plugin to set the project crs to the given crs and to do a re-projection to keep the currently viewed extent focused
:param crs: The new crs to set the project to
:type crs: str
:return:
:rtype:
"""
# if the given crs is valid
if not crs_string.isspace() and QgsCoordinateReferenceSystem().createFromUserInput(crs_string):
self.plugin.setMapCrs(QgsCoordinateReferenceSystem(crs_string, QgsCoordinateReferenceSystem.EpsgCrsId))
class RealCentroidInteraction(object):
def __init__(self, plugin_name='realcentroid'):
"""
Make the plugin accessible by looking it up in the plugin-dictionary
:param plugin_name: Name of the realcentroids-plugin (as stored in utils.plugins)
:type plugin_name: str
:return:
:rtype:
"""
self.plugin = None
try:
plugin = utils.plugins[plugin_name]
self.plugin = plugin
self.plugin.__init__(utils.iface)
except KeyError as KError:
print((KError, 'The realcentroid plugin has not been found by the given name "{}"'.format(plugin_name)))
def create_centroids(self, polygon_name, path_to_output_shape):
from mole3 import oeq_global
self.plugin.dlg.showEvent(QtCore.QEvent.Show)
polygon_combobox = self.plugin.dlg.layerBox
for i in range(polygon_combobox.count()):
if polygon_combobox.itemText(i) == polygon_name:
polygon_combobox.setCurrentIndex(i)
break
else:
print(('Layer {} not found in combobox.'.format(polygon_name)))
return None
self.plugin.dlg.shapefileName = path_to_output_shape
self.plugin.dlg.encoding = sys.getfilesystemencoding()
self.plugin.dlg.addBox.setCheckState(QtCore.Qt.Checked)
self.plugin.generate()
file_info = QtCore.QFileInfo(path_to_output_shape)
if file_info.exists():
layer_name = file_info.completeBaseName()
output_layer = QgsVectorLayer(path_to_output_shape,layer_name, "ogr")
oeq_global.OeQ_wait(0.5)
return output_layer
else:
return None
def calculate_accuracy(self, polygon_layer, point_layer):
"""
Calculate the distance of each centroid on a point-layer to their surrounding polygons
:param polygon_layer: A layer containing polygons
:type polygon_layer: QgsVectorLayer
:param point_layer: A layer containing the (supposed to be) centroids of that polygon
:type point_layer: QgsVectorLayer
:return:
:rtype:
"""
point_provider = point_layer.dataProvider()
add_attributes_if_not_exists(point_layer, [QgsField('DIST', QtCore.QVariant.Double)])
distance_area = QgsDistanceArea()
poly_iterator = polygon_layer.dataProvider().getFeatures()
point_iterator = point_provider.getFeatures()
poly_feature = QgsFeature()
point_feature = QgsFeature()
field_index = point_provider.fieldNameIndex('DIST')
while (poly_iterator.nextFeature(poly_feature) and
point_iterator.nextFeature(point_feature)):
geom= poly_feature.geometry()
if geom is not None:
try:
poly_point = geom.asPolygon()[0]
centroid = geom.asPoint()
except IndexError:
continue
distances = {}
for i, point in enumerate(poly_point):
end = poly_point[(i+1) % len(poly_point)]
try:
intersect = self.intersect_point_to_line(centroid, point, end)
if intersect != centroid:
dist = distance_area.measureLine(centroid, intersect)
distances[intersect] = dist
except ZeroDivisionError as InvalidMath:
continue
values = {field_index: min(distances.values())}
point_provider.changeAttributeValues({point_feature.id(): values})
def intersect_point_to_line(self, point, line_start, line_end):
"""
Finds the point i on a line which, given a point p describes a line ip, orthogonal to a given line
(as found on http://gis.stackexchange.com/questions/59169/how-to-draw-perpendicular-lines-in-qgis)
:param point: The point p
:type point: QgsPoint
:param line_start: The lines start
:type line_start: QgsPoint
:param line_end: The lines end
:type line_end: QgsPoint
:return: The point i, which is the end of the orthogonal line
:rtype: QgsPoint
"""
magnitude = line_start.sqrDist(line_end)
# minimum distance
u = ((point.x() - line_start.x()) * (line_end.x() - line_start.x()) + (point.y() - line_start.y()) * (line_end.y() - line_start.y()))/(magnitude)
# intersection point on the line
ix = line_start.x() + u * (line_end.x() - line_start.x())
iy = line_start.y() + u * (line_end.y() - line_start.y())
return QgsPoint(ix,iy)
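# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# get_plugin_ifexists() is the guard the wrapper classes rely on: it returns the
# live plugin instance from qgis.utils.plugins, or pushes a warning and returns
# None when the plugin is missing. A caller could feature-detect the point
# sampling tool before driving it; the helper below is an assumption, not part
# of the mole3 code base.
def sample_if_available(iface, output_dir, out_layer_name):
    if get_plugin_ifexists('pointsamplingtool') is None:
        return None
    pst = PstInteraction(iface)
    pst.select_and_rename_files_for_sampling([])
    return pst.start_sampling(output_dir, out_layer_name)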
|
gpl-2.0
| 1,665,867,565,011,549,700
| 39.630435
| 153
| 0.595047
| false
| 4.166561
| false
| false
| false
|
fugwenna/bunkbot
|
src/roulette/roulette_cog.py
|
1
|
1297
|
from random import randint
from discord.ext.commands import command, Context, Cog
from ..bunkbot import BunkBot
from ..channel.channel_service import ChannelService
from ..core.bunk_exception import BunkException
from ..core.bunk_user import BunkUser
from ..core.registry import CHANNEL_SERVICE, USER_SERVICE
from ..user.user_service import UserService
DESCRIPTION = """Basic one in six chance for a russian roulette"""
class Roulette(Cog):
def __init__(self, channels: ChannelService, users: USER_SERVICE):
self.channels: ChannelService = channels
self.users: UserService = users
@command(pass_context=True, cls=None, help=DESCRIPTION)
async def roulette(self, ctx: Context) -> None:
try:
await ctx.trigger_typing()
message: str = "Click..."
user: BunkUser = self.users.get_by_id(ctx.message.author.id)
bullet_location: int = randint(0, 5)
if randint(0, 5) == bullet_location:
message = "{0} :gun: BANG!!!!!!!!!".format(user.mention)
await ctx.send(message)
except Exception as e:
await self.channels.log_error(e, "roll")
def setup(bot) -> None:
bot.add_cog(Roulette(CHANNEL_SERVICE, USER_SERVICE))
|
mit
| -3,034,951,817,091,481,600
| 32.131579
| 72
| 0.642251
| false
| 3.727011
| false
| false
| false
|
kkozarev/mwacme
|
src/catalog/hek_find_halo_cmes_v2.py
|
1
|
4200
|
# -*- coding: utf-8 -*-
import json
import os
from sunpy.net import hek
from pprint import pprint
from datetime import datetime,timedelta
from time import strftime
import numpy as np
import matplotlib.pyplot as plt
client = hek.HEKClient()
#SEARCH FOR FAST AND WIDE CMEs IN THE HEK!
#'FAST' means linear speed higher than [minlinvel] km/s
minlinvel=500.
#'WIDE' means angular width larger than [minangwidth] degrees
minangwidth=60.
#Use the C2 start time
tstart = '08/01/2011 07:23:56'
tst=datetime.strptime(tstart,"%m/%d/%Y %H:%M:%S")
#The current time
tend = strftime("%m/%d/%Y %H:%M:%S") #'06/05/2015 12:40:29'
tet=datetime.strptime(tend,"%m/%d/%Y %H:%M:%S")
event_type = 'CE'
frm_name='CACTus (Computer Aided CME Tracking)'
#help(hek.attrs.fl)
#The extent for the size of field around the Sun center to search, in arcseconds
extent=5000
result = client.query(hek.attrs.Time(tst,tet), hek.attrs.EventType(event_type),
hek.attrs.FRM.Name == frm_name,
hek.attrs.SpatialRegion(x1=-1.*extent,x2=extent,y1=-1.*extent,y2=extent),
hek.attrs.CE.RadialLinVel >= minlinvel,
hek.attrs.CE.AngularWidth >= minangwidth)
#Create the x-axis values in a numpy array
timearray=[]
[timearray.append(elem["event_starttime"]) for elem in result]
time0=datetime.strptime(timearray[0],"%Y-%m-%dT%H:%M:%S")-timedelta(days=30)
time1=datetime.strptime(timearray[-1],"%Y-%m-%dT%H:%M:%S")+timedelta(days=30)
timearray=np.array(timearray)
#Get the CME speeds
linvels=[]
[linvels.append(elem["cme_radiallinvel"]) for elem in result]
maxlinvel=np.amax(linvels)*1.
#Get the CME angular widths
angwidths=[]
[angwidths.append(elem["cme_angularwidth"]) for elem in result]
maxangwidth=np.amax(angwidths)*1.
#Set the equally-spaced groups of CME angular widths
nawgroups=4
angwidth_colors=['k','b','g','r']
degree_sign= u'\N{DEGREE SIGN}'
angwidth_groups=np.arange(1,nawgroups+1)*(maxangwidth-minangwidth)/4.+minangwidth
#Create the subsets of CME information based on angular widths
aw_linvel_subsets = [[] for i in range(nawgroups)]
aw_time_subsets = [[] for i in range(nawgroups)]
aw_aw_subsets = [[] for i in range(nawgroups)]
aw_markersizes_subsets = [[] for i in range(nawgroups)]
for ii,aw in enumerate(angwidths):
if (aw >= angwidth_groups[0] and aw < angwidth_groups[1]):
aw_linvel_subsets[1].append(linvels[ii])
aw_time_subsets[1].append(timearray[ii])
aw_aw_subsets[1].append(aw)
aw_markersizes_subsets[1].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
elif (aw >= angwidth_groups[1] and aw < angwidth_groups[2]):
aw_linvel_subsets[2].append(linvels[ii])
aw_time_subsets[2].append(timearray[ii])
aw_aw_subsets[2].append(aw)
aw_markersizes_subsets[2].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
elif (aw >= angwidth_groups[2] and aw <= angwidth_groups[3]):
aw_linvel_subsets[3].append(linvels[ii])
aw_time_subsets[3].append(timearray[ii])
aw_aw_subsets[3].append(aw)
aw_markersizes_subsets[3].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
else:
aw_linvel_subsets[0].append(linvels[ii])
aw_time_subsets[0].append(timearray[ii])
aw_aw_subsets[0].append(aw)
aw_markersizes_subsets[0].append(50+450*(aw-minangwidth)/(maxangwidth-minangwidth))
#Set the plot for CME SPEEDS with Marker sizes encoding CME angular widths
plt.title(str(len(result))+' Fast and Wide CMEs from HEK/CACTus in the AIA Era')
plt.axis([time0,time1,minlinvel/1.05,maxlinvel*1.05])
plt.xlabel('Date',fontsize=16)
plt.ylabel('CME Radial Linear Speed [km/s]',fontsize=16)
#Plot the subsets of CMES based on their angular widths
for ii in range(nawgroups):
if ii == 0:
staw=minangwidth
enaw=angwidth_groups[ii]
else:
staw=angwidth_groups[ii-1]
enaw=angwidth_groups[ii]
plt.scatter(aw_time_subsets[ii],aw_linvel_subsets[ii],c=angwidth_colors[ii],alpha=0.5,\
s=aw_markersizes_subsets[ii],label=str(staw)+degree_sign+' < CME Ang. Width < '+str(enaw)+degree_sign)
plt.legend(bbox_to_anchor=(1.05, 1.05))
#Show the plot
plt.show()
|
gpl-2.0
| -4,243,700,712,274,347,500
| 38.622642
| 118
| 0.689762
| false
| 2.851324
| false
| false
| false
|
flavors/countries
|
setup.py
|
1
|
1907
|
import os
import re
from setuptools import find_packages, setup
def get_long_description():
for filename in ('README.rst',):
with open(filename, 'r') as f:
yield f.read()
def get_version(package):
with open(os.path.join(package, '__init__.py')) as f:
pattern = r'^__version__ = [\'"]([^\'"]*)[\'"]'
return re.search(pattern, f.read(), re.MULTILINE).group(1)
setup(
name='django-countries-flavor',
version=get_version('countries'),
license='MIT',
description='A Django application that provides a data collection '
'for internationalization and localization purposes.',
long_description='\n\n'.join(get_long_description()),
author='mongkok',
author_email='domake.io@gmail.com',
maintainer='mongkok',
url='https://github.com/flavors/django-countries/',
packages=find_packages(exclude=['tests*']),
install_requires=[
'Django>=1.9',
'psycopg2>=2.6.2',
'requests>=1.1.0',
],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Django',
],
zip_safe=False,
tests_require=[
'Django>=1.9',
'factory-boy>=2.8.1',
'psycopg2>=2.6.2',
'requests>=1.1.0',
],
package_data={
'countries': [
'fixtures/**/*.json',
'locale/*/LC_MESSAGES/django.po',
'locale/*/LC_MESSAGES/django.mo',
],
},
)
|
mit
| 4,646,107,103,152,200,000
| 28.796875
| 71
| 0.567908
| false
| 3.806387
| false
| false
| false
|
russellb/nova
|
nova/notifier/list_notifier.py
|
1
|
2207
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.exception import ClassNotFound
list_notifier_drivers_opt = cfg.MultiStrOpt('list_notifier_drivers',
default=['nova.notifier.no_op_notifier'],
help='List of drivers to send notifications')
FLAGS = flags.FLAGS
FLAGS.register_opt(list_notifier_drivers_opt)
LOG = logging.getLogger(__name__)
drivers = None
class ImportFailureNotifier(object):
"""Noisily re-raises some exception over-and-over when notify is called."""
def __init__(self, exception):
self.exception = exception
def notify(self, message):
raise self.exception
def _get_drivers():
"""Instantiates and returns drivers based on the flag values."""
global drivers
if not drivers:
drivers = []
for notification_driver in FLAGS.list_notifier_drivers:
try:
drivers.append(utils.import_object(notification_driver))
except ClassNotFound as e:
drivers.append(ImportFailureNotifier(e))
return drivers
def notify(message):
"""Passes notification to multiple notifiers in a list."""
for driver in _get_drivers():
try:
driver.notify(message)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to send to "
"notification driver %(driver)s." % locals()))
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global drivers
drivers = None
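# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# The drivers come from the list_notifier_drivers MultiStrOpt, so the fan-out is
# configured rather than coded; repeating the flag adds another driver. With a
# flagfile containing, for example:
#
#   --notification_driver=nova.notifier.list_notifier
#   --list_notifier_drivers=nova.notifier.log_notifier
#   --list_notifier_drivers=nova.notifier.rabbit_notifier
#
# a single notify(message) call is passed to every listed driver, and a driver
# that failed to import is replaced by ImportFailureNotifier so the import error
# is logged on every notification instead of being silently dropped.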
|
apache-2.0
| 7,981,357,401,778,414,000
| 30.084507
| 79
| 0.676031
| false
| 4.310547
| false
| false
| false
|
lorien/grab
|
tests/grab_redirect.py
|
1
|
4599
|
# coding: utf-8
from six.moves.urllib.parse import quote, unquote
from grab.error import GrabTooManyRedirectsError
from tests.util import BaseGrabTestCase, build_grab
def build_location_callback(url, counter):
meta = {
'counter': counter,
'url': url,
}
def callback():
if meta['counter']:
status = 301
headers = [('Location', meta['url'])]
body = b''
else:
status = 200
headers = []
body = b'done'
meta['counter'] -= 1
return {
'type': 'response',
'status': status,
'body': body,
'headers': headers,
}
return callback
def build_refresh_callback(url, counter):
meta = {
'counter': counter,
'url': url,
}
def callback():
if meta['counter']:
status = 200
body = (
b'<html><head><meta '
b'http-equiv="refresh" content="5"></head>'
)
else:
status = 200
body = b'done'
meta['counter'] -= 1
return {
'type': 'response',
'status': status,
'body': body
}
return callback
class GrabRedirectTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_follow_refresh_off(self):
# By default meta-redirect is off
meta_url = self.server.get_url('/foo')
self.server.response_once['get.data'] =\
'<meta http-equiv="refresh" content="5; url=%s">' % meta_url
grab = build_grab()
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/')
self.assertEqual(grab.doc.url, self.server.get_url())
def test_follow_refresh_on(self):
meta_url = self.server.get_url('/foo')
# Now test meta-auto-redirect
self.server.response_once['get.data'] =\
'<meta http-equiv="refresh" content="5; url=%s">' % meta_url
grab = build_grab()
grab.setup(follow_refresh=True)
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/foo')
self.assertEqual(grab.doc.url, meta_url)
def test_spaces_in_refresh_url(self):
meta_url = self.server.get_url('/foo')
# Test spaces in meta tag
self.server.response_once['get.data'] =\
"<meta http-equiv='refresh' content='0;url= %s'>" % meta_url
grab = build_grab()
grab.setup(follow_refresh=True)
grab.go(self.server.get_url())
self.assertEqual(self.server.request['path'], '/foo')
self.assertEqual(grab.doc.url, meta_url)
def test_refresh_redirect_limit(self):
self.server.response['get.callback'] =\
build_refresh_callback(self.server.get_url(), 10)
grab = build_grab()
grab.setup(redirect_limit=10, follow_refresh=True)
grab.go(self.server.get_url())
self.assertTrue(b'done' in grab.doc.body)
self.server.response['get.callback'] =\
build_refresh_callback(self.server.get_url(), 10)
grab.setup(redirect_limit=5, follow_refresh=True)
self.assertRaises(GrabTooManyRedirectsError,
lambda: grab.go(self.server.get_url()))
def test_redirect_limit(self):
self.server.response['get.callback'] = (
build_location_callback(self.server.get_url(), 10)
)
grab = build_grab()
grab.setup(redirect_limit=5)
self.assertRaises(GrabTooManyRedirectsError,
lambda: grab.go(self.server.get_url()))
self.server.response['get.callback'] =\
build_location_callback(self.server.get_url(), 10)
grab.setup(redirect_limit=20)
grab.go(self.server.get_url())
self.assertTrue(b'done' in grab.doc.body)
# Test fails, Maybe test server incorrectly processed UTF-8 :(
#def test_redirect_utf_location(self):
# self.server.response_once['code'] = 301
# self.server.response_once['headers'] = [
# ('Location', (self.server.get_url() + u'фыва').encode('utf-8')),
# ]
# self.server.response_once['data'] = 'content-1'
# self.server.response['data'] = 'content-2'
# grab = build_grab(debug=True, follow_location=True)
# grab.go(self.server.get_url())
# print('~~~', grab.doc.url)
# self.assertTrue(
# quote(u'/фыва'.encode('utf-8'), safe='/') in unquote(grab.doc.url)
# )
|
mit
| 2,294,489,589,820,108,000
| 31.560284
| 79
| 0.557613
| false
| 3.699436
| true
| false
| false
|
quarkslab/irma
|
common/src/plugins/manager.py
|
1
|
4896
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import os
import sys
import pkgutil
import logging
from irma.common.utils.oopatterns import Singleton
##############################################################################
# Plugin imports
##############################################################################
from irma.common.plugins.exceptions import PluginError, PluginCrashed, \
PluginLoadError, PluginFormatError, DependencyMissing, \
ModuleDependencyMissing, BinaryDependencyMissing, FileDependencyMissing, \
FolderDependencyMissing
from irma.common.plugins.dependencies import Dependency, ModuleDependency, \
BinaryDependency, FileDependency, FolderDependency, PlatformDependency
class PluginManager(Singleton):
__plugins_cls = {}
##########################################################################
# plugin management
##########################################################################
def get_all_plugins(self):
return list(self.__plugins_cls.values())
def discover(self, path=os.path.dirname(__file__), prefix=None):
dirname = os.path.basename(path)
if prefix is None:
prefix = dirname
for importer, name, ispkg in pkgutil.walk_packages([path]):
try:
pkg_name = '%s.%s' % (prefix, name)
if pkg_name not in sys.modules:
__import__(pkg_name)
if ispkg:
self.discover(os.path.join(path, name), pkg_name)
except PluginFormatError as error:
logging.warn(' *** [{name}] Plugin error: {error}'
''.format(name=name, error=error))
except PluginLoadError as error:
logging.warn(' *** [{name}] Plugin failed to load: {error}'
''.format(name=name, error=error))
except PluginCrashed as error:
logging.warn(' *** [{name}] Plugin crashed: {error}'
''.format(name=name, error=error))
except ImportError as error:
logging.exception(error)
##########################################################################
# plugin registering
##########################################################################
@classmethod
def register_plugin(cls, plugin):
logging.debug('Found plugin {name}. Trying to register it.'
''.format(name=plugin.plugin_name))
# check for dependencies
for dependency in plugin.plugin_dependencies:
try:
dependency.check()
except DependencyMissing as error:
# get plugin info
plugin_name = plugin.plugin_name
# get dependency info
dependency = error.dependency
dependency_name = dependency.dependency_name
dependency_type = dependency.__class__.__name__
dependency_help = dependency.help
# warn user and stop loading
                warning = '{name} is missing dependencies: {deps} ({type}).'
if dependency_help is not None:
warning += ' {help}'
raise PluginLoadError(warning.format(type=dependency_type,
name=plugin_name,
deps=dependency_name,
help=dependency_help))
        # if required, run additional verifications on the plugin
if hasattr(plugin, 'verify'):
try:
plugin.verify()
except Exception as error:
raise PluginLoadError(error)
# add plugin to internal list
if plugin.plugin_canonical_name in cls.__plugins_cls:
logging.debug('Plugin {name} already registered'
''.format(name=plugin.plugin_name))
else:
cls.__plugins_cls[plugin.plugin_canonical_name] = plugin
# mark plugin as active
if plugin.plugin_active is None:
plugin.plugin_active = True
logging.debug('Plugin {name} registered, active set as {state}'
''.format(name=plugin.plugin_name,
state=plugin.plugin_active))
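

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal,
# hypothetical plugin class exposing the attributes that register_plugin()
# inspects above; names such as DummyPlugin and 'dummy' are examples only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class DummyPlugin(object):
        plugin_name = 'Dummy'
        plugin_canonical_name = 'dummy'
        plugin_dependencies = []   # no Dependency objects to check
        plugin_active = None       # register_plugin() flips this to True

    PluginManager.register_plugin(DummyPlugin)
    # The Singleton instance now exposes the registered class:
    print(PluginManager().get_all_plugins())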
|
apache-2.0
| 8,253,696,942,616,350,000
| 42.327434
| 78
| 0.520221
| false
| 5.219616
| false
| false
| false
|
menify/sandbox
|
tags/aql_beta_1_16032008/setup.py
|
1
|
3780
|
import logging
import utils
import options
_Warning = logging.Warning
#//===========================================================================//
_site_setup = []
_user_setup = {}
_tools_setup = {}
_tools_post_setup = {}
def ResetSetup( site_setup = _site_setup,
tools_setup = _tools_setup,
tools_post_setup = _tools_post_setup ):
del site_setup[:]
tools_setup.clear()
tools_post_setup.clear()
#//===========================================================================//
def AddSiteSetup( setup_function, _site_setup = _site_setup, toList = utils.toList ):
_site_setup += toList( setup_function )
def SiteSetup( options, os_env ):
global _site_setup
for f in _site_setup:
f( options = options, os_env = os_env )
UserSetup( options, os_env )
#//===========================================================================//
def AddUserSetup( setup_id, setup_function, _user_setup = _user_setup ):
AddToolSetup( setup_id, setup_function, _user_setup )
def UserSetup( options, os_env, _user_setup = _user_setup ):
for s in options.setup.GetList():
for f in _user_setup.get( s, [] ):
f( options = options, os_env = os_env )
#//===========================================================================//
def AddToolSetup( tool_name, setup_function, tools_setup = _tools_setup, toList = utils.toList ):
current_setup_functions = tools_setup.setdefault( tool_name, [] )
tools_setup[ tool_name ] = current_setup_functions + toList( setup_function )
#//===========================================================================//
def AddToolPostSetup( tool_name, setup_function, tools_post_setup = _tools_post_setup ):
AddToolSetup( tool_name, setup_function, tools_post_setup )
#//===========================================================================//
def _tool_setup( tool_name, env, tools_setup = _tools_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
return
options.SetEnv( env )
os_env = env['ENV']
for f in tools_setup.get( tool_name, [] ):
f( env = env, options = options, os_env = os_env )
#//===========================================================================//
def _tool_post_setup( tool_name, env, tools_post_setup = _tools_post_setup ):
_tool_setup( tool_name, env, tools_post_setup )
#//===========================================================================//
def _tool_exists( self, env ):
if self._aql_is_exist is None:
_tool_setup( self.name, env )
self._aql_is_exist = self._aql_exists( env )
return self._aql_is_exist
#//===========================================================================//
def _tool_generate( self, env ):
if self._aql_is_exist is None:
if not _tool_exists( self, env ):
_Warning( "Tool: '%s' has not been found, but it has been added." % (self.name) )
self._aql_generate( env )
_tool_post_setup( self.name, env )
#//===========================================================================//
def _init_tool( self, name, toolpath = [], **kw ):
_SCons_Tool_Tool_init( self, name, toolpath, **kw )
self._aql_is_exist = None
self._aql_generate = self.generate
self._aql_exists = self.exists
self.exists = lambda env, self = self: _tool_exists( self, env )
self.generate = lambda env, self = self: _tool_generate( self, env )
#//===========================================================================//
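#   The three assignments below monkey-patch SCons.Tool.Tool.__init__ so that
#   every Tool instance defers its setup: the registered setup functions run
#   lazily, the first time exists() or generate() is called on the tool.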
import SCons.Tool
_SCons_Tool_Tool_init = SCons.Tool.Tool.__init__
SCons.Tool.Tool.__init__ = _init_tool
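

#//===========================================================================//
#   Illustrative usage sketch (not part of the original file): registering a
#   site-wide setup hook and a per-tool setup hook. The names _site_defaults,
#   _setup_msvc and 'msvc' are hypothetical examples.
#
#   def _site_defaults( options, os_env ):
#       os_env.setdefault( 'LANG', 'C' )
#
#   def _setup_msvc( env, options, os_env ):
#       os_env.setdefault( 'INCLUDE', '' )
#
#   AddSiteSetup( _site_defaults )        # invoked once from SiteSetup()
#   AddToolSetup( 'msvc', _setup_msvc )   # invoked when SCons initializes the 'msvc' tool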
|
mit
| 3,797,604,539,554,396,000
| 31.033898
| 101
| 0.462169
| false
| 3.884892
| false
| false
| false
|