repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
kcs/SOTAnaplo | log2csv.py | Python | mit | 21,801 | 0.00555 | #!/usr/bin/env python3
"""Simple log converter for SOTA
It inputs a simplified text log and converts it to SOTA CSV format.
The input is a simplified version which allows most of the data field
to be guessed by previous input.
Example of input file:
# lines starting with # are comments
# first line contains information about the activation
# multiple activations can be separated by a blank line
# blank lines right after the first line and between comments are not considered
[callsign] [date] SOTA-ref [YOFF-ref] [locator] [other notes] [...]
# fields are optional, they must be specified for the first time in a file
# after that thay will persist across activations
# if chases are also included the SOTA-ref must be set to *
# everything after the SOTA-ref is considered additional note
# and will not persist to next section (not even YOFF ref)
# one optional note is a type of contest if the SOTA operation was carried
# out within the rules of contest (for example Field-day), the type of contest
# should be specified in the format contes:_contest_type_ where _contest_type_
# will specify a rule file which determines the exchanges format and the
# scoring method (this is still need to be expanded)
# next lines hold qso data
[time] [callsign] [freq] [mode] [RST-sent] [RST-rcvd] [SOTA-ref] [notes]
# everything is optional, if not present the previous value will be reused
# however if some fields cannot be differentiated uniquely than it needs to be
# all present
# field rules:
# time will be in the format hhmm or hh:mm (numbers and optional colon)
# hour part is optional and first 0 is also optional
# examples
# 0810 or 08:10 complete time {08:10}
# 811 missing 0 {08:11}
# 9:2 missing 0s {09:02}
# 3 missing hour part {09:03}
# 05 missing hour part {09:05}
# 13 missing hour part {09:13}
# 4 missing hour part, minute less than previous, hour is incremented {10:04}
# callsign any combination of letters and numbers
# at least 1 number in the middle
# frequency decimal number in MHz n[.nnn][MHz]
# must be in a radio amateur band range
# mode a valid amateur radio mode
# RST's either RST or RS format based on mode
# if not present it is considered to be 59 or 599
# SOTA-ref is in format assoc/region-nnn
# anything else not fitting is considered notes
# a line is not allowed to consist only of notes
# further notes not meant for SOTA database can be commented out
"""
import sys
import re
from da | tetime import date
import os.path
import argparse
import json
fro | m contest import Contest
import country
import qslinfo
class LogException(Exception):
def __init__(self, message, pos):
self.message = message
self.pos = pos
# string matching functions
call_prefix = r"(?:(?=.?[a-z])[0-9a-z]{1,2}(?:(?<=3d)a)?)"
call = re.compile(r"(?:"+call_prefix+r"[0-9]?/)?("+call_prefix+r"[0-9][a-z0-9]*)(?:/[0-9a-z]+){0,2}", re.I)
sota_ref = re.compile(r"[a-z0-9]{1,3}/[a-z]{2}-[0-9]{3}", re.I)
wwff_ref = re.compile(r"[a-z0-9]{1,2}f{2}-[0-9]{3,4}", re.I)
locator = re.compile(r"[a-x]{2}[0-9]{2}[a-x]{2}", re.I)
date_reg = re.compile(r"([0-9]{4})(?P<sep>[.-])([0-9]{2})(?P=sep)([0-9]{2})")
time_reg = re.compile(r"(?P<hour>0?[0-9]|1[0-9]|2[0-3])?((?(hour)[0-5]|[0-5]?)[0-9])")
freq = re.compile(r"((?:[0-9]+\.)?[0-9]+)([kMG]?Hz|[mc]?m)?")
rst = re.compile(r"[1-5][1-9][1-9]?")
word = re.compile(r"\S+")
contest = re.compile(r"contest:(\w+)\s*")
annotation = re.compile(r"[@%$]{1,2}")
def find_word(string, start=0):
"""Find the first word starting from `start` position
Return the word and the position before and after the word
"""
while start < len(string) and string[start].isspace():
start += 1
end = start
while end < len(string) and not string[end].isspace():
end += 1
return string[start:end], start, end
bands = {
'160m' : ( 1.8, 2.0 ),
'80m' : ( 3.5, 4.0 ),
'40m' : ( 7.0, 7.3 ),
'30m' : ( 10.1, 10.15 ),
'20m' : ( 14.0, 14.35 ),
'17m' : ( 18.068, 18.168 ),
'15m' : ( 21.0, 21.45 ),
'12m' : ( 24.89, 24.99 ),
'10m' : ( 28.0, 29.7 ),
'6m' : ( 50.0, 54.0 ),
'2m' : ( 144.0, 148.0 ),
'1.25m' : ( 219.0, 225.0 ),
'70cm' : ( 420.0, 450.0 ),
'35cm' : ( 902.0, 928.0 ),
'23cm' : ( 1240.0, 1300.0 ),
'13cm' : ( 2300.0, 2450.0 ),
'9cm' : ( 3400.0, 3475.0 ),
'6cm' : ( 5650.0, 5850.0 ),
'3cm' : ( 10000.0, 10500.0 ),
'1.25cm' : ( 24000.0, 24250.0 ),
'6mm' : ( 47000.0, 47200.0 ),
'4mm' : ( 75500.0, 81500.0 ),
'2.5mm' : ( 122250.0, 123000.0 ),
'2mm' : ( 134000.0, 141000.0 ),
'1mm' : ( 241000.0, 250000.0 ),
}
def match_freq(s):
# check if the string s is a correct amateur band frequency
# return the string if it is or False otherwise
# the string can either specify the frequency or the band
# specifying the band must contain the m unit as consacrated bands
# frequency can either specify the unit, or be a single number
# which is considered to be in MHz, if unit is not MHz if will
# be converted, or if missing will be added to output string
m = freq.fullmatch(s)
if not m:
return False
mul = 1.0
if m.group(2):
if s.endswith('m'):
if s in bands:
return s
else:
return False
if m.group(2) == kHz:
mul = 0.001
elif m.group(2) == GHz:
mul = 1000.0
n = float(m.group(1)) * mul
for f in bands.values():
if n >= f[0] and n <= f[1]:
if mul == 1.0:
return m.group(1) + 'MHz'
else:
return "{:.3f}MHz".format(n)
return False
def quote_text(string):
"""Quote a string by the CSV rules:
if the text contains commas, newlines or quotes it will be quoted
quotes inside the text will be doubled
"""
if not string:
return string
if ',' in string or '\n' in string or '"' in string:
# check if already quoted
if string[0] == '"' and string[-1] == '"':
# check if every inner quote is doubled
if '"' not in string[1:-1].replace('""', ''):
return string
# if not then inner part must be doubled
string = string[1:-1]
# double the inner quotes and quote string
string = '"{}"'.format(string.replace('"','""'))
return string
class Activation:
"""Class holding information about an activation or a chase
Activations contain information about the date and place of activation,
callsign used and all qsos.
Also a link to the previous activation is stored
In a chase multiple qsos can be merged from a single day.
"""
def __init__(self, string, prev=None):
"""Initialize the activation from the string
At least callsign, date, and sota reference are needed, other
information is optional.
If a previous activation is given then the callsign and date
can be preserved from it, but new sota reference is mandatory.
An asterisk instead of sota reference means a chase
"""
self.previous = prev
# start splitting the string into words
w, pos, end = find_word(string)
# callsign
m = call.fullmatch(w)
if m:
self.callsign = w.upper()
w, pos, end = find_word(string, end)
elif prev:
self.callsign = prev.callsign
else:
raise LogException("Error in activation definition, missing callsign", pos)
# date
m = date_reg.fullmatch(w)
if m:
try:
self.date = date(int(m.group(1)), int(m.group(3)), int(m.group(4)))
except ValueError:
raise LogException("Error in activation definition, invalid date format", pos)
w, pos, end = find_word(string, end)
elif prev:
self.date = prev.date
else:
raise LogException("Error in activation definition, missing date", pos)
# sota reference is mandatory
m = sota_ref.fullmatch(w)
if m:
|
rbeyer/scriptorium | cropsl.py | Python | apache-2.0 | 2,797 | 0.000715 | #!/usr/bin/env python
'''You can easily read off two sample,line coordinates from qview, but ISIS
crop wants one sample,line and then offsets. This just takes two coordinates,
does the math, and then calls crop.'''
# Copyright 2016, 2019, Ross A. Beyer (rbeyer@seti.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The arguments to ISIS crop require a sample/line pair and then a set of offsets.
# I typically have two sample/line pairs read from qview, and got tired of always
# bringing up the calculator to compute the offsets.
import argparse
import subprocess
import sys
from pathlib import Path
def crop(fr, to, samp, line, nsamp, nline):
cmd = ('crop', f'from= {fr}', f'to= {to}',
f'samp= {samp}', f'line= {line}',
f'nsamp= {nsamp}', f'nline= {nline}')
return subprocess.run(cmd, check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
def calcoffset(first, second):
(f_samp, f_line) = first.split(':')
(s_samp, s_line) = second.split(':')
nsamp = int(s_samp) - int(f_samp)
nline = int(s_line) - int(f_line)
return(f_samp, f_line, str(nsamp), str(nline))
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-o', '--output', help="The output filename.")
parser.add_argument('-f', '--first',
help='The sample and line of the first point, '
'separated by a colon, like -f 3:10')
parser.add_argument('-s', '--second',
help='The sample and line of the second point, '
'separated by a colon.')
parser.add_argument('cube', help='Cube file(s) to crop.', nargs='+')
args = parser.parse_args()
for cub in args.cube:
in_p = Path(cub)
if(args.output):
out_p = Path(args.output)
else:
out_p = in_p.with_suffix('.crop | .cub')
(samp, line, nsamp, nline) = calcoffset(args.firs | t, args.second)
print(crop(in_p, out_p, samp, line, nsamp, nline).args)
if(args.output):
# If there's a specific output filename, only do one.
break
if __name__ == "__main__":
sys.exit(main())
|
absperf/wagtailapproval | runtests.py | Python | bsd-2-clause | 293 | 0 | #!/usr/bin/env python
import os
import sys
def run():
from django.core.management import execute_from_command_line
os.environ['DJANGO_SET | TINGS_MODULE'] = 'tests.app.settings'
execute_from_command_line([sys.argv[0], 'test'] + sys.ar | gv[1:])
if __name__ == '__main__':
run()
|
wolfiex/DSMACC-testing | dsmacc/examples/autoencoderminst.py | Python | gpl-3.0 | 2,098 | 0.007626 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pylab as plt
import numpy as np
import seaborn as sns; sns.set()
import keras
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.optimizers import Adam
from keras.datasets import cifar10,mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
nelement = x_train.shape[0]
indim = x_train.shape[1]*x_train.shape[2]
inshape = x_train.shape[1]
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_tra | in.shape[2]) / 255
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2]) / 255
#https://stats.stackexchange.com/questio | ns/190148/building-an-autoencoder-in-tensorflow-to-surpass-pca
l_big = 512
l_mid = 128
l_lat = 2
epochs =5
m = Sequential()
m.add(Dense(l_big, activation='elu', input_shape=(indim,)))
m.add(Dense(l_mid, activation='elu'))
m.add(Dense(l_lat, activation='linear', name="bottleneck"))
m.add(Dense(l_mid, activation='elu'))
m.add(Dense(l_big, activation='elu'))
m.add(Dense(indim, activation='sigmoid'))
m.compile(loss='mean_squared_error', optimizer = Adam())
hist = m.fit(x_train, x_train, batch_size=indim, epochs=epochs, verbose=1,
validation_data=(x_test, x_test))
encoder = Model(m.input, m.get_layer('bottleneck').output)
Zenc = encoder.predict(x_train) # bottleneck representation
Renc = m.predict(x_train) # reconstruction
plt.title('Autoencoder')
plt.scatter(Zenc[:5000,0], Zenc[:5000,1], c=y_train[:5000].reshape(5000), s=8, cmap='tab20')
plt.gca().get_xaxis().set_ticklabels([])
plt.gca().get_yaxis().set_ticklabels([])
plt.tight_layout()
plt.show()
plt.figure(figsize=(9,3))
toPlot = (x_train, Renc)
for i in range(10):
for j in range(2):
ax = plt.subplot(3, 10, 10*j+i+1)
plt.imshow(toPlot[j][i,:].reshape(inshape,inshape), interpolation="nearest",
vmin=0, vmax=1)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.tight_layout()
plt.show()
|
iho/wagtail | wagtail/wagtailsnippets/blocks.py | Python | bsd-3-clause | 637 | 0.00157 | from __future__ import unicode_literals
from django.utils.functional import cached_property
from django.contrib.conten | ttypes.models import ContentType
from wagtail.wagtailcore.blo | cks import ChooserBlock
class SnippetChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(SnippetChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtail.wagtailsnippets.widgets import AdminSnippetChooser
content_type = ContentType.objects.get_for_model(self.target_model)
return AdminSnippetChooser(content_type)
|
guildai/guild-examples | noisy/noisy2.py | Python | apache-2.0 | 249 | 0.008032 | import nu | mpy as np
import tensorboardX as tbx
x = 0.1
noise = 0.1
def f(x):
return np.sin(5 * x) * (1 - np.tanh(x ** 2)) + np.random.randn() * noi | se
loss = f(x)
writer = tbx.SummaryWriter(".")
writer.add_scalar("loss", loss)
writer.close()
|
r-o-b-b-i-e/pootle | tests/core/management/subcommands.py | Python | gpl-3.0 | 6,653 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from collections import OrderedDict
from django.core import management
from django.core.management.base import CommandError, SystemCheckError
from pootle.core.delegate import subcommands
from pootle.core.plugin import provider
from pootle.core.management.subcommands import CommandWithSubcommands
@pytest.mark.django_db
def test_command_with_subcommands_instance(capsys, command_caller):
class FooCommand(CommandWithSubcommands):
name = "foo"
help = "Do a foo"
def handle(self, *args, **options):
self.stdout.write("Did a foo")
foo_command = FooCommand()
assert not foo_command.subcommands
command_caller(foo_command)
out, err = capsys.readouterr()
assert out == u'Did a foo\n'
def test_command_with_subcommands_help(capsys, command_calls):
class FooCommand(CommandWithSubcommands):
name = "foo"
help = "Do a foo"
foo_command = FooCommand()
exited = command_calls(foo_command, "--help")
assert exited
out, err = capsys.readouterr()
assert u'Do a foo' in out
assert "subcommands" not in out.lower()
@pytest.mark.django_db
def test_command_with_subcommands_sub(capsys, command_calls):
class FooCommand(CommandWithSubcommands):
name = "foo"
help = "Do a foo with subcommands"
class BarSubcommand(management.BaseCommand):
pass
@provider(subcommands, sender=FooCommand)
def provide_subcommands(**kwargs):
return dict(bar=BarSubcommand)
foo_command = FooCommand()
assert foo_command.subcommands["bar"] == BarSubcommand
command_calls(foo_command)
out, err = capsys.readouterr()
assert (
out
== (u'Do a foo with subcommands\nAvailable subcommands'
u'\n=====================\n\nbar\n'))
exited = command_calls(foo_command, "--help")
assert exited
out, err = capsys.readouterr()
assert "subcommands" in out.lower()
assert "{bar}" in out
def test_command_with_subcommands_many_subs(capsys, command_calls):
class FooCommand(CommandWithSubcommands):
name = "foo"
help = "Do a foo with subcommands"
class BarSubcommand(management.BaseCommand):
@property
def help(self):
return "Do a bar for a foo"
@provider(subcommands, sender=FooCommand)
def provide_subcommands(**kwargs):
return OrderedDict(
[("bar1", BarSubcommand),
("bar2", BarSubcommand),
("bar3", BarSubcommand)])
@provider(subcommands, sender=FooCommand)
def provide_more_subcommands(**kwargs):
return OrderedDict(
[("bar4", BarSubcommand),
("bar5", BarSubcommand),
("bar6", BarSubcommand)])
foo_command = FooCommand()
assert foo_command.subcommands.keys() == [
"bar1", "bar2", "bar3", "bar4", "bar5", "bar6"]
exited = command_calls(foo_command, "--help")
assert exited
out, err = capsys.readouterr()
assert "subcommands" in out.lower()
for k in foo_command.subcommands.keys():
assert k in out
assert "Do a bar for a foo" in out
@pytest.mark.django_db
def test_command_with_subcommands_sub_call(capsys, command_calls):
class FooCommand(CommandWithSubcommands):
name = "foo"
class BarSubcommand(management.BaseCommand):
name = "bar"
def handle(self, *args, **options):
self.stdout.write("Bar subcommand called")
@provider(subcommands, sender=FooCommand)
def provide_subcommands(**kwargs):
return dict(bar=BarSubcommand)
foo_command = FooCommand()
command_calls(foo_command, "bar")
out, err = capsys.readouterr()
assert out == "Bar subcommand called\n"
exited = command_calls(foo_command, "bar", "--help")
assert exited
out, err = capsys.readouterr()
assert "usage: foo bar" in out
@pytest.mark.django_db
def test_command_with_subcommands_sub_args(capsys, command_calls):
class FooCommand(CommandWithSubcommands):
name = "foo"
class BarSubcommand(management.BaseCommand):
msg_called = "Bar called by %s with the subcommand %s"
def add_arguments(self, parser):
super(BarSubcommand, self).add_arguments(parser)
parser.add_argument(
'--fooarg',
type=str,
help='Help with foo arg')
def handle(self, *args, **option | s):
self.stdout.write(
"Bar called with fooarg: %s" % options["fooarg"])
@provider(subcommands, sender=FooCommand)
def provide_subcommands(**kwargs):
return dict(bar=BarSubcommand)
foo_command = FooCommand()
command_calls(foo_command, "bar", "--fooarg", "BAR")
out, err = capsys.readouterr()
assert out = | = "Bar called with fooarg: BAR\n"
def test_command_with_subcommands_bad_args(capsys):
class FooCommand(CommandWithSubcommands):
pass
foo_command = FooCommand()
with pytest.raises(SystemExit):
foo_command.run_from_argv(["", "foo", "bad", "args"])
out, err = capsys.readouterr()
assert err.startswith("usage: foo")
assert "unrecognized arguments: bad args" in err
def test_command_with_subcommands_bad_exec(capsys):
class RandomError(Exception):
pass
class FooCommand(CommandWithSubcommands):
def execute(self, *args, **options):
raise RandomError("OOPS")
foo_command = FooCommand()
with pytest.raises(RandomError):
foo_command.run_from_argv(["", "foo"])
def test_command_with_subcommands_bad_syscheck(capsys):
class FooCommand(CommandWithSubcommands):
def execute(self, *args, **options):
raise SystemCheckError("BAD SYSTEM")
foo_command = FooCommand()
with pytest.raises(SystemExit):
foo_command.run_from_argv(["", "foo"])
out, err = capsys.readouterr()
assert err == "BAD SYSTEM\n"
def test_command_with_subcommands_bad_commanderror(capsys):
class FooCommand(CommandWithSubcommands):
def execute(self, *args, **options):
raise CommandError("BAD COMMAND")
foo_command = FooCommand()
with pytest.raises(SystemExit):
foo_command.run_from_argv(["", "foo"])
out, err = capsys.readouterr()
assert err == u'CommandError: BAD COMMAND\n'
|
rpdillon/wikid | wikid/wikid.py | Python | gpl-3.0 | 13,569 | 0.004127 | #!/usr/bin/env python
# wikid, Copyright (c) 2010, R. P. Dillon <rpdillon@etherplex.org>
# This file is part of wikid.
#
# wikid is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the i | mplied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mercurial
import datetime
import web
import templates
from os import access
from os import F_OK
from os import chdir
from os import getcwd
from | os import listdir
from os import mkdir
from os import remove
from os import sep
from os.path import expanduser
from os.path import isdir
from re import compile
from re import sub
from re import MULTILINE
from urllib import unquote_plus
from textile import textile
from rest import *
#######################################################################
# Global utility functions that work independent of a particular
# repository
# extensionPath is the path in which this extension is installed.
extensionPath = None
def getExtensionPath():
"""
Finds the path associated with the current extension and returns
it.
"""
if extensionPath == None:
from mercurial import hg
for module in hg.extensions.extensions():
if module[1].__name__ == "hgext_wikid":
return module[1].__path__[0]
else:
return extensionPath
def getTemplate(templateName, globals=None):
templateDir = 'templates'
template = str(getExtensionPath()) + sep + templateDir + sep + templateName
return web.template.frender(template, globals=globals)
def getParams():
"""
Retrives the web.ctx.query string and parses it into a list of
params passed to server in the last request.
"""
import re
tokens = re.split("[?&=]", web.ctx.query[1:])
d = {}
if len(tokens) == 1: return d
def popDict(l, d):
if len(l) != 0:
d[l[0]] = l[1]
return popDict(l[2:], d)
else:
return d
return popDict(tokens, d)
def getParam(param, default=None):
"""
Retrieves the given parameter, if it was specified. If it was not
specified, returns the provided default value. If the default
value was not provided, returns None.
"""
params = getParams()
if param in params:
return params[param]
else:
return default
#######################################################################
# Classes
class StaticLibs(object):
"""
Allows us to serve static content from the lib directory.
TODO: abstract this into a class that serves content from the
specified directory.
"""
def GET(self, filename):
assert '..' not in filename
try:
# Allows static content (like .js and .css files) to be cached.
web.header( 'Cache-Control', 'max-age=1000000')
f = open(str(getExtensionPath()) + "/lib/" + str(filename))
return f.read()
except IOError:
web.notfound()
class WikiContent(object):
"""
A base class for all urls that contain wiki content. Provides
access to repository, ui and version objects, and provides some
utility methods (like calling out to the markup conversion
engine, checking for the existence of a node, etc.)
"""
DEFAULT_PAGE = "MainPage"
ui = None
repo = None
rev = None
@staticmethod
def setUi(ui):
"""
Sets the Mercurial user interface object. We rarely use this
because we're essentially writing a web-based ui, but it is
needed for some operations (like committing data to the
repository).
"""
WikiContent.ui = ui
@staticmethod
def setRepo(repo):
"""
Sets the Mercurial repository from which we will read and
write.
"""
WikiContent.repo = repo
@staticmethod
def setRev(rev):
"""
In the case the user doesn't specify a revision for a
particular operation (which is most of the cases), this is the
revision to use.
Often is 'tip', the latest revision.
"""
WikiContent.rev = rev
def _nodeExist(self, node, rev=None):
"""
Returns whether or not a node exists in the repository.
"""
if rev == None:
rev = self.rev
return node in self.repo[rev]
def _getNodeText(self, node, revision=None):
"""
Returns the raw text of the specified node. This is usually
text in the markup language used by the wiki -- the default is
reStructuredText, but it could be Textile, Markdown, etc.
"""
if self._nodeExist(node, revision):
return self.repo[revision][node].data()
else:
return ""
def _toHtml(self, doc):
"""
Returns an HTML interpretation of the node's plaintext.
This is the place to swap out different markup engines. The
default is reStructuredText.
"""
doc = html_body(unicode(doc)).encode() # docutils engine, using reST
#doc = textile(doc)
return doc
def _doCommit(self, msg):
time = datetime.datetime.now()
time = time.replace(microsecond=0)
mercurial.commands.commit(self.ui, self.repo,
message=msg,
date=str(time), user=self.ui.username(), logfile=None)
# Increment the current rev, because we just created a new revision
WikiContent.rev = WikiContent.repo.changelog.nodemap[WikiContent.repo.changelog.tip()]
def _privileged(self):
"""
We want to have a flexible mechanism for figuring out which
requests should be allowed to modify the Wiki's content.
Because wikid is distributed, it is reasonable to assume that
if a user wishes to modify the content, they will clone the
repo and run it locally. Therefore, the privileged user is
the user connecting from the same computer on which the server
is located.
"""
return web.ctx.ip == '127.0.0.1'
class ReadNode(WikiContent):
"""
ReadNode is responsible for reading nodes and displaying them.
"""
def GET(self, node):
revision = getParam("rev", self.rev)
wikipage = getTemplate('wikipage.html')
# Sets the default document, in the case that is was not specified
if node == "": node = self.DEFAULT_PAGE
if node in self.repo[revision]:
doc = self._getNodeText(node, revision)
doc = self._toHtml(doc)
else:
raise web.seeother("/edit/" + node)
return wikipage(node, doc, self._privileged())
class PrintNode(WikiContent):
"""
PrintNode is responsible for providing the specified document in a
from suitable for printing.
"""
def GET(self, node):
from docutils import core
revision = getParam("rev", self.rev)
# Sets the default document, in the case that is was not specified
if node == "": node = self.DEFAULT_PAGE
if node in self.repo[revision]:
doc = self._getNodeText(node, revision)
doc = core.publish_string(doc, writer_name="html4css1")
else:
raise web.seeother("/edit/" + node)
return doc
class EditNode(WikiContent):
def _clean(self, text):
"""
Cleans up the input from the user and prepares it for writing
to disk.
So far, this changes Windows line breaks into Unix-style
breaks and unquotes the text.
There has to be a better way to handle this, but this seems to
let us get by, for now.
|
vistoyn/python-foruse | foruse/__init__.py | Python | mit | 274 | 0.00365 | # -*- coding: utf-8 -*-
| __author__ = "Ildar Bikmamatov"
__email__ = "vistoyn@gmail.com"
__copyright__ = "Copyright 2016"
__license__ = "MIT"
__version__ = "1.0.1"
from . import log
from .lib import *
from .error import *
from .c | olors import colorf
from .datelib import *
|
pombredanne/anitya | anitya/lib/backends/cpan.py | Python | gpl-2.0 | 3,025 | 0 | # -*- coding: utf-8 -*-
"""
(c) 2014-2016 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
Ralph Bean <rbean@redhat.com>
"""
import anitya.lib.xml2dict as xml2dict
from anitya.lib.backends import BaseBackend, get_versions_by_regex, REGEX
from anitya.lib.exceptions import AnityaPluginException
class CpanBackend(BaseBackend):
''' The custom class for projects hosted on CPAN.
This backend allows to specify a version_url and a regex that will
be used to retrieve the version information.
'''
name = 'CPAN (perl)'
examples = [
'http://search.cpan.org/dist/Net-Whois-Raw/',
'http://search.cpan.org/dist/SOAP/',
]
@classmethod
def get_version(cls, project):
''' Method called to retrieve the latest version of the projects
provided, project that relies on the backend of this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: the latest version found upstream
:return type: str
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the version cannot be retrieved correctly
'''
return cls.get_ordered_versions(project)[-1]
@classmethod
def get_versions(cls, project):
''' Method called to retrieve all the versions (that can be found)
of the projects provided, project that relies on the backend of
this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: a list of all the possible releases found
:return type: list
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the versions cannot be retrieved correctly
'''
url = 'http://search.cpan.org/dist/%(name)s/' % {
'name': project.name}
regex = REGEX % {'name': project.name}
return get_versions_by_regex(url, regex, project)
@classmethod
def check_feed(cls):
''' Return a generator over the latest uploads to CPAN
by que | rying an RSS feed.
'''
url = 'http://search.cpan.org/uploads.rdf'
try:
response = cls.call_url(url)
except Exception: # pragma: no cover
raise AnityaPluginException('Could not contact %s' % url)
try:
parser = xml2dict.XML2Dict()
data = parser.fromstring(response.text)
except Exception: # pragma: no cover
raise AnityaPluginException('No XML returned by %s' % url)
| items = data['RDF']['item']
for entry in items:
title = entry['title']['value']
name, version = title.rsplit('-', 1)
homepage = 'http://search.cpan.org/dist/%s/' % name
yield name, homepage, cls.name, version
|
discipl/NAML | app/initPostgres.py | Python | gpl-3.0 | 133 | 0.007519 | from data | base.postgresDB import init_database
if __name__ == '__main__':
print('Configuring PostgresSQL...')
| init_database() |
jerryyeezus/nlp-summarization | test.py | Python | mit | 648 | 0.003086 | from pyrouge import Rouge155
r = Rouge155()
r.system_dir = "./summary"
r.model_dir = "./summaries-gold/battery-life_amazon_kindle"
r.system_filename_pattern = "battery-life.(\d+).summary"
r.model_filename_pattern = "battery-life_amazon_kindle.[A-Z].#ID#.gold"
output = r.convert_and_evaluate()
print(output)
output_dict = r.output_to_dict(o | utput)
r.system_dir = "./summary"
r.model_dir = "./summaries-gold/room_holiday_inn_london | "
r.system_filename_pattern = "hotel_service.(\d+).summary"
r.model_filename_pattern = "room_holiday_inn_london.[A-Z].#ID#.gold"
output = r.convert_and_evaluate()
print(output)
output_dict = r.output_to_dict(output)
|
Bluejudy/bluejudyd | lib/broadcast.py | Python | mit | 13,387 | 0.003364 | #! /usr/bin/python3
"""
Broadcast a message, with or without a price.
Multiple messages per block are allowed. Bets are be made on the 'timestamp'
field, and not the block index.
An address is a feed of broadcasts. Feeds may be locked with a broadcast whose
text field is identical to ‘lock’ (case insensitive). Bets on a feed reference
the address that is the source of the feed in an output which includes the
(latest) required fee.
Broadcasts without a price may not be used for betting. Broadcasts about events
with a small number of possible outcomes (e.g. sports games), should be
written, for example, such that a price of 1 XBJ means one outcome, 2 XBJ means
another, etc., which schema should be described in the 'text' field.
fee_fraction: .05 XBJ means 5%. It may be greater than 1, however; but
because it is stored as a four‐byte integer, it may not be greater than about
42.
"""
import struct
import decimal
D = decimal.Decimal
from fractions import Fraction
import logging
from . import (util, exceptions, config, worldcoin)
from . import (bet)
FORMAT = '>IdI'
LENGTH = 4 + 8 + 4
ID = 30
# NOTE: Pascal strings are used for storing texts for backwards‐compatibility.
def validate (db, source, timestamp, value, fee_fraction_int, text, block_index):
problems = []
if fee_fraction_int > 4294967295:
problems.append('fee fraction greater than 42.94967295')
if timestamp < 0: problems.append('negative timestamp')
if not source:
problems.append('null source address')
# Check previous broadcast in this feed.
cursor = db.cursor()
broadcasts = list(cursor.execute('''SELECT * FROM broadcasts WHERE (status = ? AND source = ?) ORDER BY tx_index ASC''', ('valid', source)))
cursor.close()
if broadcasts:
last_broadcast = broadcasts[-1]
if last_broadcast['locked']:
problems.append('locked feed')
elif timestamp <= last_broadcast['timestamp']:
problems.append('feed timestamps not monotonically increasing')
if not (block_index >= 317500 or config.TESTNET): # Protocol change.
if len(text) > 52:
problems.append('text too long')
return problems
def compose (db, source, timestamp, value, fee_fraction, text):
# Store the fee fraction as an integer.
fee_fraction_int = int(fee_fraction * 1e8)
problems = validate(db, source, timestamp, value, fee_fraction_int, text, util.last_block(db)['block_index'])
if problems: raise exceptions.BroadcastError(problems)
data = struct.pack(config.TXTYPE_FORMAT, ID)
if len(text) <= 52:
curr_format = FORMAT + '{}p'.format(len(text) + 1)
else:
curr_format = FORMAT + '{}s'.format(len(text))
data += struct.pack(curr_format, timestamp, value, fee_fraction_int,
text.encode('utf-8'))
return (source, [], data)
def parse (db, tx, message):
cursor = db.cursor()
# Unpack message.
try:
if len(message) - LENGTH <= 52:
curr_format = FORMAT + '{}p'.format(len(message) - LENGTH)
else:
curr_format = FORMAT + '{}s'.format(len(message) - LENGTH)
timestamp, value, fee_fraction_int, text = struct.unpack(curr_format, message)
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = ''
status = 'valid'
except (struct.error) as e:
timestamp, value, fee_fraction_int, text = 0, None, 0, None
status = 'invalid: could not unpack'
if status == 'valid':
# For SQLite3
timestamp = min(timestamp, config.MAX_INT)
value = min(value, config.MAX_INT)
problems = validate(db, tx['source'], timestamp, value, fee_fraction_int, text, tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
# Lock?
lock = False
if text and text.lower() == 'lock':
lock = True
timestamp, value, fee_fraction_int, text = 0, None, None, None
else:
lock = False
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'timestamp': timestamp,
'value': value,
'fee_fraction_int': fee_fraction_int,
'text': text,
'locked': lock,
'status': status,
}
sql='insert into broadcasts values(:tx_index, :tx_hash, :block_index, :source, :timestamp, :value, :fee_fraction_int, :text, :locked, :status)'
| cursor.execute(sql, bindings)
# Negative values (default to ignore).
if value == None or value < 0:
# Cancel Open Bets?
if value == - | 2:
cursor.execute('''SELECT * FROM bets \
WHERE (status = ? AND feed_address = ?)''',
('open', tx['source']))
for i in list(cursor):
bet.cancel_bet(db, i, 'dropped', tx['block_index'])
# Cancel Pending Bet Matches?
if value == -3:
cursor.execute('''SELECT * FROM bet_matches \
WHERE (status = ? AND feed_address = ?)''',
('pending', tx['source']))
for bet_match in list(cursor):
bet.cancel_bet_match(db, bet_match, 'dropped', tx['block_index'])
cursor.close()
return
# Handle bet matches that use this feed.
cursor.execute('''SELECT * FROM bet_matches \
WHERE (status=? AND feed_address=?)
ORDER BY tx1_index ASC, tx0_index ASC''',
('pending', tx['source']))
for bet_match in cursor.fetchall():
broadcast_bet_match_cursor = db.cursor()
bet_match_id = bet_match['tx0_hash'] + bet_match['tx1_hash']
bet_match_status = None
# Calculate total funds held in escrow and total fee to be paid if
# the bet match is settled. Escrow less fee is amount to be paid back
# to betters.
total_escrow = bet_match['forward_quantity'] + bet_match['backward_quantity']
fee_fraction = fee_fraction_int / config.UNIT
fee = int(fee_fraction * total_escrow) # Truncate.
escrow_less_fee = total_escrow - fee
# Get known bet match type IDs.
cfd_type_id = util.BET_TYPE_ID['BullCFD'] + util.BET_TYPE_ID['BearCFD']
equal_type_id = util.BET_TYPE_ID['Equal'] + util.BET_TYPE_ID['NotEqual']
# Get the bet match type ID of this bet match.
bet_match_type_id = bet_match['tx0_bet_type'] + bet_match['tx1_bet_type']
# Contract for difference, with determinate settlement date.
if bet_match_type_id == cfd_type_id:
# Recognise tx0, tx1 as the bull, bear (in the right direction).
if bet_match['tx0_bet_type'] < bet_match['tx1_bet_type']:
bull_address = bet_match['tx0_address']
bear_address = bet_match['tx1_address']
bull_escrow = bet_match['forward_quantity']
bear_escrow = bet_match['backward_quantity']
else:
bull_address = bet_match['tx1_address']
bear_address = bet_match['tx0_address']
bull_escrow = bet_match['backward_quantity']
bear_escrow = bet_match['forward_quantity']
leverage = Fraction(bet_match['leverage'], 5040)
initial_value = bet_match['initial_value']
bear_credit = bear_escrow - (value - initial_value) * leverage * config.UNIT
bull_credit = escrow_less_fee - bear_credit
bear_credit = round(bear_credit)
bull_credit = round(bull_credit)
# Liquidate, as necessary.
if bull_credit >= escrow_less_fee or bull_credit <= 0:
if bull_credit >= escrow_less_fee:
bull_credit = escrow_less_fee
bear_credit = 0
bet_match_status = 'settled: liquidated for bull'
util.credit(db, tx['block_index'], bull_address, config.XBJ, bull_credit, action= |
capntransit/tract2council | tract2council.py | Python | gpl-3.0 | 2,643 | 0.005675 | import sys, os, json, time
from shapely.geometry import Polygon
# http://toblerity.org/shapely/manual.html
contains = {}
intersects = {}
dPoly = {}
unmatched = []
TRACTCOL = 'BoroCT2010' # rename this for 2000 census
def addPoly(coords):
polys = []
if (isinstance(coords[0][0], float)):
polys.append(Polygon(coords))
else:
for (c) in coords:
polys.extend(addPoly(c))
return polys
def inDistrict(tract):
tPoly = addPoly(tract['geometry']['coordinates'])
tractNum = tract['properties'][TRACTCOL]
intersects = set()
area = 0
intersection = {}
iap = {}
for (i) in range (0, len(tPoly)):
tractPolygon = tPoly[i]
area += tractPolygon.area
for (dn, dp) in dPoly.items():
for (p) in dp:
if (p.contains(tractPolygon)):
iap[dn] = 1
break;
elif (p.intersects(tractPolygon)):
intersects.add(dn)
if dn not in intersection:
intersection[dn] = p.intersection(tractPolygon).area
else:
intersection[dn] += p.intersection(tractPolygon).area
if (len(intersection) > 0):
for (dn, inter) in intersection.items():
iap[dn] = inter / area
return (tractNum, iap)
if __name__ == '__main__':
if (len(sys.argv) < 2):
print ("Usage: tract2council.py tract.json council.json")
exit()
tractfile = sys.argv[1]
councilfile = sys.argv[2]
for (f) in (tractfile, councilfile):
| if (not os.path.isfile(f)) | :
print ("File " + f + " is not readable")
exit()
try:
with open(tractfile) as tractfo:
tractData = json.load(tractfo)
except Exception:
print ("Unable to read tract file " + tractfile)
exit()
try:
with open(councilfile) as councilfo:
councilData = json.load(councilfo)
except Exception as e:
print ("Unable to read council file " + councilfile+": {0}".format(e))
exit()
for (district) in councilData['features']:
dn = district['properties']['CounDist']
c = district['geometry']['coordinates']
dPoly[dn] = addPoly(c)
print ("there are " + str(len(tractData['features'])) + " census tracts")
for (tract) in tractData['features']:
(tn, i) = inDistrict(tract)
intersects[tn] = i
intersectsFile = 'tracts_' + str(round(time.time())) + '.json'
with open(intersectsFile, 'w') as intersectsfo:
json.dump(intersects, intersectsfo)
|
grow/grow | grow/pods/podspec.py | Python | mit | 1,721 | 0.001162 | """Podspec helper."""
from grow.translations import locales
class Error(Exception):
def __init__(self, message):
super(Error, self).__init__(message)
self.message = message
class PodSpecParseError(Error):
pass
class PodSpec(object):
def __init__(self, yaml, pod):
yaml = yaml or {}
self.yaml = yaml
self.pod = pod
self.grow_version = yaml.get('grow_version')
_default_locale = yaml.get('localization', {}).get('default_locale', None)
self.default_locale = locales.Locale.parse(_default_locale)
self.fields = yaml
def get_config(self):
return self.yaml
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
tagged_name = '{}@'.format(name)
if tagged_name in self.fields:
return self.fields[tagged_name]
return object.__getattribute__(self, name)
def __iter__(self):
return self.yaml.__iter__()
@property
def home(self):
return self.pod.get_home_doc()
@property
def root(self):
return self.fields.get('root', '')
@property
def localization(self):
return self.fields.get('localization')
def get_locale_alias(self, locale):
"""Get the locale alias for a given locale."""
if 'localization' in self.yaml and 'aliases' in self.yaml['localization']:
aliases = self.yaml['localization']['a | liases']
| for custom_locale, babel_locale in aliases.items():
normalized_babel_locale = babel_locale.lower()
if locale == normalized_babel_locale:
return custom_locale
return locale
|
eawag-rdm/ckanext-hierarchy | ckanext/hierarchy/plugin.py | Python | agpl-3.0 | 4,054 | 0.00296 | import ckan.plugins as p
from ckanext.hierarchy.logic import action
from ckanext.hierarchy import helpers
fr | om ckan.lib.plugins import DefaultOrganizationForm
import ckan.plugins.toolkit as tk
from lucparser import LucParser
import re
import logging
import pdb
log = logging.getLogger(__name__)
# This plugin is designed to work only these versions of CKAN
p.toolkit.check_ckan_version(min_version='2.0')
class HierarchyDisplay(p.SingletonPlugin):
p.implements(p.IConfigurer, inherit=True)
p.implements(p.IActions, inherit=True)
p.implements(p.ITemplateHel | pers, inherit=True)
p.implements(p.IPackageController, inherit=True)
# IConfigurer
def update_config(self, config):
p.toolkit.add_template_directory(config, 'templates')
p.toolkit.add_template_directory(config, 'public')
p.toolkit.add_resource('public/scripts/vendor/jstree', 'jstree')
# IActions
def get_actions(self):
return {'group_tree': action.group_tree,
'group_tree_section': action.group_tree_section,
'group_tree_children':action.group_tree_children
}
# ITemplateHelpers
def get_helpers(self):
return {'group_tree': helpers.group_tree,
'group_tree_section': helpers.group_tree_section,
'group_tree_crumbs': helpers.group_tree_crumbs,
'get_allowable_parent_groups': helpers.get_allowable_parent_groups,
'render_tree': helpers.render_tree
}
# IPackageController
# Modify the search query to include the datasets from
# the children organizations in the result list
# HvW: Do this always
def before_search(self, search_params):
''' If include children selected the query string is modified '''
def _get_organizations_from_subquery(subquery):
patall = '"?([\w-]+)"?'
patwrong = 'AND|OR|NOT'
patnot = 'NOT\s+("?([\w-]+)"?)'
parentorgs = set(re.findall(patall, subquery))
parentwrong = set(re.findall(patwrong, subquery))
parentnot = set(re.findall(patnot, subquery))
parentorgs = list(parentorgs - parentwrong - parentnot)
return parentorgs
lp = LucParser()
for qtyp in ['fq', 'q']:
query = search_params.get(qtyp, None)
if query:
queryterms = lp.deparse(query)
for i, q in enumerate(queryterms):
if not isinstance(q, dict):
continue
fieldname = q.get('field')
if fieldname not in ['owner_org', 'organization']:
continue
parentgroups = _get_organizations_from_subquery(q.get('term'))
children = [tk.get_action('group_tree_children')
({}, data_dict={'id': p, 'type':'organization'})
for p in parentgroups]
childlist = [c[{'owner_org': 'id', 'organization':
'name'}[fieldname]]
for child in children for c in child]
if childlist:
childsearch = ' OR ' + ' OR '.join(childlist)
search_params[qtyp] = lp.add_to_query(
search_params[qtyp],
childsearch, fieldname=fieldname)
return search_params
class HierarchyForm(p.SingletonPlugin, DefaultOrganizationForm):
p.implements(p.IGroupForm, inherit=True)
# IGroupForm
def group_types(self):
return ('organization',)
def group_controller(self):
return 'organization'
def setup_template_variables(self, context, data_dict):
from pylons import tmpl_context as c
model = context['model']
group_id = data_dict.get('id')
c.allowable_parent_groups = helpers.get_allowable_parent_groups(group_id)
|
andersk/zulip | zerver/lib/transfer.py | Python | apache-2.0 | 3,678 | 0.001631 | import logging
import multiprocessing
import os
from mimetypes import guess_type
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.upload import S3UploadBackend, upload_image_to_s3
from zerver.models import Attachment, RealmEmoji, UserProfile
s3backend = S3UploadBackend()
def transfer_uploads_to_s3(processes: int) -> None:
# TODO: Eventually, we'll want to add realm icon and logo
transfer_avatars_to_s3(processes)
transfer_message_files_to_s3(processes)
transfer_emoji_to_s3(processes)
def _transfer_avatar_to_s3(user: UserProfile) -> None:
avatar_path = user_avatar_path(user)
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + ".original"
try:
with open(file_path, "rb") as f:
s3backend.upload_avatar_image(f, user, user)
logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
except FileNotFoundError:
pass
def transfer_avatars_to_s3(processes: int) -> None:
users = list(UserProfile.objects.all())
if processes == 1:
for user in users:
_transfer_avatar_to_s3(user)
else: # nocoverage
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_avatar_to_s3, users):
pass
def _transfer_message_files_to_s3(at | tachment: Attachment) -> None:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", attachment.path_id)
try:
with open(file_path, "rb") as f:
guessed_type = guess_type(attachment.file_name)[0]
upload_image_to_s3(
s3backend.uploads_bucket,
attachment.path_id,
guessed_type,
attachment.owner,
f.read(),
)
logging.info("Uploaded message file in path % | s", file_path)
except FileNotFoundError: # nocoverage
pass
def transfer_message_files_to_s3(processes: int) -> None:
attachments = list(Attachment.objects.all())
if processes == 1:
for attachment in attachments:
_transfer_message_files_to_s3(attachment)
else: # nocoverage
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_message_files_to_s3, attachments):
pass
def _transfer_emoji_to_s3(realm_emoji: RealmEmoji) -> None:
if not realm_emoji.file_name or not realm_emoji.author:
return # nocoverage
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=realm_emoji.realm.id,
emoji_file_name=realm_emoji.file_name,
)
emoji_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", emoji_path) + ".original"
try:
with open(emoji_path, "rb") as f:
s3backend.upload_emoji_image(f, realm_emoji.file_name, realm_emoji.author)
logging.info("Uploaded emoji file in path %s", emoji_path)
except FileNotFoundError: # nocoverage
pass
def transfer_emoji_to_s3(processes: int) -> None:
realm_emojis = list(RealmEmoji.objects.filter())
if processes == 1:
for realm_emoji in realm_emojis:
_transfer_emoji_to_s3(realm_emoji)
else: # nocoverage
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(_transfer_emoji_to_s3, realm_emojis):
pass
|
apple/swift-lldb | packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setExceptionBreakpoints.py | Python | apache-2.0 | 2,075 | 0.000964 | """
Test lldb-vscode setBreakpoints request
"""
from __future__ | import print_function
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
class TestVSCode_setExceptionBreakpoints(
lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows
@skipIfDarwin # Skip | this test for now until we can figure out why tings aren't working on build bots
@expectedFailureNetBSD
@no_debug_info_test
def test_functionality(self):
'''Tests setting and clearing exception breakpoints.
This packet is a bit tricky on the debug adaptor side since there
is no "clear exception breakpoints" packet. Exception breakpoints
are set by sending a "setExceptionBreakpoints" packet with zero or
more exception filters. If exception breakpoints have been set
before, any exising breakpoints must remain set, and any new
breakpoints must be created, and any breakpoints that were in
previous requests and are not in the current request must be
removed. This exception tests this setting and clearing and makes
sure things happen correctly. It doesn't test hitting breakpoints
and the functionality of each breakpoint, like 'conditions' and
x'hitCondition' settings.
'''
# Visual Studio Code Debug Adaptors have no way to specify the file
# without launching or attaching to a process, so we must start a
# process in order to be able to set breakpoints.
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
filters = ['cpp_throw', 'cpp_catch']
response = self.vscode.request_setExceptionBreakpoints(filters)
if response:
self.assertTrue(response['success'])
self.continue_to_exception_breakpoint('C++ Throw')
self.continue_to_exception_breakpoint('C++ Catch')
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nstimer.py | Python | apache-2.0 | 10,753 | 0.041012 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nstimer(base_resource) :
""" Configuration for Timer resource. """
def __init__(self) :
self._name = ""
self._interval = 0
self._unit = ""
self._comment = ""
self._newname = ""
self.___count = 0
@property
def name(self) :
ur"""Timer name.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Timer name.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def interval(self) :
ur"""The frequency at which the policies bound to this timer are invoked. The minimum value is 20 msec. The maximum value is 20940 in seconds and 349 in minutes.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 20940000.
"""
try :
return self._interval
except Exception as e:
raise e
@interval.setter
def interval(self, interval) :
ur"""The frequency at which the policies bound to this timer are invoked. The minimum value is 20 msec. The maximum value is 20940 in seconds and 349 in minutes.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 20940000
"""
try :
self._interval = interval
except Exception as e:
raise e
@property
def unit(self) :
ur"""Timer interval unit.<br/>Default value: SEC<br/>Possible values = SEC, MIN.
"""
try :
return self._unit
except Exception as e:
raise e
@unit.setter
def unit(self, unit) :
ur"""Timer interval unit.<br/>Default value: SEC<br/>Possible values = SEC, MIN
"""
try :
self._unit = unit
except Exception as e:
raise e
@property
def comment(se | lf) :
ur"""Comments associated with this timer.
"""
try :
return self._comm | ent
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
ur"""Comments associated with this timer.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def newname(self) :
ur"""The new name of the timer.<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
ur"""The new name of the timer.<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nstimer_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nstimer
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add nstimer.
"""
try :
if type(resource) is not list :
addresource = nstimer()
addresource.name = resource.name
addresource.interval = resource.interval
addresource.unit = resource.unit
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].interval = resource[i].interval
addresources[i].unit = resource[i].unit
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete nstimer.
"""
try :
if type(resource) is not list :
deleteresource = nstimer()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update nstimer.
"""
try :
if type(resource) is not list :
updateresource = nstimer()
updateresource.name = resource.name
updateresource.interval = resource.interval
updateresource.unit = resource.unit
updateresource.comment = resource.comment
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].interval = resource[i].interval
updateresources[i].unit = resource[i].unit
updateresources[i].comment = resource[i].comment
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of nstimer resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nstimer()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
unsetresource.interval = resource.interval
unsetresource.unit = resource.unit
unsetresource.comment = resource.comment
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
unsetresources[i].interval = resource[i].interval
unsetresources[i].unit = resource[i].unit
unsetresources[i].comment = resource[i].comment
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, |
LordSputnik/mutagen | tests/test_oggspeex.py | Python | gpl-2.0 | 2,072 | 0.000965 | import os
import shutil
from mutagen._compat import cBytesIO
from mutagen.ogg import OggPage
from mutagen.oggspeex import OggSpeex, OggSpeexInfo, delete
from tests import add
from tests.test_ogg import TOggFileType
from tempfile import mkstemp
class TOggSpeex(TOggFileType):
Kind = OggSpeex
def setUp(self):
original = os.path.join("tests", "data", "empty.spx")
fd, self.filename = mkstemp(suffix='.ogg')
os.close(fd)
shutil.copy(original, self.filename)
self.audio = self.Kind(self.filename)
def test_module_delete(self):
delete(self.filename)
self.scan_file()
self.failIf(OggSpeex(self.filename).tags)
def test_channels(self):
self.failUnlessEqual(2, self.audio.info.channels)
def test_sample_rate(self):
self.failUnlessEqual(44100, self.audio.info.sample_rate)
def test_bitrate(self):
self.failUnlessEqual(0, self.audio.info.bitrate)
def test_invalid_not_first(self):
page = OggPage(open(self.filename, "rb"))
page.first = | False
self.failUnlessRaises(IOError, OggSpeexInfo, cBytesIO(page.write()))
def test_vendor(self):
self.failUnless(
self.audio.tags.vendor.startswith("Encoded with Speex 1.1.12"))
self.failUnlessRaises(KeyError | , self.audio.tags.__getitem__, "vendor")
def test_not_my_ogg(self):
fn = os.path.join('tests', 'data', 'empty.oggflac')
self.failUnlessRaises(IOError, type(self.audio), fn)
self.failUnlessRaises(IOError, self.audio.save, fn)
self.failUnlessRaises(IOError, self.audio.delete, fn)
def test_multiplexed_in_headers(self):
shutil.copy(
os.path.join("tests", "data", "multiplexed.spx"), self.filename)
audio = self.Kind(self.filename)
audio.tags["foo"] = ["bar"]
audio.save()
audio = self.Kind(self.filename)
self.failUnlessEqual(audio.tags["foo"], ["bar"])
def test_mime(self):
self.failUnless("audio/x-speex" in self.audio.mime)
add(TOggSpeex)
|
zhaohuaw/odoo-infrastructure | addons/infrastructure/database_type.py | Python | agpl-3.0 | 2,270 | 0.003084 | # -*- coding: utf-8 -*-
##############################################################################
#
# Infrastructure
# Copyright (C) 2014 Ingenieria ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class database_type(osv.osv):
""""""
_name = 'infrastructure.database_type'
_description = 'database_type'
_columns = {
'name': fields.char(string='Name', required=True),
'prefix': fields.char(string='Prefix', required=True, size=4),
'url_prefix': fields.char(string='URL Prefix'),
'automatic_drop': fields.boolean(string='Automatic Drop'),
'automatic_drop_days': fields.integer(string='Automatic Drop Days'),
'protect_db': fields.boolean(string='Protect DBs?'),
'color': fields.integer(string='Color'),
'automatic_deactivation': fields.boolean(string='Atumatic Deactivation?'),
'auto_deactivation_days': fields.integer(string='Automatic Drop Days'),
| 'url_example': fields.char(string='URL Example | '),
'bd_name_example': fields.char(string='BD Name Example'),
'db_back_up_policy_ids': fields.many2many('infrastructure.db_back_up_policy', 'infrastructure_database_type_ids_db_back_up_policy_ids_rel', 'database_type_id', 'db_back_up_policy_id', string='Suggested Backup Policies'),
}
_defaults = {
}
_constraints = [
]
database_type()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
avehtari/GPy | GPy/kern/src/ODE_t.py | Python | bsd-3-clause | 7,626 | 0.026226 | from .kern import Kern
from ...core.parameterization import Param
from paramz.transformations import Logexp
import numpy as np
from .independent_outputs import index_to_slices
class ODE_t(Kern):
def __init__(self, input_dim, a=1., c=1.,variance_Yt=3., lengthscale_Yt=1.5,ubias =1., active_dims=None, name='ode_st'):
assert input_dim ==2, "only defined for 2 input dims"
super(ODE_t, self).__init__(input_dim, active_dims, name)
self.variance_Yt = Param('variance_Yt', variance_Yt, Logexp())
self.lengthscale_Yt = Param('lengthscale_Yt', lengthscale_Yt, Logexp())
self.a= Param('a', a, Logexp())
self.c = Param('c', c, Logexp())
self.ubias = Param('ubias', ubias, Logexp())
self.link_parameters(self.a, self.c, self.variance_Yt, self.lengthscale_Yt,self.ubias)
def K(self, X, X2=None):
"""Compute the covariance matrix between X and X2."""
X,slices = X[:,:-1],index_to_slices(X[:,-1])
if X2 is None:
X2,slices2 = X,slices
K = np.zeros((X.shape[0], X.shape[0]))
else:
X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])
K = np.zeros((X.shape[0], X2.shape[0]))
tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
ttdist = (X[:,0][:,None] - X2[:,0][None,:])
vyt = self.variance_Yt
lyt=1/(2*self.lengthscale_Yt)
a = -self.a
c = self.c
kyy = lambda tdist: np.exp(-lyt*(tdist))
k1 = lambda tdist: (2*lyt - 4*lyt**2 *(tdist) )
k4 = lambda tdist: 2*lyt*(tdist)
for i, s1 in enumerate(slices):
for j, s2 in enumerate(slices2):
for ss1 in s1:
for ss2 in s2:
if i==0 and j==0:
K[ss1,ss2] = vyt*kyy(tdist[ss1,ss2])
elif i==0 and j==1:
K[ss1,ss2] = (k4(ttdist[ss1,ss2])+1)*vyt*kyy(tdist[ss1,ss2])
#K[ss1,ss2] = (2*lyt*(ttdist[ss1,ss2])+1)*vyt*kyy(tdist[ss1,ss2])
elif i==1 and j==1:
K[ss1,ss2] = ( k1(tdist[ss1,ss2]) + 1. )*vyt* kyy(tdist[ss1,ss2])+self.ubias
else:
K[ss1,ss2] = (-k4(ttdist[ss1,ss2])+1)*vyt*kyy(tdist[ss1,ss2])
#K[ss1,ss2] = (-2*lyt*(ttdist[ss1,ss2])+1)*vyt*kyy(tdist[ss1,ss2])
#stop
return K
def Kdiag(self, X):
vyt = self.variance_Yt
lyt = 1./(2*self.lengthscale_Yt)
a = -self.a
c = self.c
k1 = (2*lyt )*vyt
Kdiag = np.zeros(X.shape[0])
slices = index_to_slices(X[:,-1])
for i, ss1 in enumerate(slices):
for s1 in ss1:
if i==0:
Kdiag[s1]+= vyt
elif i==1:
#i=1
Kdiag[s1]+= k1 + vyt+self.ubias
#Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
raise ValueError("invalid input/output index")
return Kdiag
def update_gradients_full(self, dL_dK, X, X2=None):
"""derivative of the covariance matrix with respect to the parameters."""
X,slices = X[:,:-1],index_to_slices(X[:,-1])
if X2 is None:
X2,slices2 = X,slices
K = np.zeros((X.shape[0], X.shape[0]))
else:
X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])
vyt = self.variance_Yt
lyt = 1./(2*self.lengthscale_Yt)
tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
ttdist = (X[:,0][:,None] - X2[:,0][None,:])
#rdist = [tdist,xdist]
rd=tdist.shape[0]
dka = np.zeros([rd,rd])
dkc = np.zeros([rd,rd])
dkYdvart = np.zeros([rd,rd])
dkYdlent = np.zeros([rd,rd])
dkdubias = np.zeros([rd,rd])
kyy = lambda tdist: np.exp(-lyt*(tdist))
dkyydlyt = lambda tdist: kyy(tdist)*(-tdist)
k1 = lambda tdist: (2*lyt - 4*lyt**2 * (tdist) )
k4 = lambda ttdist: 2*lyt*(ttdist)
dk1dlyt = lambda tdist: 2. - 4*2.*lyt*tdist
dk4dlyt = lambda ttdist: 2*(ttdist)
for i, s1 in enumerate(slices):
for j, s2 in enumerate(slices2):
for ss1 in s1:
for ss2 in s2:
if i==0 and j==0:
dkYdvart[ss1,ss2] = kyy(tdist[ss1,ss2])
dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])
dkdubias[ss1,ss2] = 0
elif i==0 and j==1:
dkYdvart[ss1,ss2] = (k4(ttdist[ss1,ss2])+1)*kyy(tdist[ss1,ss2])
#dkYdvart[ss1,ss2] = ((2*lyt*ttdist[ss1,ss2])+1)*kyy(tdist[ss1,ss2])
dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])* (k4(ttdist[ss1,ss2])+1.)+\
vyt*kyy(tdist[ss1,ss2])*(dk4dlyt(ttdist[ss1,ss2]))
#dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])* (2*lyt*(ttdist[ss1,ss2])+1.)+\
#vyt*kyy(tdist[ss1,ss2])*(2*ttdist[ss1,ss2])
dkdubias[ss1,ss2] = 0
elif i==1 and j==1:
dkYdvart[ss1,ss2] = (k1(tdist[ss1,ss2]) + 1. )* kyy(tdist[ss1,ss2])
dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])*( k1(tdist[ss1,ss2]) + 1. ) +\
vyt*kyy(tdist[ss1,ss2])*dk1dlyt(tdist[ss1,ss2])
dkdubias[ss1,ss2] = 1
else:
dkYdvart[ss1,ss2] = (-k4(ttdist[ss1,ss2])+1)*kyy(tdist[ss1,ss2])
#dkYdvart[ss1,ss2] = (-2*lyt*(ttdist[ss1,ss2])+1)*kyy(tdist[ss1,ss2])
d | kYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])* (-k4(ttdist[ss1,ss2])+1.)+\
vyt*kyy(tdist[ss1,ss2])*(-dk4dlyt(ttdist[ss1,ss2]) )
dkdubias[ss1,ss2] = 0
| #dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])* (-2*lyt*(ttdist[ss1,ss2])+1.)+\
#vyt*kyy(tdist[ss1,ss2])*(-2)*(ttdist[ss1,ss2])
self.variance_Yt.gradient = np.sum(dkYdvart * dL_dK)
self.lengthscale_Yt.gradient = np.sum(dkYdlent*(-0.5*self.lengthscale_Yt**(-2)) * dL_dK)
self.ubias.gradient = np.sum(dkdubias * dL_dK)
|
mbkumar/pymatgen | pymatgen/io/tests/test_cif.py | Python | mit | 43,823 | 0.000707 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
import numpy as np
from pymatgen.io.cif import CifParser, CifWriter, CifBlock
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Element, Specie, Lattice, Structure, Composition, DummySpecie
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.testing import PymatgenTest
from pymatgen.electronic_structure.core import Magmom
try:
import pybtex
except ImportError:
pybtex = None
class CifBlockTest(PymatgenTest):
def test_to_string(self):
with open(self.TEST_FILES_DIR / 'Graphite.cif') as f:
s = f.read()
c = CifBlock.from_string(s)
cif_str_2 = str(CifBlock.from_string(str(c)))
cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"),
cif_str_2.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
self.assertEqual(l2.strip(), l3.strip())
def test_double_quotes_and_underscore_data(self):
cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
self.assertEqual(cb["_thing"], "_annoying_data")
self.assertEqual(str(cb), cif_str.replace('"', "'"))
def test_double_quoted_data(self):
cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " '_annoying_data'")
self.assertEqual(cb["_other"], ' "_more_annoying_data"')
self.assertEqual(cb["_more"], ' "even more" ')
def test_nested_fake_multiline_quotes(self):
cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " long quotes ; still in the quote"
" ; actually going to end now")
def test_long_loop(self):
data = {'_stuff1': ['A' * 30] * 2,
'_stuff2': ['B' * 30] * 2,
'_stuff3': ['C' * 30] * 2}
loops = [['_stuff1', '_stuff2', '_stuff3']]
cif_str = """data_test
loop_
_stuff1
_stuff2
_stuff3
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
self.assertEqual(str(CifBlock(data, loops, 'test')), cif_str)
class CifIOTest(PymatgenTest):
def test_CifParser(self):
parser = CifParser(self.TEST_FILES_DIR / 'LiFePO4.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li4 Fe4 P4 O16",
"Incorrectly parsed cif.")
parser = CifParser(self.TEST_FILES_DIR / 'V2O3.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "V4 O6")
bibtex_str = """
@article{cifref0,
author = "Andersson, G.",
title = "Studies on vanadium oxides. I. Phase analysis",
journal = "Acta Chemica Scandinavica (1-27,1973-42,1988)",
volume = "8",
year = "1954",
pages = "1599--1606"
}
"""
self.assertEqual(parser.get_bibtex_string().strip(), bibtex_str.strip())
parser = CifParser(self.TEST_FILES_DIR / 'Li2O.cif')
prim = parser.get_structures(True)[0]
self.assertEqual(prim.formula, "Li2 O1")
conv = parser.get_structures(False)[0]
self.assertEqual(conv.formula, "Li8 O4")
# test for disordered structures
parser = CifParser(self.TEST_FILES_DIR / 'Li10GeP2S12.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24",
"Incorrectly parsed cif.")
cif_str = """#\#CIF1.1
##########################################################################
# Crystallographic Information Format file
# Produced by PyCifRW module
#
# This is a CIF file. CIF has been adopted by the International
# Union of Crystallography as the standard for data archiving and
# transmission.
#
# For information on this file format, follow the CIF links at
# http://www.iucr.org
##########################################################################
data_FePO4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 10.4117668699
_cell_length_b 6.06717187997
_cell_length_c 4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta 92.0
_cell_angle_gamma 93.0
_chemical_name_systematic 'Generated by pymatgen'
_symmetry_Int_Tables_number 1
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_ | atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_attached_hydrogens
_atom_site_B_iso_or_equiv
_atom_site_occupancy
Fe Fe1 | 1 0.218728 0.750000 0.474867 0 . 1
Fe JJ2 1 0.281272 0.250000 0.974867 0 . 1
# there's a typo here, parser should read the symbol from the
# _atom_site_type_symbol
Fe Fe3 1 0.718728 0.750000 0.025133 0 . 1
Fe Fe4 1 0.781272 0.250000 0.525133 0 . 1
P P5 1 0.094613 0.250000 0.418243 0 . 1
P P6 1 0.405387 0.750000 0.918243 0 . 1
P P7 1 0.594613 0.250000 0.081757 0 . 1
P P8 1 0.905387 0.750000 0.581757 0 . 1
O O9 1 0.043372 0.750000 0.707138 0 . 1
O O10 1 0.096642 0.250000 0.741320 0 . 1
O O11 1 0.165710 0.046072 0.285384 0 . 1
O O12 1 0.1657 |
ghorejsi/hunt-the-wump | tests/map_tests.py | Python | gpl-2.0 | 256 | 0.003906 | __author__ = 'petastream'
import unittest
from game | .map impo | rt Map
class TestMap(unittest.TestCase):
def setUp(self):
self.map = Map()
def test_generate(self):
self.map.generate()
if __name__ == "__main__":
unittest.main() |
mbkumar/pydii | examples/NiAl_mp-1487/gen_def_energy.py | Python | mit | 9,115 | 0.012946 | #!/usr/bin/env python
"""
This file computes the raw defect energies (for vacancy and antisite defects)
by parsing the vasprun.xml files in the VASP DFT calculations
for binary intermetallics, where the meta data is in the folder name
"""
#from __future__ import unicode_literals
from __future__ import division
__author__ = "Bharat Medasani"
__data__ = "Sep 14, 2014"
import os
import sys
import glob
from argparse import ArgumentParser
from pymatgen.matproj.rest import MPRester
from monty.serialization import dumpfn
from monty.json import MontyEncoder
from pymatgen.serializers.json_coders import pmg_dump
from pymatgen.io.vaspio.vasp_output import Vasprun
def solute_def_parse_energy(mpid, solute, mapi_key=None):
if not mpid:
print ("============\nERROR: Provide an mpid\n============")
return
if not solute:
print ("============\nERROR: Provide solute element\n============")
return
if not mapi_key:
with MPRester() as mp:
structure = mp.get_structure_by_material_id(mpid)
else:
with MPRester(ampi_key) as mp:
structure = mp.get_structure_by_material_id(mpid)
energy_dict = {}
solutes = []
def_folders = glob.glob(os.path.join(
mpid,"solute*subspecie-{}".format(solute)))
def_folders += glob.glob(os.path.join(mpid,"bulk"))
for defdir in def_folders:
fldr_name = os.path.split(defdir)[1]
vr_file = os.path.join(defdir,'vasprun.xml')
if not os.path.exists(vr_file):
print (fldr_name, ": vasprun.xml doesn't exist in the folder. " \
"Abandoning parsing of energies for {}".format(mpid))
break # Further processing for the mpid is not useful
try:
vr = Vasprun(vr_file)
except:
print (fldr_name, ":Failure, couldn't parse vaprun.xml file. "
"Abandoning parsing of energies for {}".format(mpid))
break
if not vr.converged:
print (fldr_name, ": Vasp calculation not converged. "
"Abandoning parsing of energies for {}".format(mpid))
break # Further processing for the mpid is not useful
fldr_fields = fldr_name.split("_")
if 'bulk' in fldr_fields:
bulk_energy = vr.final_energy
bulk_sites = vr.structures[-1].num_sites
elif 'solute' in fldr_fields:
site_index = int(fldr_fields[1])
site_multiplicity = int(fldr_fields[2].split("-")[1])
site_specie = fldr_fields[3].split("-")[1]
substitution_specie = fldr_fields[4].split("-")[1]
energy = vr.final_energy
solutes.append({'site_index':site_index,
'site_specie':site_specie,'energy':energy,
'substitution_specie':substitution_specie,
'site_multiplicity':site_multiplicity
})
else:
if not solutes:
print "Solute folders do not exist"
return {}
print ("Solute {} calculations successful for {}".format(solute,mpid))
for solute in solutes:
solute_flip_energy = solute['energy']-bulk_energy
solute['energy'] = solute_flip_energy
energy_dict[mpid] = {'solutes':solutes}
return energy_dict
return {} # Return Null dict due to failure
def vac_antisite_def_parse_energy(mpid, mapi_key=None):
if not mpid:
print ("============\nERROR: Provide an mpid\n============")
return
if not mapi_key:
with MPRester() as mp:
structure = mp.get_structure_by_material_id(mpid)
else:
with MPRester(ampi_key) as mp:
structure = mp.get_structure_by_material_id(mpid)
energy_dict = {}
antisites = []
vacancies = []
def_folders = glob.glob(os.path.join(mpid,"vacancy*"))
def_folders += glob.glob(os.path.join(mpid,"antisite*"))
def_folders += glob.glob(os.path.join(mpid,"bulk"))
for defdir in def_folders:
fldr_name = os.path.split(defdir)[1]
vr_file = os.path.join(defdir,'vasprun.xml')
if not os.path.exists(vr_file):
print (fldr_name, ": vasprun.xml doesn't exist in the folder. " \
"Abandoning parsing of energies for {}".format(mpid))
break # Further processing for the mpid is not useful
try:
vr = Vasprun(vr_file)
except:
print (fldr_name, ":Failure, couldn't parse vaprun.xml file. "
"Abandoning parsing of energies for {}".format(mpid))
break
if not vr.converged:
print (fldr_name, ": Vasp calculation not converged. "
"Abandoning parsing of energies for {}".format(mpid))
break # Further processing for the mpid is not useful
fldr_fields = fldr_name.split("_")
if 'bulk' in fldr_fields:
bulk_energy = vr.final_energy
bulk_sites = vr.structures[-1].num_sites
elif 'vacancy' in fldr_fields:
site_index = int(fldr_fields[1])
site_multiplicity = int(fldr_fields[2].split("-")[1])
site_specie = fldr_fields[3].split("-")[1]
energy = vr.final_energy
vacancies.append({'site_index':site_index,
'site_specie':site_specie,'energy':energy,
'site_multiplicity':site_multiplicity
})
elif 'antisite' in fldr_fields:
site_index = int(fldr_fields[1])
site_multiplicity = int(fldr_fields[2].split("-")[1])
site_specie = fldr_fields[3].split("-")[1]
substitution_specie = fldr_fields[4].split("-")[1]
energy = vr.final_energy
antisites.append({'site_index':site_index,
'site_specie':site_specie,'energy':energy,
'substitution_specie':substitution_specie,
'site_multiplicity':site_multiplicity
})
else:
print "All calculations successful for ", mpid
e0 = bulk_energy/bulk_sites*structure.num_sites
for vac in vacancies:
vac_flip_energy = vac['energy']-bulk_energy
vac['energy'] = vac_flip_energy
for antisite in antisites:
as_flip_energy = antisite['energy']-bulk_energy
antisite['energy'] = as_flip_energy
energy_dict[unicode(mpid)] = {u"structure":structure,
'e0':e0,'vacancies':vacancies,'antisites':antisites}
return energy_dict
return {} # Return Null dict due to failure
def im_vac_antisite_def_energy_parse():
m_description = 'Command to parse vacancy and antisite defect ' \
'energies for intermetallics from the VASP DFT ' \
'calculations.'
parser = ArgumentParser(description=m_description)
parser.add_argument("--mpid",
type=str.lower,
help="Materials Project id of the intermetallic structure.\n" \
"For more info on Materials Project, please refer to " \
"www.materialsproj | ect.org")
parser.add_argument("--mapi_key",
default = None,
help="Your Materials Project REST API key.\n" \
"For more info, please refer to " \
"www.materialsproject.org/opne")
args = parser.parse_args()
print args
energy_dict = vac_antisite_def_parse_energy(args.mpid, args.mapi_key)
print type(energy_dict)
for key,value in energy_dict.items():
| print key
print type(key), type(value)
for key2, val2 in value.items():
print type(key2), type(val2)
if energy_dict:
fl_nm = args.mpid+'_raw_defect_energy.json'
dumpfn(energy_dict, fl_nm, cls=MontyEncoder, indent=2)
def im_sol_sub_def_energy_parse():
m_description = 'Command to parse solute substitution defect ' \
'energies for intermetallics from the VASP DFT ' \
'calculations.'
parser = ArgumentParser(description=m_description)
parser.add_argum |
ezequielo/diff-cover | diff_cover/tests/test_snippets.py | Python | apache-2.0 | 9,261 | 0.000216 | from __future__ import unicode_literals
import mock
import os
import tempfile
from pygments.token import Token
from diff_cover.snippets import Snippet
from diff_cover.tests.helpers import load_fixture,\
fixture_path, assert_long_str_equal, unittest
import six
class SnippetTest(unittest.TestCase):
SRC_TOKENS = [
(Token.Comment, u'# Test source'),
(Token.Text, u'\n'),
(Token.Keyword, u'def'),
(Token.Text, u' '),
(Token.Name.Function, u'test_func'),
(Token.Punctuation, u'('),
(Token.Name, u'arg'),
(Token.Punctuation, u')'),
(Token.Punctuation, u':'),
(Token.Text, u'\n'),
(Token.Text, u' '),
(Token.Keyword, u'print'),
(Token.Text, u' '),
(Token.Name, u'arg'),
(Token.Text, u'\n | '),
(Token.Text, u' '),
(Token.Keyword, u'return'),
(Tok | en.Text, u' '),
(Token.Name, u'arg'),
(Token.Text, u' '),
(Token.Operator, u'+'),
(Token.Text, u' '),
(Token.Literal.Number.Integer, u'5'),
(Token.Text, u'\n'),
]
FIXTURES = {
'style': 'snippet.css',
'default': 'snippet_default.html',
'invalid_violations': 'snippet_invalid_violations.html',
'no_filename_ext': 'snippet_no_filename_ext.html',
'unicode': 'snippet_unicode.html',
}
def test_style_defs(self):
style_str = Snippet.style_defs()
expected_styles = load_fixture(self.FIXTURES['style']).strip()
# Check that a sample of the styles are present
# (use only a sample to make the test more robust
# against Pygment changes).
for expect_line in expected_styles.split('\n'):
self.assertIn(expect_line, style_str)
def test_format(self):
self._assert_format(
self.SRC_TOKENS, 'test.py',
4, [4, 6], self.FIXTURES['default']
)
def test_format_with_invalid_start_line(self):
for start_line in [-2, -1, 0]:
with self.assertRaises(ValueError):
Snippet('# test', 'test.py', start_line, [])
def test_format_with_invalid_violation_lines(self):
# Violation lines outside the range of lines in the file
# should be ignored.
self._assert_format(
self.SRC_TOKENS, 'test.py',
1, [-1, 0, 5, 6],
self.FIXTURES['invalid_violations']
)
def test_no_filename_ext(self):
# No filename extension: should default to text lexer
self._assert_format(
self.SRC_TOKENS, 'test',
4, [4, 6],
self.FIXTURES['no_filename_ext']
)
def test_unicode(self):
unicode_src = [(Token.Text, u'var = \u0123 \u5872 \u3389')]
self._assert_format(
unicode_src, 'test.py',
1, [], self.FIXTURES['unicode']
)
def _assert_format(self, src_tokens, src_filename,
start_line, violation_lines,
expected_fixture):
snippet = Snippet(src_tokens, src_filename,
start_line, violation_lines)
result = snippet.html()
expected_str = load_fixture(expected_fixture, encoding='utf-8')
assert_long_str_equal(expected_str, result, strip=True)
self.assertTrue(isinstance(result, six.text_type))
class SnippetLoaderTest(unittest.TestCase):
def setUp(self):
"""
Create a temporary source file.
"""
_, self._src_path = tempfile.mkstemp()
# Path tool should not be aware of testing command
path_mock = mock.patch('diff_cover.violations_reporter.GitPathTool').start()
path_mock.absolute_path = lambda path: path
path_mock.relative_path = lambda path: path
def tearDown(self):
"""
Delete the temporary source file.
"""
os.remove(self._src_path)
mock.patch.stopall()
def test_one_snippet(self):
self._init_src_file(10)
violations = [2, 3, 4, 5]
expected_ranges = [(1, 9)]
self._assert_line_range(violations, expected_ranges)
def test_multiple_snippets(self):
self._init_src_file(100)
violations = [30, 31, 32, 35, 36, 60, 62]
expected_ranges = [(26, 40), (56, 66)]
self._assert_line_range(violations, expected_ranges)
def test_no_lead_line(self):
self._init_src_file(10)
violations = [1, 2, 3]
expected_ranges = [(1, 7)]
self._assert_line_range(violations, expected_ranges)
def test_no_lag_line(self):
self._init_src_file(10)
violations = [9, 10]
expected_ranges = [(5, 10)]
self._assert_line_range(violations, expected_ranges)
def test_one_line_file(self):
self._init_src_file(1)
violations = [1]
expected_ranges = [(1, 1)]
self._assert_line_range(violations, expected_ranges)
def test_empty_file(self):
self._init_src_file(0)
violations = [0]
expected_ranges = []
self._assert_line_range(violations, expected_ranges)
def test_no_violations(self):
self._init_src_file(10)
violations = []
expected_ranges = []
self._assert_line_range(violations, expected_ranges)
def test_end_range_on_violation(self):
self._init_src_file(40)
# With context, the range for the snippet at 28 is 33
# Expect that the snippet expands to include the violation
# at the border.
violations = [28, 33]
expected_ranges = [(24, 37)]
self._assert_line_range(violations, expected_ranges)
def _compare_snippets_html_output(self, filename, violations, expected_out_filename):
# Need to be in the fixture directory
# so the source path is displayed correctly
old_cwd = os.getcwd()
self.addCleanup(lambda: os.chdir(old_cwd))
os.chdir(fixture_path(''))
# One higher-level test to make sure
# the snippets are being rendered correctly
snippets_html = '\n\n'.join(
Snippet.load_snippets_html(filename, violations)
)
# Load the fixture for the expected contents
expected_path = fixture_path(expected_out_filename)
with open(expected_path) as fixture_file:
expected = fixture_file.read()
if isinstance(expected, six.binary_type):
expected = expected.decode('utf-8')
# Check that we got what we expected
assert_long_str_equal(expected, snippets_html, strip=True)
def test_load_snippets_html(self):
self._compare_snippets_html_output('snippet_src.py',
[10, 12, 13, 50, 51, 54, 55, 57],
'snippet_list.html')
def test_load_utf8_snippets(self):
self._compare_snippets_html_output('snippet_unicode.py',
[10, 12, 13, 50, 51, 54, 55, 57],
'snippet_unicode_html_output.html')
def test_load_declared_arabic(self):
self._compare_snippets_html_output('snippet_8859.py',
[7],
'snippet_arabic_output.html')
def _assert_line_range(self, violation_lines, expected_ranges):
"""
Assert that the snippets loaded using `violation_lines`
have the correct ranges of lines.
`violation_lines` is a list of line numbers containing violations
(which should get included in snippets).
`expected_ranges` is a list of `(start, end)` tuples representing
the starting and ending lines expected in a snippet.
Line numbers start at 1.
"""
# Load snippets from the source file
snippet_list = Snippet.load_snippets(
self._src_path, violation_lines
)
# Check that we got the right number of snippets
self.assertEqual(len(snippet_list), len(expected_ranges))
# Check that the snippets have the desired ranges
|
dnjohnstone/hyperspy | hyperspy/_components/offset.py | Python | gpl-3.0 | 3,522 | 0.00142 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy.component import Component
from hyperspy.docstrings.parameters import FUNCTION_ND_DOCSTRING
class Offset(Component):
r"""Component to add a constant value in the y-axis.
.. math::
f(x) = k
============ =============
Variable Parameter
============ =============
:math:`k` offset
============ =============
Parameters
-----------
offset : float
"""
def __init__(self, offset=0.):
Component.__init__(self, ('offset',))
self.offset.free = True
self.offset.value = offset
self.isbackground = True
self.convolved = False
# Gradients
self.offset.grad = self.grad_offset
def function(se | lf, x):
return self._function(x, self.offset.value)
def _function(self, x, o):
return np.ones_like(x) * o
@staticmethod
def grad_offset(x):
return np.ones_like(x)
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the parameters by the two area method
Parameters
----------
signal : BaseSignal instance
x1 : float
Defines the left limit of the spectral range to use | for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
"""
super(Offset, self)._estimate_parameters(signal)
axis = signal.axes_manager.signal_axes[0]
i1, i2 = axis.value_range_to_indices(x1, x2)
if only_current is True:
self.offset.value = signal()[i1:i2].mean()
if self.binned:
self.offset.value /= axis.scale
return True
else:
if self.offset.map is None:
self._create_arrays()
dc = signal.data
gi = [slice(None), ] * len(dc.shape)
gi[axis.index_in_array] = slice(i1, i2)
self.offset.map['values'][:] = dc[tuple(
gi)].mean(axis.index_in_array)
if self.binned:
self.offset.map['values'] /= axis.scale
self.offset.map['is_set'][:] = True
self.fetch_stored_values()
return True
def function_nd(self, axis):
"""%s
"""
if self._is_navigation_multidimensional:
x = axis[np.newaxis, :]
o = self.offset.map['values'][..., np.newaxis]
else:
x = axis
o = self.offset.value
return self._function(x, o)
function_nd.__doc__ %= FUNCTION_ND_DOCSTRING
|
OsipovStas/ayc-2013 | ayc/show.py | Python | gpl-2.0 | 455 | 0.008791 | # | coding=utf-8
__author__ = 'stasstels'
import cv2
import sys
image = sys.argv[1]
targets = sys.argv[2]
# Load an color image in grayscale
img = cv2.imread(image, cv2.IMREAD_COLOR)
with open(targets, "r") as f:
for line in f:
print line
(_, x, y) = line.split()
cv2.circle(img, (int(x), int(y)), 20, (255, 0, 255), -1)
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAl | lWindows()
|
suresh/notebooks | fluentpy/list_comprehension.py | Python | mit | 968 | 0 | # Fluent Python Book
# List comprehensions are faster than for-loops
import time
from random import choices
symbols = list('abcdefghijklmn')
print(symbols)
symbols_big = choices(symbols, k=2000000)
# print(symbols_big)
start = time.time()
ord_list1 = []
for sym in | symbols_big:
ord_list1.append(ord(sym))
# print('ord list1:', ord_list1)
end = time.time()
print('for loop ran in %f s' % (end - start))
start = time.time()
# list comprehension
ord_list2 = [ord(sym) for sym in symbols_big]
# print('ord list2:', ord_list2)
end = time.time()
print('for loop ran in %f s' % (end - start))
# let's do a performance bench | mark of this list comprehension
l_nums = [i for i in range(1000000)]
start = time.time()
sq_nums = []
for i in l_nums:
sq_nums.append(i ** 2)
end = time.time()
print('for loop ran in %f s' % (end - start))
start = time.time()
sq_nums = [i ** 2 for i in range(1000000)]
end = time.time()
print('list comp ran in %f s' % (end - start))
|
jeffknupp/kickstarter_video_two | proxy.py | Python | apache-2.0 | 3,925 | 0.000255 | """An HTTP proxy that supports IPv6 as well as the HTTP CONNECT method, among
other things."""
# Standard libary imports
import socket
import thread
import select
__version__ = '0.1.0 Draft 1'
BUFFER_LENGTH = 8192
VERSION = 'Python Proxy/{}'.format(__version__)
HTTP_VERSION = 'HTTP/1.1'
class ConnectionHandler(object):
"""Handles connections between the HTTP client and HTTP server."""
def __init__(self, connection, _, timeout):
self.client = connection
self.client_buffer = ''
self.timeout = timeout
self.target = None
method, path, protocol = self.get_base_header()
if method == 'CONNECT':
self.method_connect(path)
else:
self.method_others(method, path, protocol)
def get_base_header(self):
"""Return a tuple of (method, path, protocol) from the recieved
message."""
while 1:
self.client_buffer += self.client.recv(BUFFER_LENGTH)
end = self.client_buffer.find('\n')
if end != -1:
break
print '{}'.format(self.client_buffer[:end])
data = (self.client_buffer[:end+1]).split()
self.client_buffer = self.client_buffer[end+1:]
r | eturn data
def method_connect(self, path):
"""Handle HTTP CONNECT messages."""
self._connect_target(path)
| self.client.send('{http_version} 200 Connection established\n'
'Proxy-agent: {version}\n\n'.format(
http_version=HTTP_VERSION,
version=VERSION))
self.client_buffer = ''
self._read_write()
def method_others(self, method, path, protocol):
"""Handle all non-HTTP CONNECT messages."""
path = path[7:]
i = path.find('/')
host = path[:i]
path = path[i:]
self._connect_target(host)
self.target.send('{method} {path} {protocol}\n{client_buffer}'.format(
method=method,
path=path,
protocol=protocol,
client_buffer=self.client_buffer))
self.client_buffer = ''
self._read_write()
def _connect_target(self, host):
"""Create a connection to the HTTP server specified by *host*."""
i = host.find(':')
if i != -1:
port = int(host[i+1:])
host = host[:i]
else:
port = 80
(soc_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]
self.target = socket.socket(soc_family)
self.target.connect(address)
def _read_write(self):
"""Read data from client connection and forward to server
connection."""
time_out_max = self.timeout/3
socs = [self.client, self.target]
count = 0
while 1:
count += 1
(recv, _, error) = select.select(socs, [], socs, 3)
if error:
break
if recv:
for in_ in recv:
data = in_.recv(BUFFER_LENGTH)
if in_ is self.client:
out = self.target
else:
out = self.client
if data:
out.send(data)
count = 0
if count == time_out_max:
break
self.client.close()
self.target.close()
def start_server(host='localhost', port=8080, ipv_6=False, timeout=60,
handler=ConnectionHandler):
"""Start the HTTP proxy server."""
if ipv_6:
soc_type = socket.AF_INET6
else:
soc_type = socket.AF_INET
soc = socket.socket(soc_type)
soc.bind((host, port))
print 'Serving on {0}:{1}.'.format(host, port)
soc.listen(0)
while 1:
thread.start_new_thread(handler, soc.accept()+(timeout,))
if __name__ == '__main__':
start_server()
|
joemarchese/PolyNanna | participants.py | Python | mit | 3,229 | 0.007742 | """
How to Use this File.
participants is a dictionary where a key is the name of the participant and the value is
a set of all the invalid selections for that participant.
participants = {'Bob': {'Sue', 'Jim'},
'Jim': {'Bob', 'Betty'},
} # And so on.
history is a dictionary where a key is the name of the participant and the value
is a list of tuples that contain a year and that participant's recipient
for that year.
history = {'Bob': [(2010, 'Betty'), (2011, 'Freddie')],
'Jim': [(2011, 'Sue']
# And so on.
}
"""
participants = {'Adam': {'Adam', 'Jeff', 'Joe', 'David'},
'Adrienne': {'Adrienne', 'Joe'},
'Amanda': {'Amanda', 'Stefan' ,'Angela'},
'Angela': {'Angela', 'Renee', 'Jeff', 'Nanna', 'Stefan', 'Justin', 'Amanda'},
'David': {'David', 'Jeff', 'Joe', 'Adam', 'Shaina'},
'Francesca': {'Francesca', 'Renee', 'George'},
'George': {'George', 'Renee', 'Francesca'},
'Jeff': {'Jeff', 'Renee', 'Angela', 'Nanna', 'Joe', 'Adam', 'David'},
'Joe': {'Joe', 'Jeff', 'David', | 'Adam', 'Adrienne'},
'Justin': {'Justin' | , 'Angela', 'Stefan'},
'Nanna': {'Nanna', 'Jeff', 'Angela', 'Renee'},
'Renee': {'Renee', 'Jeff', 'Angela', 'Nanna', 'Francesca', 'George'},
'Shaina': {'Shaina', 'David'},
'Stefan': {'Stefan', 'Angela', 'Justin', 'Amanda'},
}
history = {'Adam': [(2015, 'Justin'), (2016, 'Amanda'), (2017, 'Angela'), (2018, 'Stefan')],
'Adrienne': [(2016, 'Jeff'), (2017, 'Stefan'), (2018, 'Justin')],
'Amanda': [(2015, 'Adam'), (2016, 'Adrienne'), (2017, 'Jeff'), (2018, 'George')],
'Angela': [(2015, 'Joe'), (2016, 'David'), (2017, 'Francesca'), (2018, 'Adrienne')],
'David': [(2015, 'Stefan'), (2016, 'Francesca'), (2017, 'Renee')],
'Francesca': [(2015, 'Angela'), (2016, 'Joe'), (2017, 'Adam'), (2018, 'Jeff')],
'George': [(2015, 'Jeff'), (2016, 'Angela'), (2017, 'Adrienne'), (2018, 'Joe')],
'Jeff': [(2015, 'Nanna'), (2016, 'Justin'), (2017, 'Shaina'), (2018, 'Amanda')],
'Joe': [(2015, 'Renee'), (2016, 'George'), (2017, 'Justin'), (2018, 'Angela')],
'Justin': [(2015, 'Francesca'), (2016, 'Adam'), (2017, 'George'), (2018, 'Renee')],
'Nanna': [(2015, 'David')],
'Renee': [(2015, 'Amanda'), (2016, 'Stefan'), (2017, 'David'), (2018, 'Adam')],
'Shaina': [(2017, 'Amanda')],
'Stefan': [(2015, 'George'), (2016, 'Renee'), (2017, 'Joe'), (2018, 'Francesca')],
}
class Participant:
"""The class for individual participants that contains their attributes."""
def __init__(self, name, restricted_set=None, giving_to=None):
self.name = name
self.restricted_set = restricted_set
self.restricted_set = participants.get(self.name)|set([y[1] for y in history.get(self.name)])
self.giving_to = giving_to
def main():
return sorted([Participant(p) for p in participants.keys()],
key=lambda p: len(p.restricted_set), reverse=True)
if __name__ == '__main__':
main() |
cysuncn/python | spark/crm/PROC_O_AFA_MAINTRANSDTL.py | Python | gpl-3.0 | 6,349 | 0.018374 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_AFA_MAINTRANSDTL').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveCo | ntext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#处理需要使用的日期
etl_date = sys.argv[1]
#etl日期
V_DT = etl_date
#上一日日期
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#月初日期
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#上月末日期
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10位日期
V_DT10 = (date(int(etl_dat | e[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
##原表
O_TX_AFA_MAINTRANSDTL = sqlContext.read.parquet(hdfs+'/O_TX_AFA_MAINTRANSDTL/*')
O_TX_AFA_MAINTRANSDTL.registerTempTable("O_TX_AFA_MAINTRANSDTL")
#目标表 :F_TX_AFA_MAINTRANSDTL 增量
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_TX_AFA_MAINTRANSDTL/"+V_DT+".parquet")
#任务[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT WORKDATE AS WORKDATE
,AGENTSERIALNO AS AGENTSERIALNO
,SYSID AS SYSID
,UNITNO AS UNITNO
,SUBUNITNO AS SUBUNITNO
,AGENTFLAG AS AGENTFLAG
,TRANSCODE AS TRANSCODE
,CHANNELCODE AS CHANNELCODE
,CHANNELDATE AS CHANNELDATE
,CHANNELSEQ AS CHANNELSEQ
,BRNO AS BRNO
,TELLERNO AS TELLERNO
,CHKTELLERNO AS CHKTELLERNO
,AUTHTELLERNO AS AUTHTELLERNO
,SENDTELLERNO AS SENDTELLERNO
,TERMINALNO AS TERMINALNO
,MBFLAG AS MBFLAG
,DCFLAG AS DCFLAG
,TRANSFLAG AS TRANSFLAG
,CASHPRONO AS CASHPRONO
,ACCCLASS AS ACCCLASS
,CLEARACCSEQ AS CLEARACCSEQ
,CURRENCY AS CURRENCY
,AMOUNT AS AMOUNT
,REALAMOUNT AS REALAMOUNT
,FEEFLAG AS FEEFLAG
,TRANSFEECODE AS TRANSFEECODE
,FEECODE AS FEECODE
,COSTFEECODE AS COSTFEECODE
,FEEAMOUNT AS FEEAMOUNT
,TRANSAMOUNT AS TRANSAMOUNT
,COSTAMOUNT AS COSTAMOUNT
,ENTRUSTDATE AS ENTRUSTDATE
,BUSSEQNO AS BUSSEQNO
,SENDBANK AS SENDBANK
,SENDSETTLEBANK AS SENDSETTLEBANK
,PAYEROPNBANK AS PAYEROPNBANK
,PAYERACC AS PAYERACC
,PAYERNAME AS PAYERNAME
,PAYERADDR AS PAYERADDR
,RECVBANK AS RECVBANK
,RECVSETTLEBANK AS RECVSETTLEBANK
,PAYEEOPNBANK AS PAYEEOPNBANK
,PAYEEACC AS PAYEEACC
,PAYEENAME AS PAYEENAME
,PAYEEADDR AS PAYEEADDR
,VOUCHTYPE AS VOUCHTYPE
,VOUCHDATE AS VOUCHDATE
,VOUCHNO AS VOUCHNO
,IDTYPE AS IDTYPE
,IDNO AS IDNO
,THIRDSYSERRCODE AS THIRDSYSERRCODE
,THIRDSYSERRMSG AS THIRDSYSERRMSG
,THIRDCHKFLAG AS THIRDCHKFLAG
,BANKCHKFLAG AS BANKCHKFLAG
,TRADEBUSISTEP AS TRADEBUSISTEP
,TRADESTEP AS TRADESTEP
,TRADESTATUS AS TRADESTATUS
,STATUS AS STATUS
,PRIORITY AS PRIORITY
,POSTSCRIPT AS POSTSCRIPT
,BOOKNAME AS BOOKNAME
,TRUSTPAYNO AS TRUSTPAYNO
,BUSTYPE AS BUSTYPE
,BUSSUBTYPE AS BUSSUBTYPE
,PREWORKDATE AS PREWORKDATE
,PREAGENTSERIALNO AS PREAGENTSERIALNO
,PRINTCNT AS PRINTCNT
,PKGAGTDATE AS PKGAGTDATE
,PKGNO AS PKGNO
,PKGTYPE AS PKGTYPE
,PKGCOSEQ AS PKGCOSEQ
,CLRDTATE AS CLRDTATE
,BATSEQNO AS BATSEQNO
,AGENTPROTOCOLNO AS AGENTPROTOCOLNO
,OTXSTAT AS OTXSTAT
,RESPONSELIMIT AS RESPONSELIMIT
,RESPONSEDATE AS RESPONSEDATE
,NOTE1 AS NOTE1
,NOTE2 AS NOTE2
,NOTE3 AS NOTE3
,NOTE4 AS NOTE4
,NOTE5 AS NOTE5
,ACCTBRNO AS ACCTBRNO
,CNAPSTYPE AS CNAPSTYPE
,CNAPSMSGTYPE AS CNAPSMSGTYPE
,OTXREJCTCODE AS OTXREJCTCODE
,OTXREJCTMSG AS OTXREJCTMSG
,CHKBUSTYPE AS CHKBUSTYPE
,CHKBUSSUBTYPE AS CHKBUSSUBTYPE
,FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'AFA' AS ODS_SYS_ID
FROM O_TX_AFA_MAINTRANSDTL A --统一业务流水表
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_TX_AFA_MAINTRANSDTL = sqlContext.sql(sql)
F_TX_AFA_MAINTRANSDTL.registerTempTable("F_TX_AFA_MAINTRANSDTL")
dfn="F_TX_AFA_MAINTRANSDTL/"+V_DT+".parquet"
F_TX_AFA_MAINTRANSDTL.cache()
nrows = F_TX_AFA_MAINTRANSDTL.count()
F_TX_AFA_MAINTRANSDTL.write.save(path=hdfs + '/' + dfn, mode='overwrite')
F_TX_AFA_MAINTRANSDTL.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_TX_AFA_MAINTRANSDTL lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
mick-d/nipype_source | nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py | Python | bsd-3-clause | 1,206 | 0.02073 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.filtering.denoising import GaussianBlu | rImageFilter
def test_GaussianBlurImageFilter_inputs():
    """Check the auto-generated input trait metadata of GaussianBlurImageFilter.

    Yields one (assert_equal, actual, expected) triple per trait metadata key
    (nose-style generative test, matching the other auto-generated specs).
    """
    # NOTE(review): the ignore_exception key was garbled in the dump
    # ("ignore_ | exception"); reconstructed from the nipype auto-spec pattern.
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        inputVolume=dict(argstr='%s', position=-2),
        outputVolume=dict(argstr='%s', hash_files=False, position=-1),
        sigma=dict(argstr='--sigma %f'),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = GaussianBlurImageFilter.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_GaussianBlurImageFilter_outputs():
    """Check the auto-generated output trait metadata of GaussianBlurImageFilter."""
    expected = {'outputVolume': {'position': -1}}
    spec = GaussianBlurImageFilter.output_spec()
    for trait_name, meta in expected.items():
        for meta_key, expected_value in meta.items():
            yield assert_equal, getattr(spec.traits()[trait_name], meta_key), expected_value
|
rochacbruno/flask_simplelogin | setup.py | Python | mit | 1,254 | 0 | # Fix for older setuptools
import re
import os
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
def fpath(name):
    """Return *name* resolved relative to the directory containing this file."""
    here = os.path.dirname(__file__)
    return os.path.join(here, name)
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    (The ``def`` keyword was garbled by the extraction; reconstructed.)
    """
    try:
        return open(fpath(fname), encoding='utf8').read()
    except TypeError:  # Python 2's open doesn't have the encoding kwarg
        return open(fpath(fname)).read()
def desc():
    """Return the package long description (the README contents)."""
    readme = 'README.md'
    return read(readme)
# grep flask_simplelogin/__init__.py since python 3.x cannot
# import it before using 2to3
file_text = read(fpath('flask_simplelogin/__init__.py'))
def grep(attrname):
    """Return the single-quoted string assigned to *attrname* in file_text.

    Raises ValueError (via tuple unpacking) when the pattern matches zero
    or more than one time.  (The closing quote of the regex was garbled in
    the dump; reconstructed as ``'([^']+)'``.)
    """
    pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
    strval, = re.findall(pattern, file_text)
    return strval
setup(
name='flask_simplelogin',
version=grep('__version__'),
url='https://github.com/cuducos/flask_simplelogin/',
license='MIT',
author=grep('__author__'),
author_email=grep('__email__'),
description='Flask Simple Login - Login Extension for Flask',
long_description=desc(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=['Flask>=0.12', 'click', 'flask_wtf']
)
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractKieshitlWordpressCom.py | Python | bsd-3-clause | 642 | 0.029595 | def extractKieshitlWordpressCom(item):
'''
Parser for 'kieshitl.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
| return None
tagmap = [
('I am a Big Villain', 'I am a Big Villain', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, post | fix=postfix, tl_type=tl_type)
return False |
feigaochn/leetcode | p759_set_intersection_size_at_least_two.py | Python | mit | 955 | 0 | class Solution:
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key=lambda p: (p[1], p[0]))
cover = [intervals[0][1] - 1]
cover.append(cover[-1] + 1)
for a, b in intervals[1:]:
if a <= cover[-2] and cover[-1] <= b:
pass
elif cover[-1] < a:
cover.append(b - 1)
cover.append(b)
| elif cover[-2] < a <= cover[-1] <= b:
if cover[-1] == b:
cover[-1] = b - 1
cover.append(b)
# print(cover)
return len(cover)
| sol = Solution().intersectionSizeTwo
print(sol([[1, 3], [1, 4], [2, 5], [3, 5]]))
print(sol([[1, 2], [2, 3], [2, 4], [4, 5]]))
print(sol([[1, 10]]))
print(
sol([[2, 10], [3, 7], [3, 15], [4, 11], [6, 12], [6, 16], [7, 8], [7, 11],
[7, 15], [11, 12]])) # 5
|
necozay/tulip-control | doc/genbib.py | Python | bsd-3-clause | 1,618 | 0.001236 | #!/usr/bin/env python
"""Generate bibliography.rst
The input file is of the following form. Blank lin | es are ignored. New
lines are collapsed to a single space. Besides newlines, text for each
entry is copied without filtering, and thus reST syntax can be
used. Note that the key line must begin with '['.
[M55]
G. H. Mealy. `A | Method for Synthesizing Sequential Circuits
<http://dx.doi.org/10.1002/j.1538-7305.1955.tb03788.x>`_. *Bell System
Technical Journal (BSTJ)*, Vol.34, No.5, pp. 1045 -- 1079, September,
1955.
"""
from __future__ import print_function
import sys
import io
def print_entry(out_f, bkey, entry_text):
    """Write one bibliography entry to *out_f* as a reST raw-html anchor.

    Writes nothing when *bkey* is None (no entry collected yet).
    """
    if bkey is None:
        return
    anchor = bkey.lower()
    out_f.write(
        ':raw-html:`<a href="#{0}" id="{0}">[{1}]</a>`\n'
        '    \\{2}\n\n'.format(anchor, bkey, entry_text)
    )
if len(sys.argv) < 2:
print('Usage: genbib.py FILE')
sys.exit(1)
out_f = io.open('bibliography.rst', 'w')
with io.open(sys.argv[1], 'r') as f:
bkey = None
entry_text = None
out_f.write(u'''Bibliography
============
.. role:: raw-html(raw)
:format: html
''')
for line in f:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '[' and line:
print_entry(out_f, bkey, entry_text)
closing_sym = line.index(']')
bkey = line[1:closing_sym]
entry_text = u''
elif bkey is None:
ValueError('Entry text found without preceding key.')
else:
entry_text += line+' '
print_entry(out_f, bkey, entry_text)
|
andresgomezvidal/autokey_scripts | data/General/file manager/asignaturas_actuales.py | Python | mit | 200 | 0.015 | import time
t1=.3
t2=.1
path="~/Dropbox/Ingenieria/asignatura | s_actuales"
time.sleep(t2)
key | board.send_key("<f6>")
time.sleep(t2)
keyboard.send_keys(path)
time.sleep(t1)
keyboard.send_key("<enter>")
|
SoftwareHeritage/swh-web-ui | swh/web/tests/common/test_middlewares.py | Python | agpl-3.0 | 1,521 | 0.00263 | # Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-le | vel directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from hypothesis import given
import pytest
from django.test import modify_settings
from swh.web.common.utils import rev | erse
from swh.web.tests.strategies import snapshot
@modify_settings(
    MIDDLEWARE={"remove": ["swh.web.common.middlewares.ExceptionMiddleware"]}
)
@given(snapshot())
def test_exception_middleware_disabled(client, mocker, snapshot):
    """Without the exception middleware, view errors propagate to the client."""
    patched = mocker.patch(
        "swh.web.browse.views.snapshot.browse_snapshot_directory"
    )
    patched.side_effect = Exception("Something went wrong")
    url = reverse("browse-snapshot-directory", url_args={"snapshot_id": snapshot})
    with pytest.raises(Exception, match="Something went wrong"):
        client.get(url)
@given(snapshot())
def test_exception_middleware_enabled(client, mocker, snapshot):
    """With the middleware on, a view error becomes a 500 carrying a traceback."""
    patched = mocker.patch(
        "swh.web.browse.views.snapshot.browse_snapshot_directory"
    )
    patched.side_effect = Exception("Something went wrong")
    url = reverse("browse-snapshot-directory", url_args={"snapshot_id": snapshot})
    resp = client.get(url)
    assert resp.status_code == 500
    assert hasattr(resp, "traceback")
    assert "Traceback" in resp.traceback
|
divereigh/firewalld | src/firewall/core/ipset.py | Python | gpl-2.0 | 6,843 | 0.006284 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
from firewall.core.prog import runProg
from firewall.core.logger import log
from firewall.functions import tempFile, readfile
from firewall.config import COMMANDS
IPSET_MAXNAMELEN = 32
IPSET_TYPES = [
# bitmap and set types are currently not supported
# "bitmap:ip",
# "bitmap:ip,mac",
# "bitmap:port",
# "list:set",
"hash:ip",
#"hash:ip,port",
#"hash:ip,port,ip",
#"hash:ip,port,net",
#"hash:ip,mark",
"hash:net",
#"hash:net,net",
#"hash:net,port",
#"hash:net,port,net",
#"hash:net,iface",
"hash:mac",
]
IPSET_CREATE_OPTIONS = {
"family": "inet|inet6",
"hashsize": "value",
"maxelem": "value",
"timeout": "value in secs",
# "counters": None,
# "comment": None,
}
class ipset:
    """Wrapper around the ipset(8) command line tool.

    Every method shells out to the ipset binary via runProg and raises
    ValueError when the command exits non-zero.  Two log lines garbled by
    the extraction have been reconstructed, the bare ``except:`` clauses
    narrowed to ``except Exception:``, and the redundant second read of
    the restore temp file removed.
    """

    def __init__(self):
        # Path to the ipset binary, resolved from the firewalld command table.
        self._command = COMMANDS["ipset"]

    def __run(self, args):
        """Execute ipset with *args*; return its output or raise ValueError."""
        # convert to string list
        _args = ["%s" % item for item in args]
        log.debug2("%s: %s %s", self.__class__, self._command, " ".join(_args))
        (status, ret) = runProg(self._command, _args)
        if status != 0:
            raise ValueError("'%s %s' failed: %s" % (self._command,
                                                     " ".join(_args), ret))
        return ret

    def check_name(self, name):
        """Raise if *name* exceeds the kernel's ipset name length limit.

        NOTE(review): FirewallError/INVALID_NAME are never imported in this
        module -- confirm that a ``from firewall.errors import *`` was
        intended; as written this raise would fail with NameError instead.
        """
        if len(name) > IPSET_MAXNAMELEN:
            raise FirewallError(INVALID_NAME,
                                "ipset name '%s' is not valid" % name)

    def supported_types(self):
        """Return {type_name: description} parsed from `ipset --help` output."""
        ret = { }
        output = ""
        try:
            output = self.__run(["--help"])
        except ValueError as e:
            log.debug1("ipset error: %s" % e)
        lines = output.splitlines()
        # Type lines follow the "Supported set types:" header line.
        in_types = False
        for line in lines:
            if in_types:
                splits = line.strip().split(None, 2)
                ret[splits[0]] = splits[2]
            if line.startswith("Supported set types:"):
                in_types = True
        return ret

    def check_type(self, type_name):
        """Raise if *type_name* is not one of the supported IPSET_TYPES.

        NOTE(review): same missing FirewallError import as in check_name.
        """
        if len(type_name) > IPSET_MAXNAMELEN or type_name not in IPSET_TYPES:
            raise FirewallError(INVALID_TYPE,
                                "ipset type name '%s' is not valid" % type_name)

    def create(self, set_name, type_name, options=None):
        """Create a set; *options* maps option name -> value ("" for flags)."""
        self.check_name(set_name)
        self.check_type(type_name)
        args = [ "create", set_name, type_name ]
        if options:
            for k,v in options.items():
                args.append(k)
                if v != "":
                    args.append(v)
        return self.__run(args)

    def destroy(self, set_name):
        """Destroy the named set."""
        self.check_name(set_name)
        return self.__run([ "destroy", set_name ])

    def add(self, set_name, entry, options=None):
        """Add *entry* to the set, with optional extra option tokens."""
        args = [ "add", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def delete(self, set_name, entry, options=None):
        """Delete *entry* from the set, with optional extra option tokens."""
        args = [ "del", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def test(self, set_name, entry, options=None):
        """Test whether *entry* is in the set (raises ValueError when not)."""
        args = [ "test", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def list(self, set_name=None):
        """Return `ipset list [set_name]` output split into tokens."""
        args = [ "list" ]
        if set_name:
            args.append(set_name)
        return self.__run(args).split()

    def save(self, set_name=None):
        """Return `ipset save [set_name]` output (restore-format dump)."""
        args = [ "save" ]
        if set_name:
            args.append(set_name)
        return self.__run(args)

    def restore(self, set_name, type_name, entries,
                create_options=None, entry_options=None):
        """Bulk-load *entries* into a set via `ipset restore` from a temp file."""
        self.check_name(set_name)
        self.check_type(type_name)
        temp_file = tempFile()
        # Quote names/entries containing spaces for the restore file format.
        if ' ' in set_name:
            set_name = "'%s'" % set_name
        args = [ "create", set_name, type_name, "-exist" ]
        if create_options:
            for k,v in create_options.items():
                args.append(k)
                if v != "":
                    args.append(v)
        temp_file.write("%s\n" % " ".join(args))
        for entry in entries:
            if ' ' in entry:
                entry = "'%s'" % entry
            if entry_options:
                temp_file.write("add %s %s %s\n" % (set_name, entry,
                                                    " ".join(entry_options)))
            else:
                temp_file.write("add %s %s\n" % (set_name, entry))
        temp_file.close()
        stat = os.stat(temp_file.name)
        log.debug2("%s: %s restore %s", self.__class__, self._command,
                   "%s: %d" % (temp_file.name, stat.st_size))
        args = [ "restore" ]
        (status, ret) = runProg(self._command, args,
                                stdin=temp_file.name)
        if log.getDebugLogLevel() > 2:
            # Best-effort debug dump of the restore file; never fatal.
            try:
                lines = readfile(temp_file.name)
            except Exception:
                pass
            else:
                i = 1
                for line in lines:  # reuse lines already read (was re-reading the file)
                    log.debug3("%8d: %s" % (i, line), nofmt=1, nl=0)
                    if not line.endswith("\n"):
                        log.debug3("", nofmt=1)
                    i += 1
        os.unlink(temp_file.name)
        if status != 0:
            raise ValueError("'%s %s' failed: %s" % (self._command,
                                                     " ".join(args), ret))
        return ret

    def flush(self, set_name):
        """Flush all entries of the set (or of all sets when falsy)."""
        args = [ "flush" ]
        if set_name:
            args.append(set_name)
        return self.__run(args)

    def rename(self, old_set_name, new_set_name):
        """Rename a set."""
        return self.__run([ "rename", old_set_name, new_set_name ])

    def swap(self, set_name_1, set_name_2):
        """Swap the contents of two sets."""
        return self.__run([ "swap", set_name_1, set_name_2 ])

    def version(self):
        """Return the ipset tool version string."""
        return self.__run([ "version" ])
def check_ipset_name(ipset):
    """Return True when *ipset* does not exceed the kernel name length limit."""
    return len(ipset) <= IPSET_MAXNAMELEN
|
karesansui/karesansui | karesansui/tests/lib/file/testk2v.py | Python | mit | 1,427 | 0.018921 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import unittest
import tempfile
from karesansui.lib.file.k2v import *
class TestK2V(unittest.TestCase):
    """Unit tests for the K2V key=value file backend.

    Two lines garbled by the extraction ('ke | y.2' and the read() call)
    have been reconstructed, and the file handle leaked in setUp is now
    closed immediately.
    """

    # Sample data: dotted keys, values with embedded/leading/trailing spaces.
    _w = {'key.1':'h oge',
          'key.2':'fo o',
          'key.3':'bar ',
          'key.4':' piyo',
          'key.5':'s p am'}

    def setUp(self):
        # mkstemp() already creates the file; open/close just truncates it
        # (the original left this handle open, leaking a descriptor per test).
        self._tmpfile = tempfile.mkstemp()
        fp = open(self._tmpfile[1], 'w')
        fp.close()
        self._t = K2V(self._tmpfile[1])

    def tearDown(self):
        os.unlink(self._tmpfile[1])

    def test_write_0(self):
        """write() must return every key/value pair unchanged."""
        _d = self._t.write(self._w)
        for i in xrange(1,6):
            self.assertEqual(self._w['key.%d'%i],_d['key.%d'%i])

    def test_read_0(self):
        """read() must return the stored pairs unchanged."""
        _d = self._t.read()
        for i in xrange(1,6):
            self.assertEqual(self._w['key.%d'%i],_d['key.%d'%i])

    def test_lock_sh_0(self):
        self.fail('TODO:')

    def test_lock_ex_0(self):
        self.fail('TODO:')

    def test_lock_un_0(self):
        self.fail('TODO:')
class SuiteK2V(unittest.TestSuite):
    """Suite bundling the implemented K2V read/write/lock test cases."""
    def __init__(self):
        test_names = ['test_write_0', 'test_read_0',
                      'test_lock_ex_0', 'test_lock_un_0']
        unittest.TestSuite.__init__(self, [TestK2V(name) for name in test_names])
def all_suite_k2v():
    """Return a top-level suite wrapping SuiteK2V."""
    suites = [SuiteK2V()]
    return unittest.TestSuite(suites)
def main():
    """Run the whole K2V suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(all_suite_k2v())
if __name__ == '__main__':
main()
|
titilambert/home-assistant | homeassistant/components/generic_thermostat/climate.py | Python | apache-2.0 | 16,726 | 0.000837 | """Adds support for generic thermostat units."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, callback
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HEATER): cv.entity_id,
vol.Required(CONF_SENSOR): cv.entity_id,
vol.Optional(CONF_AC_MODE): cv.boolean,
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
vol.Optional(CONF_KEEP_ALIVE): cv.positive_time_period,
vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
[HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
),
vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the generic thermostat platform from YAML configuration."""
    # Positional arguments are evaluated left to right, so the config keys
    # are read in the same order as before, with the unit read last.
    thermostat = GenericThermostat(
        config.get(CONF_NAME),
        config.get(CONF_HEATER),
        config.get(CONF_SENSOR),
        config.get(CONF_MIN_TEMP),
        config.get(CONF_MAX_TEMP),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_AC_MODE),
        config.get(CONF_MIN_DUR),
        config.get(CONF_COLD_TOLERANCE),
        config.get(CONF_HOT_TOLERANCE),
        config.get(CONF_KEEP_ALIVE),
        config.get(CONF_INITIAL_HVAC_MODE),
        config.get(CONF_AWAY_TEMP),
        config.get(CONF_PRECISION),
        hass.config.units.temperature_unit,
    )
    async_add_entities([thermostat])
class GenericThermostat(ClimateEntity, RestoreEntity):
"""Representation of a Generic Thermostat device."""
def __init__(
self,
name,
heater_entity_id,
sensor_entity_id,
min_temp,
max_temp,
target_temp,
ac_mode,
min_cycle_duration,
cold_tolerance,
hot_tolerance,
keep_alive,
initial_hvac_mode,
away_temp,
precision,
unit,
):
"""Initialize the thermostat."""
self._name = name
self.heater_entity_id = heater_entity_id
self.sensor_entity_id = sensor_entity_id
self.ac_mode = ac_mode
self.min_cycle_duration = min_cycle_duration
self._cold_tolerance = cold_tolerance
self._hot_tolerance = hot_tolerance
self._keep_alive = keep_alive
self._hvac_mode = initial_hvac_mode
self._saved_target_temp = target_temp or away_temp
self._temp_precision = precision
if self.ac_mode:
self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
else:
self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
self._active = False
self._cur_temp = None
self._temp_lock = asyncio.Lock()
self._min_temp = min_temp
self._max_temp = max_temp
self._target_temp = target_temp
self._unit = unit
self._support_flags = SUPPORT_FLAGS
if away_temp:
self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
self._away_temp = away_temp
self._is_away = False
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
# Add listener
async_track_state_change_event(
self.hass, [self.sensor_entity_id], self._async_sensor_changed
)
async_track_state_change_event(
self.hass, [self.heater_entity_id], self._async_switch_changed
)
if self._keep_alive:
async_track_time_interval(
self.hass, self._async_control_heating, self._keep_alive
)
@callback
def _async_startup(event):
"""Init on startup."""
sensor_state = self.hass.states.get(self.sensor_entity_id)
if sensor_state and sensor_state.state not in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
self._async_update_temp(sensor_state)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)
# Check If we have an old state
old_state = await self.async_get_last_state()
if old_state is not None:
# If we have no initial temperature, restore
if self._target_temp is None:
# If we have a previously saved temperature
if old_state.attributes.get(ATTR_TEMPERATURE) is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
self._target_temp = self.min_temp
_LOGGER.warning(
"Undefined target temperature, falling back to %s",
self._target_temp,
)
else:
self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
if old_state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY:
self._is_away = True
if not self._hvac | _mode and old_state.state:
self._hvac_mode = old_state.state
else:
# No previous state, try and restore defaults
if self._target_temp is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
| self._target_temp = self.min_temp
_LOGGER.warning(
"No previously saved temperature, setting to %s", self._target_temp
)
# Set default state to off
if not self._hvac_mode: |
Drvanon/Game | run.py | Python | apache-2.0 | 232 | 0.00431 | from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPS | erver
from tornado.ioloop import IOLoop
from game import app
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000)
IOLoop.i | nstance().start() |
eagafonov/screencloud | screencloud/src/3rdparty/PythonQt/examples/PyScriptingConsole/example.py | Python | gpl-2.0 | 331 | 0.009063 | from PythonQt.QtG | ui import *
group = QGroupBox()
box = QVBoxLayout(group)
push1 = QPushButton(group)
box.addWidget(push1)
push2 = QPushButton(group)
box.addWidget(push2)
check = QCheckBox(group)
check.text = 'check me'
group.title | = 'my title'
push1.text = 'press me'
push2.text = 'press me2'
box.addWidget(check)
group.show()
|
jjaviergalvez/CarND-Term3-Quizzes | search/print-path.py | Python | mit | 2,943 | 0.011553 | # -----------
# User Instructions:
#
# Modify the the search functio | n so that it returns
# a shortest path as follows:
#
# [['>', 'v', ' ', ' ', ' ', ' '],
# [' ', '>', '>', '>', '>', 'v'],
# [' ', ' ', ' ', ' ', ' ', 'v'] | ,
# [' ', ' ', ' ', ' ', ' ', 'v'],
# [' ', ' ', ' ', ' ', ' ', '*']]
#
# Where '>', '<', '^', and 'v' refer to right, left,
# up, and down motions. Note that the 'v' should be
# lowercase. '*' should mark the goal cell.
#
# You may assume that all test cases for this function
# will have a path from init to goal.
# ----------
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1
delta = [[-1, 0 ], # go up
[ 0, -1], # go left
[ 1, 0 ], # go down
[ 0, 1 ]] # go right
delta_name = ['^', '<', 'v', '>']
def search(grid,init,goal,cost):
    """Uniform-cost grid search, then backtrack to draw the shortest path.

    Expands cells in order of increasing cost g; on success walks back from
    the goal marking a shortest path with direction arrows in ``expand``
    ('*' marks the goal).  Returns the ``expand`` grid, or the string
    'fail' when the goal is unreachable.  Relies on the module-level
    ``delta`` (moves) and ``delta_name`` (arrow characters) tables.
    """
    # ----------------------------------------
    # modify code below
    # ----------------------------------------
    # closed[x][y] == 1 once a cell has been added to the frontier.
    closed = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    closed[init[0]][init[1]] = 1
    g_grid = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))] #fill with g values
    expand = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))]
    x = init[0]
    y = init[1]
    g = 0
    g_grid[x][y] = g
    open = [[g, x, y]]
    found = False # flag that is set when search is complete
    resign = False # flag set if we can't find expand
    while not found and not resign:
        if len(open) == 0:
            resign = True
            return 'fail'
        else:
            # Pop the frontier cell with the smallest g value.
            open.sort()
            open.reverse()
            next = open.pop()
            x = next[1]
            y = next[2]
            g = next[0]
            if x == goal[0] and y == goal[1]:
                found = True
                expand[x][y] = '*'
            else:
                # Expand in-bounds, unvisited, unblocked neighbours.
                for i in range(len(delta)):
                    x2 = x + delta[i][0]
                    y2 = y + delta[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            g_grid[x2][y2] = g2
                            open.append([g2, x2, y2])
                            closed[x2][y2] = 1
    # Backtrack from the goal: at each cost level pick a neighbour whose
    # g value is one lower and stamp it with the arrow pointing back
    # towards (x, y); (i+2)%4 is the opposite of move direction i.
    # NOTE(review): g2 is unbound here when goal == init -- the quiz
    # guarantees a non-trivial path; confirm before reusing elsewhere.
    for n in range(g2, -1, -1):
        for i in range(len(delta)):
            x2 = x + delta[i][0]
            y2 = y + delta[i][1]
            if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
                if g_grid[x2][y2] == (n-1):
                    expand[x2][y2] = delta_name[(i+2)%4]
                    x = x2
                    y = y2
    return expand # make sure you return the shortest path
result = search(grid,init,goal,cost)
for row in result:
print(row) |
byuphamerator/phamerator-dev | phamerator/phamServer_InnoDB.py | Python | gpl-2.0 | 16,847 | 0.017985 | #!/usr/bin/env python
import Pyro.core
import Pyro.naming
import string
import MySQLdb
import time
import random
import threading
try:
from phamerator import *
except:
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import alignmentDatabase
from errorHandler import *
import db_conf
import sys
try:
import hashlib
except ImportError:
import md5
import getopt
import getpass
import logger
from threading import Thread
from Pyro.EventService.Clients import Subscriber
from Pyro.protocol import getHostname
import Pyro.EventService.Clients
Pyro.config.PYRO_MAXCONNECTIONS=1000
Pyro.config.PYRO_NS_HOSTNAME='localhost'
class options:
    """Parse phamServer command line options into self.argDict.

    Two lines garbled by the extraction ("--promp | t" and the nsname
    assignment) have been reconstructed; ``print`` statements and
    ``has_key`` were replaced with forms valid on both Python 2 and 3
    (behavior unchanged).
    """

    def __init__(self, argv):
        try:
            opts, args = getopt.getopt(argv, "hpq:s:n:u:d:i:l:a:", ["help", "prompt", "password=", "server=", "nsname=", "user=", "database=", "instances=", "logging=", "alignment_type="])
        except getopt.GetoptError:
            print('error running getopt.getopt')
            self.usage()
        self.argDict = {}
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                self.usage()
                sys.exit()
            elif opt in ("-p", "--prompt"):
                # Interactive prompt; -q/--password on the command line overrides it.
                self.argDict['password'] = getpass.getpass('password: ')
            elif opt in ("-q", "--password"):
                self.argDict['password'] = arg
            elif opt in ("-s", "--server"):
                self.argDict['server'] = arg
            elif opt in ("-n", "--nsname"):
                self.argDict['nsname'] = arg
            elif opt in ("-u", "--user"):
                self.argDict['user'] = arg
            elif opt in ("-d", "--database"):
                self.argDict['database'] = arg
            elif opt in ("-i", "--instances"):
                self.argDict['instances'] = arg
            elif opt in ("-l", "--logging"):
                self.argDict['logging'] = arg
            elif opt in ("-a", "--alignment_type"):
                self.argDict['alignment_type'] = arg
        if 'password' not in self.argDict:
            self.argDict['password'] = ''
        required_args = ('server', 'nsname', 'user', 'database', 'instances', 'logging', 'alignment_type')
        for a in required_args:
            if a not in self.argDict:
                print("required argument '%s' is missing" % a)
                self.usage()
                sys.exit()

    def usage(self):
        '''Prints program usage information'''
        print("""phamServer_InnoDB.py [OPTION] [ARGUMENT]
-h, --help: print this usage information
-u, --user=<username>: specify a username on the database
-p, --prompt: prompt for a password
-q, --password=<pass>: enter a password on the command line, overrides -p
-d, --database=<database name>: specify the name of the database to access
-i, --instances=<number_of_instances>: number of server instances to run (default=1)
-l, --logging={True or False}: whether to print out debugging info (default is True)
-a, --alignment_type={blast or clustalw}: this argument is required
-s, --server=<hostname>: hostname of database server, required
-n, --nsname=<nsname>: PYRO server nsname, usually localhost, required""")
class phamPublisher(Pyro.EventService.Clients.Publisher):
    '''Publishes Pyro events over the network to clients, for instance when the BLAST database changes'''
    def __init__(self):
        # Pure pass-through constructor; all behavior comes from the base Publisher.
        Pyro.EventService.Clients.Publisher.__init__(self)
    #def publish(self, channel, message):
    # self.publish(channel, message)
class NameServer(Thread):
    """Daemon thread that hosts the Pyro name server."""

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(1)
        # no special identification
        self.starter = Pyro.naming.NameServerStarter()

    def run(self):
        print("Launching Pyro Name Server")
        self.starter.start()  # (hostname=Pyro.config.PYRO_NS_HOSTNAME)

    def waitUntilStarted(self):
        """Block until the name server is up; returns the starter's result."""
        return self.starter.waitUntilStarted()
class EventServer(Thread):
    """Daemon thread that hosts the Pyro event service."""

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(1)
        # no special identification
        self.starter = Pyro.EventService.Server.EventServiceStarter()

    def run(self):
        print("Launching Pyro Event Server")
        self.starter.start(hostname=Pyro.config.PYRO_NS_HOSTNAME)

    def waitUntilStarted(self):
        """Block until the event service is up; returns the starter's result."""
        return self.starter.waitUntilStarted()
class phamServlet(Pyro.core.SynchronizedObjBase, errorHandler):
    """Base class for Pyro servlets: thread-synchronized remote object with
    SQL error reporting, a logger and an access timestamp."""

    def __init__(self, logging, c):
        Pyro.core.SynchronizedObjBase.__init__(self)
        errorHandler.__init__(self)
        self._logger = logger.logger(logging)
        self.lastAccessed = time.time()
        self.name = ''
        self.c = c
        # Commit any transaction left open on the shared cursor; report SQL
        # errors via errorHandler.  Narrowed from a bare `except:`, which
        # would also have swallowed KeyboardInterrupt/SystemExit.
        try:
            self.c.execute("COMMIT")
        except Exception:
            self.show_sql_errors(self.c)

    def get_last_accessed(self):
        """Return the epoch timestamp of the last recorded access."""
        return self.lastAccessed
class clustalwServlet(phamServlet, Subscriber, Thread):
    """Servlet coordinating distributed clustalw alignments: hands out work
    units to compute nodes, records their scores, and republishes a
    'get busy' event to clients when new alignments become available."""
    def __init__(self, logging, c, server, database, opts):
        Thread.__init__(self)
        phamServlet.__init__(self,logging, c)
        Subscriber.__init__(self)
        #self.setDaemon(1)
        self.server, self.database = server, database
        # open a dedicated cursor for this servlet (replaces the one set
        # by phamServlet.__init__)
        self.c = db_conf.db_conf(username=opts['user'], password=opts['password'], server=opts['server'], db=opts['database']).get_cursor()
        self.subscribe("clustalw")
        self._logger = logger.logger(logging)
        self.publisher = phamPublisher()
    def request_db_info(self):
        '''phamClient needs this info to get a proper database cursor, but it also needs a valid username/password pair'''
        return self.server, self.database
    def event(self, event):
        # Pyro event callback: relay "alignments available" to the clients
        self._logger.log('%s --> %s' % (event.subject, event.msg))
        if event.subject == 'clustalw' and event.msg == 'database has alignments available':
            self._logger.log('telling the clients to get busy')
            self.publisher.publish('clustalw', 'get busy')
    def report_scores(self, clustalw_work_unit, results, client_host):
        '''compute node reporting scores for a particular query'''
        self._logger.log('%s: reporting clustalw results' % client_host)
        clustalw_work_unit.add_matches(results, self.c)
    def request_seqs(self, client_host):
        '''compute node asking for a query sequence and optionally the database for clustalw alignments'''
        self._logger.log('%s: requesting clustalw work unit' % client_host)
        clustalw_work_unit = alignmentDatabase.clustalwWorkUnit(self.c)
        # an empty query_id means all alignments are finished; pop a desktop
        # notification, purely best-effort (pynotify may be absent)
        if not clustalw_work_unit.query_id:
            try:
                import pynotify
                if pynotify.init("Phamerator"):
                    n = pynotify.Notification("Phamerator Server Update", "Clustalw alignments completed", "file:///%s" % os.path.join(os.path.dirname(__file__),"pixmaps/phamerator.png"))
                    n.show()
                else:
                    pass
                    #print "there was a problem initializing the pynotify module"
            except:
                # never let a notification failure break the work-unit reply
                pass
        return clustalw_work_unit
    def run(self):
        # listen for subscribed Pyro events until the process exits
        self.listen()
class blastServlet(phamServlet, Subscriber, Thread):
def __init__(self, logging, c, server, database, opts):
Thread.__init__(self)
phamServlet.__init__(self, logging, c)
Subscriber.__init__(self)
self.c = db_conf.db_conf(username=opts['user'], password=opts['password'], server=opts['server'], db=opts['database']).get_cursor()
self.server, self.database = server, database
self.subscribe("fasta")
self.lastAccessed = time.time()
self.waitTime = random.randint(5,15)
self.busy = False
self._logger = logger.logger(logging)
self.status = 'avail'
def request_db_info(self):
'''phamClient needs this info to get a proper database cursor, but it also needs a valid username/password pair'''
return self.server, self.database
def disconnect(self, client):
'''cleans up after a client disconnects'''
self._logger.log(client + ' has disconnected. Rolling back changes.')
try:
self.c.execute("ROLLBACK")
self._logger.log('done.')
except: self.show_sql_errors(self.c)
self._logger.log(client + ' has disconnected. Unlocking tables.')
try:
self.c.execute("UNLOCK TABLES")
self._logger.log('done.')
except: self.show_sql_errors(self.c)
def event(self, event):
self._logger.log('%s --> %s' % (event.subject, event.msg))
if event.subject == 'fasta' and event.msg == 'update available': self.update_db()
def request_seqs(self, client_host):
|
nnrcschmdt/helsinki | program/management/commands/check_automation_ids.py | Python | bsd-3-clause | 1,616 | 0.001238 | import json
from os.path import join
from django.conf import settings
from django.core.management.base import NoArgsCommand
from program.models import ProgramSlot
class Command(NoArgsCommand):
help = 'checks the automation_ids used by program slots against the exported'
def handle_noargs(self, **options):
cache_dir = getattr(settings, 'AUTOMATION_CACHE_DIR', 'cache')
cached_shows = join(cache_dir, 'shows.json')
with open(cached_shows) as shows_json:
shows = json.loads(shows_json.read())
rd_ids = {}
for show in shows['shows']:
rd_ids[show['id']] = show
for show in shows['multi-shows']:
rd_ids[show['id']] = | show
| pv_ids = []
for programslot in ProgramSlot.objects.filter(automation_id__isnull=False):
pv_ids.append(int(programslot.automation_id))
for automation_id in sorted(rd_ids.iterkeys()):
if rd_ids[automation_id]['type'] == 's':
continue
multi_id = -1
if 'multi' in rd_ids[automation_id]:
multi_id = rd_ids[automation_id]['multi']['id']
if automation_id not in pv_ids and multi_id not in pv_ids:
if multi_id < 0:
print '+ %d' % (automation_id)
else:
print '+ %d (%d)' % (automation_id, multi_id)
for automation_id in sorted(pv_ids):
if automation_id not in rd_ids:
print '-', automation_id
|
balazssimon/ml-playground | udemy/lazyprogrammer/deep-reinforcement-learning-python/atari/dqn_tf_alt.py | Python | apache-2.0 | 11,056 | 0.012663 | # https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python
# https://www.udemy.com/deep-reinforcement-learning-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import copy
import gym
import os
import sys
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from scipy.misc import imresize
##### testing only
# MAX_EXPERIENCES = 10000
# MIN_EXPERIENCES = 1000
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
def downsample_image(A):
    """Crop, grayscale and resize a raw Atari frame down to (IM_SIZE, IM_SIZE).

    NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this code
    assumes an older SciPy — confirm the pinned version before upgrading.
    """
    B = A[31:195] # select the important parts of the image
    B = B.mean(axis=2) # convert to grayscale
    # downsample image
    # changing aspect ratio doesn't significantly distort the image
    # nearest neighbor interpolation produces a much sharper image
    # than default bilinear
    B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest')
    return B
def update_state(state, obs):
    """Shift the frame stack: drop the oldest frame, append the new observation."""
    newest = downsample_image(obs)
    # state[1:] removes the oldest of the 4 stacked frames; the downsampled
    # observation is appended along axis 0 with a fresh leading axis
    return np.append(state[1:], newest[np.newaxis], axis=0)
class ConvLayer:
    """A single 2-D convolutional layer ('SAME' padding) with bias and activation."""
    def __init__(self, mi, mo, filtersz=5, stride=2, f=tf.nn.relu):
        # mi = input feature map size
        # mo = output feature map size
        self.W = tf.Variable(tf.random_normal(shape=(filtersz, filtersz, mi, mo)))
        b0 = np.zeros(mo, dtype=np.float32)
        self.b = tf.Variable(b0)
        self.f = f
        self.stride = stride
        # exposed so a target network can copy these variables
        self.params = [self.W, self.b]
    def forward(self, X):
        # X is NHWC; the stride applies to the two spatial dimensions only
        conv_out = tf.nn.conv2d(X, self.W, strides=[1, self.stride, self.stride, 1], padding='SAME')
        conv_out = tf.nn.bias_add(conv_out, self.b)
        return self.f(conv_out)
class HiddenLayer:
    """A fully connected layer: activation(X @ W + b), bias optional."""
    def __init__(self, M1, M2, f=tf.nn.relu, use_bias=True):
        # M1 = input size, M2 = output size
        # print("M1:", M1)
        self.W = tf.Variable(tf.random_normal(shape=(M1, M2)))
        self.params = [self.W]
        self.use_bias = use_bias
        if use_bias:
            self.b = tf.Variable(np.zeros(M2).astype(np.float32))
            self.params.append(self.b)
        self.f = f
    def forward(self, X):
        if self.use_bias:
            a = tf.matmul(X, self.W) + self.b
        else:
            a = tf.matmul(X, self.W)
        return self.f(a)
class DQN:
    """Deep Q-network: convolutional layers followed by fully connected
    layers, mapping a (4, IM_SIZE, IM_SIZE) frame stack to one Q-value
    per action.

    K: number of discrete actions
    conv_layer_sizes: list of (num_output_filters, filtersz, stride)
    hidden_layer_sizes: list of fully connected layer sizes
    gamma: discount factor (kept for interface compatibility; targets are
        computed outside this class, see learn())
    """

    def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma):
        self.K = K

        # build the convolutional stack, tracking the spatial output size
        self.conv_layers = []
        num_input_filters = 4  # 4 stacked grayscale frames act as the input channels
        final_height = IM_SIZE
        final_width = IM_SIZE
        for num_output_filters, filtersz, stride in conv_layer_sizes:
            layer = ConvLayer(num_input_filters, num_output_filters, filtersz, stride)
            self.conv_layers.append(layer)
            num_input_filters = num_output_filters

            # with 'SAME' padding each strided conv shrinks a dimension to
            # ceil(size / stride)
            old_height = final_height
            new_height = int(np.ceil(old_height / stride))
            print("new_height (%s) = old_height (%s) / stride (%s)" % (new_height, old_height, stride))
            final_height = int(np.ceil(final_height / stride))
            final_width = int(np.ceil(final_width / stride))

        # fully connected layers on top of the flattened conv output
        self.layers = []
        flattened_ouput_size = final_height * final_width * num_input_filters
        M1 = flattened_ouput_size
        for M2 in hidden_layer_sizes:
            layer = HiddenLayer(M1, M2)
            self.layers.append(layer)
            M1 = M2

        # final linear layer: one Q-value per action (identity activation)
        layer = HiddenLayer(M1, K, lambda x: x)
        self.layers.append(layer)

        # collect params so a target network can copy them (see copy_from)
        self.params = []
        for layer in (self.conv_layers + self.layers):
            self.params += layer.params

        # placeholders: states, returns (targets) and chosen actions
        self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X')
        self.G = tf.placeholder(tf.float32, shape=(None,), name='G')
        self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions')

        # forward pass; scale raw pixels to [0, 1]
        Z = self.X / 255.0
        Z = tf.transpose(Z, [0, 2, 3, 1])  # TF wants the "color" channel to be last
        for layer in self.conv_layers:
            Z = layer.forward(Z)
        Z = tf.reshape(Z, [-1, flattened_ouput_size])
        for layer in self.layers:
            Z = layer.forward(Z)
        Y_hat = Z
        self.predict_op = Y_hat

        # Select Q(s, a) for the chosen actions. Fancy indexing
        # Y_hat[range(batch), actions] is unsupported in TF1, so flatten + gather.
        # BUG FIX: the original referenced `batch_sz`, which is undefined here
        # (it had been dropped from the signature) and raised a NameError at
        # graph construction. Derive the batch size from the graph instead,
        # which also works for any batch size.
        batch_sz = tf.shape(self.actions)[0]
        indices = tf.range(batch_sz) * tf.shape(Y_hat)[1] + self.actions
        selected_action_values = tf.gather(
            tf.reshape(Y_hat, [-1]),  # flatten
            indices
        )

        cost = tf.reduce_mean(tf.square(self.G - selected_action_values))
        self.cost = cost
        # RMSProp hyperparameters follow the DeepMind Atari DQN setup
        self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost)

    def set_session(self, session):
        """Attach the tf.Session used by predict/update/copy_from."""
        self.session = session

    def copy_from(self, other):
        """Copy all parameter values from `other` (refreshes the target network)."""
        ops = []
        my_params = self.params
        other_params = other.params
        for p, q in zip(my_params, other_params):
            actual = self.session.run(q)
            op = p.assign(actual)
            ops.append(op)
        # run all the assignments in a single session call
        self.session.run(ops)

    def predict(self, X):
        """Return Q-values of shape (batch, K) for a batch of states."""
        return self.session.run(self.predict_op, feed_dict={self.X: X})

    def update(self, states, actions, targets):
        """Take one gradient step toward `targets` and return the batch cost."""
        c, _ = self.session.run(
            [self.cost, self.train_op],
            feed_dict={
                self.X: states,
                self.G: targets,
                self.actions: actions
            }
        )
        return c

    def sample_action(self, x, eps):
        """Epsilon-greedy action selection for a single state `x`."""
        if np.random.random() < eps:
            return np.random.choice(self.K)
        else:
            return np.argmax(self.predict([x])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
    """Sample one minibatch from replay memory and take a single training step.

    Targets are the one-step bootstrapped returns
    r + gamma * max_a' Q_target(s', a'), with the bootstrap term zeroed on
    terminal transitions. Returns the loss reported by model.update().
    """
    minibatch = random.sample(experience_replay_buffer, batch_size)
    columns = [np.array(col) for col in zip(*minibatch)]
    states, actions, rewards, next_states, dones = columns

    max_next_q = target_model.predict(next_states).max(axis=1)
    not_terminal = np.invert(dones).astype(np.float32)
    targets = rewards + not_terminal * gamma * max_next_q

    return model.update(states, actions, targets)
def play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_size,
epsilon,
epsilon_change,
epsilon_min):
t0 = datetime.now()
# Reset the environment
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
assert(state.shape | == (4, 80, 80))
loss = None |
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done = False
while not done:
# Update target network
if total_t % TARGET_UPDATE_PERIOD == 0:
target_model.copy_from(model)
print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
# Take action
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
# assert(state.shape == (4, 80, 80))
episode_reward += reward
# Remove oldest experience if replay buffer is full
if len(experience_replay_buffer) == MAX_EXPERIENCES:
experience_replay_buffer.pop(0)
# Save the latest experience
experience_replay_buffer.append((state, action, reward, next_state, done))
# Train the model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamm |
tomcounsell/Cobra | apps/public/migrations/0018_auto_20141219_1711.py | Python | gpl-2.0 | 646 | 0.001548 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Renames Commission.estimated_price and re-points Commission.customer.

    (Repairs dataset-split corruption that had garbled the AlterField lines.)
    """

    dependencies = [
        ('public', '0017_auto_20141218_1813'),
    ]

    operations = [
        migrations.RenameField(
            model_name='commission',
            old_name='estimated_price',
            new_name='estimated_display_price',
        ),
        migrations.AlterField(
            model_name='commission',
            name='customer',
            field=models.ForeignKey(related_name='commissions', to='public.Customer'),
            preserve_default=True,
        ),
    ]
|
dariemp/odoo | openerp/tools/translate.py | Python | agpl-3.0 | 45,371 | 0.003769 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_G | reece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': | 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = {
    'as', 'at', 'by', 'do', 'go', 'if', 'in', 'me',
    'no', 'of', 'ok', 'on', 'or', 'to', 'up', 'us', 'we',
}
class UNIX_LINE_TERMINATOR(csv.excel):
    # identical to the stock 'excel' dialect except rows end with a bare LF
    lineterminator = '\n'

# make the dialect selectable as csv.writer(..., dialect="UNIX")
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    """Look up a single translation value in ir_translation.

    :param cr: database cursor
    :param name: translation record name (coerced with str())
    :param source_type: the translation `type` column value
    :param lang: target language code
    :param source: optional source text to match against `src`
    :return: the translated value, or False when nothing matches
    """
    if source and name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
    elif name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
    elif source:
        cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
    else:
        # BUG FIX: previously this case fell through with res_trans unbound
        # (NameError); with neither a name nor a source there is nothing to
        # look up, so report "no translation found".
        return False
    res_trans = cr.fetchone()
    res = res_trans and res_trans[0] or False
    return res
class GettextAlias(object):
    def _get_db(self):
        """Return a Connection for the current thread's database, or None
        when the thread carries no dbname."""
        # find current DB based on thread/worker db name (see netsvc)
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            return sql_db.db_connect(db_name)
    def _get_cr(self, frame, allow_create=True):
        """Dig a database cursor out of the caller's stack frame.

        :param frame: the frame to inspect
        :param allow_create: when no cursor is found, open a fresh one
        :return: (cursor_or_None, is_new) — is_new tells the caller it owns
            the cursor and is responsible for closing it
        """
        # try, in order: cr, cursor, self.env.cr, self.cr,
        # request.env.cr
        if 'cr' in frame.f_locals:
            return frame.f_locals['cr'], False
        if 'cursor' in frame.f_locals:
            return frame.f_locals['cursor'], False
        s = frame.f_locals.get('self')
        if hasattr(s, 'env'):
            return s.env.cr, False
        if hasattr(s, 'cr'):
            return s.cr, False
        try:
            from openerp.http import request
            return request.env.cr, False
        except RuntimeError:
            # raised when there is no HTTP request bound to this thread
            pass
        if allow_create:
            # create a new cursor
            db = self._get_db()
            if db is not None:
                return db.cursor(), True
        return None, False
    def _get_uid(self, frame):
        """Extract the acting user id from the caller's stack frame."""
        # try, in order: uid, user, self.env.uid
        if 'uid' in frame.f_locals:
            return frame.f_locals['uid']
        if 'user' in frame.f_locals:
            return int(frame.f_locals['user'])  # user may be a record
        s = frame.f_locals.get('self')
        return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from openerp.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the originial uid, so the language may
# be wrong when the admin language differs.
pool = getattr(s, 'pool', None)
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if pool and cr and uid:
lang = pool['res.users'].context_get(cr, uid) |
peteboyd/lammps_interface | lammps_interface/ccdc.py | Python | mit | 949 | 0 | """
Bond order information.
"""
# Two-way mapping between CCDC bond-type letter codes and numeric bond orders.
# (Repairs dataset-split corruption in the 'E' entry and the 'D' comment.)
CCDC_BOND_ORDERS = {
    # http://cima.chem.usyd.edu.au:8080/cif/skunkworks/html/ddl1/mif/bond.html
    'S': 1.0,    # single (two-electron) bond or sigma bond to metal
    'D': 2.0,    # double (four-electron) bond
    'T': 3.0,    # triple (six-electron) bond
    'Q': 4.0,    # quadruple (eight-electron, metal-metal) bond
    'A': 1.5,    # alternating normalized ring bond (aromatic)
    'C': 1.0,    # catena-forming bond in crystal structure
    'E': 1.5,    # equivalent (delocalized double) bond
    'P': 1.0,    # pi bond (metal-ligand pi interaction)
    'Am': 1.41,  # Amide bond (non standard)
    # reverse direction: numeric order -> letter code (codes sharing an
    # order, like 'C' and 'P', resolve to the primary code)
    1.0: 'S',    # single (two-electron) bond or sigma bond to metal
    2.0: 'D',    # double (four-electron) bond
    3.0: 'T',    # triple (six-electron) bond
    4.0: 'Q',    # quadruple (eight-electron, metal-metal) bond
    1.5: 'A',    # alternating normalized ring bond (aromatic)
    1.41: 'Am'   # Amide bond (non standard)
}
|
micbuz/project2 | boot/manage.py | Python | apache-2.0 | 802 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # standard Django entry point; repairs dataset-split corruption in
    # os.environ.setdefault and the ImportError message
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "boot.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
NorThanapon/dict-definition | definition/legacy/analyze_data.py | Python | gpl-3.0 | 1,265 | 0.001581 | import codecs
from nltk.tokenize import wordpunct_tokenize as tokenize
data_filepath = 'output/top10kwords_2defs.tsv'
def num_words_with_freq(freq, n):
    """Return how many entries of the frequency dict occur exactly n times."""
    return sum(1 for count in freq.values() if count == n)
# Gather corpus statistics over the tab-separated definition file:
# column 0 is the defined word, column 3 the definition text.
# (Repairs dataset-split corruption in the tokenize loop and the final prints.)
defined_words = set()
freq = {}
total_tokens = 0
total_defs = 0
with codecs.open(data_filepath, 'r', 'utf-8') as ifp:
    for line in ifp:
        total_defs = total_defs + 1
        line = line.strip()
        parts = line.split('\t')
        # count the headword itself in the vocabulary frequencies
        if parts[0] not in freq:
            freq[parts[0]] = 0
        freq[parts[0]] = freq[parts[0]] + 1
        defined_words.add(parts[0])
        # count every token of the definition text
        for t in tokenize(parts[3]):
            if t not in freq:
                freq[t] = 0
            freq[t] = freq[t] + 1
            total_tokens = total_tokens + 1

print('#word being defined: ' + str(len(defined_words)))
print('#definition: ' + str(total_defs))
print('#tokens: ' + str(total_tokens))
print('vocab size: ' + str(len(freq)))
print('rare word frequency: ')
print(' - 1: ' + str(num_words_with_freq(freq, 1)))
print(' - 2: ' + str(num_words_with_freq(freq, 2)))
print(' - 3: ' + str(num_words_with_freq(freq, 3)))
print(' - 4: ' + str(num_words_with_freq(freq, 4)))
print(' - 5: ' + str(num_words_with_freq(freq, 5)))
|
from django.contrib import admin

# Register your models here.
from .models import Video, Tag

# expose both models in the Django admin with the default ModelAdmin
admin.site.register(Video)
admin.site.register(Tag)
kevgliss/lemur | lemur/certificates/service.py | Python | apache-2.0 | 17,189 | 0.002327 | """
.. module: lemur.certificate.service
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import arrow
from flask import current_app
from sqlalchemy import func, or_, not_, cast, Integer
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from lemur import database
from lemur.extensions import metrics, signals
from lemur.plugins.base import plugins
from lemur.common.utils import generate_private_key, truthiness
from lemur.roles.models import Role
from lemur.domains.models import Domain
from lemur.authorities.models import Authority
from lemur.destinations.models import Destination
from lemur.certificates.models import Certificate
from lemur.notifications.models import Notification
from lemur.pending_certificates.models import PendingCertificate
from lemur.certificates.schemas import CertificateOutputSchema, CertificateInputSchema
from lemur.roles import service as role_service
csr_created = signals.signal('csr_created', "CSR generated")
csr_imported = signals.signal('csr_imported', "CSR imported from external source")
certificate_issued = signals.signal('certificate_issued', "Authority issued a certificate")
certificate_imported = signals.signal('certificate_imported', "Certificate imported from external source")
def get(cert_id):
    """
    Retrieves certificate by its ID.

    :param cert_id: primary key of the certificate
    :return: the matching Certificate, if any
    """
    return database.get(Certificate, cert_id)
def get_by_name(name):
    """
    Retrieves certificate by its Name.

    :param name: the certificate `name` field value
    :return: the matching Certificate, if any
    """
    return database.get(Certificate, name, field='name')
def get_by_serial(serial):
    """
    Retrieves certificates by their serial number.

    :param serial: serial number, either int or str
    :return: list of matching certificates
    """
    if isinstance(serial, int):
        # although serial is a number, the DB column is String(128)
        serial = str(serial)
    return Certificate.query.filter(Certificate.serial == serial).all()
def delete(cert_id):
    """
    Delete's a certificate.

    :param cert_id: primary key of the certificate to remove
    """
    database.delete(get(cert_id))
def get_all_certs():
    """
    Retrieves all certificates within Lemur.

    :return: list of every Certificate row
    """
    return Certificate.query.all()
def get_all_pending_cleaning(source):
    """
    Retrieves all certificates that are available for cleaning: attached to
    the given source but not deployed on any endpoint.

    :param source: the source whose certificates are considered
    :return: list of certificates with no endpoints
    """
    return Certificate.query.filter(Certificate.sources.any(id=source.id))\
        .filter(not_(Certificate.endpoints.any())).all()
def get_all_pending_reissue():
    """
    Retrieves all certificates that need to be rotated.

    Must be X days from expiration, uses the certificates rotation
    policy to determine how many days from expiration the certificate must be
    for rotation to be pending.

    :return: list of certificates pending rotation
    """
    # the `== True` comparisons generate SQLAlchemy filter expressions
    # (`is True` would not), hence the noqa
    return Certificate.query.filter(Certificate.rotation == True)\
        .filter(not_(Certificate.replaced.any()))\
        .filter(Certificate.in_rotation_window == True).all()  # noqa
def find_duplicates(cert):
    """
    Finds certificates that already exist within Lemur. We do this by looking for
    certificate bodies that are the same. This is the most reliable way to determine
    if a certificate is already being tracked by Lemur.

    :param cert: dict with a 'body' key and an optional 'chain' key
    :return: list of certificates with an identical body (and chain, if given)
    """
    if cert['chain']:
        return Certificate.query.filter_by(body=cert['body'].strip(), chain=cert['chain'].strip()).all()
    else:
        # no chain supplied: only match certificates stored without a chain
        return Certificate.query.filter_by(body=cert['body'].strip(), chain=None).all()
def export(cert, export_plugin):
    """
    Exports a certificate to the requested format. This format
    may be a binary format.

    :param export_plugin: dict with the plugin 'slug' and its 'pluginOptions'
    :param cert: certificate to export (body, chain and private key are passed through)
    :return: whatever the export plugin produces
    """
    plugin = plugins.get(export_plugin['slug'])
    return plugin.export(cert.body, cert.chain, cert.private_key, export_plugin['pluginOptions'])
def update(cert_id, **kwargs):
    """
    Updates a certificate.

    :param cert_id: primary key of the certificate to update
    :param kwargs: attribute name/value pairs to set on the certificate
    :return: the updated certificate
    """
    cert = get(cert_id)
    for key, value in kwargs.items():
        setattr(cert, key, value)
    return database.update(cert)
def create_certificate_roles(**kwargs):
    """Return the roles a new certificate should be associated with: an
    owner role (created on demand) plus, if an authority is given, the
    authority owner's role."""
    # create an role for the owner and assign it
    owner_role = role_service.get_by_name(kwargs['owner'])
    if not owner_role:
        owner_role = role_service.create(
            kwargs['owner'],
            description="Auto generated role based on owner: {0}".format(kwargs['owner'])
        )
    # ensure that the authority's owner is also associated with the certificate
    if kwargs.get('authority'):
        authority_owner_role = role_service.get_by_name(kwargs['authority'].owner)
        return [owner_role, authority_owner_role]
    return [owner_role]
def mint(**kwargs):
    """
    Minting is slightly different for each authority.
    Support for multiple authorities is handled by individual plugins.

    Returns (cert_body, private_key, cert_chain, external_id, csr); the
    private key is None when the caller supplied its own CSR.
    (Repairs dataset-split corruption around the CSR branch.)
    """
    authority = kwargs['authority']
    issuer = plugins.get(authority.plugin_name)

    # allow the CSR to be specified by the user
    if not kwargs.get('csr'):
        csr, private_key = create_csr(**kwargs)
        csr_created.send(authority=authority, csr=csr)
    else:
        csr = str(kwargs.get('csr'))
        private_key = None
        csr_imported.send(authority=authority, csr=csr)

    cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
    return cert_body, private_key, cert_chain, external_id, csr
def import_certificate(**kwargs):
    """
    Uploads already minted certificates and pulls the required information into Lemur.

    This is to be used for certificates that are created outside of Lemur but
    should still be tracked.

    Internally this is used to bootstrap Lemur with external
    certificates, and used when certificates are 'discovered' through various discovery
    techniques. was still in aws.

    :param kwargs: certificate attributes; `owner` defaults to the first
        configured security team email
    """
    if not kwargs.get('owner'):
        kwargs['owner'] = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')[0]

    return upload(**kwargs)
def upload(**kwargs):
    """
    Allows for pre-made certificates to be imported into Lemur.

    Attaches owner/authority roles, normalizes the private key to bytes,
    persists the certificate and emits the `certificate_imported` signal.

    :return: the created certificate
    """
    roles = create_certificate_roles(**kwargs)

    if kwargs.get('roles'):
        kwargs['roles'] += roles
    else:
        kwargs['roles'] = roles

    if kwargs.get('private_key'):
        # the model stores the private key as bytes
        private_key = kwargs['private_key']
        if not isinstance(private_key, bytes):
            kwargs['private_key'] = private_key.encode('utf-8')

    cert = Certificate(**kwargs)
    cert = database.create(cert)
    kwargs['creator'].certificates.append(cert)
    cert = database.update(cert)
    certificate_imported.send(certificate=cert, authority=cert.authority)
    return cert
def create(**kwargs):
    """
    Creates a new certificate.

    Mints through the authority plugin; when the plugin returns a body the
    certificate is stored as a Certificate, otherwise a PendingCertificate
    is created (e.g. for authorities that issue asynchronously).

    :return: Certificate or PendingCertificate
    """
    cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
    kwargs['body'] = cert_body
    kwargs['private_key'] = private_key
    kwargs['chain'] = cert_chain
    kwargs['external_id'] = external_id
    kwargs['csr'] = csr

    roles = create_certificate_roles(**kwargs)

    if kwargs.get('roles'):
        kwargs['roles'] += roles
    else:
        kwargs['roles'] = roles

    if cert_body:
        cert = Certificate(**kwargs)
        kwargs['creator'].certificates.append(cert)
    else:
        # no body yet: track it as pending until the CA issues
        cert = PendingCertificate(**kwargs)
        kwargs['creator'].pending_certificates.append(cert)

    cert.authority = kwargs['authority']
    database.commit()

    if isinstance(cert, Certificate):
        # only fully issued certificates trigger the signal and metrics
        certificate_issued.send(certificate=cert, authority=cert.authority)
        metrics.send('certificate_issued', 'counter', 1, metric_tags=dict(owner=cert.owner, issuer=cert.issuer))
    return cert
def render(args):
"""
Helper function that allows use to render our REST Api.
:param args:
:return:
"""
query = database.session_query(Certificate)
time_range = args.pop('time_range')
destination_id = args.pop('destination_id')
notification_id = args.pop('notifi |
sti-lyneos/shop | tests/gtk3/test_exhibits.py | Python | lgpl-3.0 | 6,973 | 0.000574 | import os
import unittest
from mock import patch, Mock
from tests.utils import (
FakedCache,
ObjectWithSignals,
setup_test_env,
)
setup_test_env()
from softwarecenter.db.database import StoreDatabase
from softwarecenter.ui.gtk3.views import lobbyview
from softwarecenter.ui.gtk3.widgets.exhibits import (
_HtmlRenderer,
)
class ExhibitsTestCase(unittest.TestCase):
"""The test suite for the exhibits carousel."""
    def setUp(self):
        # build a LobbyView against a faked cache/db; no icons or app filter
        self.cache = FakedCache()
        self.db = StoreDatabase(cache=self.cache)
        self.lobby = lobbyview.LobbyView(cache=self.cache, db=self.db,
            icons=None, apps_filter=None)
        self.addCleanup(self.lobby.destroy)
    def _get_banner_from_lobby(self):
        # the exhibits banner is the child of the last widget in the lobby vbox
        return self.lobby.vbox.get_children()[-1].get_child()
    def test_featured_exhibit_by_default(self):
        """Show the featured exhibit before querying the remote service."""
        # no SoftwareCenterAgent is patched in, so only the fallback is shown
        self.lobby._append_banner_ads()
        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)
    def test_no_exhibit_if_not_available(self):
        """The exhibit should not be shown if the package is not available."""
        # 'foobarbaz' is deliberately absent from self.cache
        exhibit = Mock()
        exhibit.package_names = u'foobarbaz'

        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            self.lobby._append_banner_ads()

        # the unavailable exhibit is dropped; the fallback takes its place
        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)
def test_exhibit_if_available(self):
"""The exhibit should be shown if the package is available."""
exhibit = Mock()
exhibit.package_names = u'foobarbaz'
exhibit.banner_urls = ['banner']
exhibit.title_translated = ''
self.cache[u'foobarbaz'] = Mock()
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.assertIs(banner.exhibits[0], exhibit)
def test_exhibit_if_mixed_availability(self):
"""The exhibit should be shown even if some are not available."""
# available exhibit
exhibit = Mock()
exhibit.package_names = u'foobarbaz'
exhibit.banner_urls = ['banner']
exhibit.title_translated = ''
self.cache[u'foobarbaz'] = Mock()
# not available exhibit
other = Mock()
other.package_names = u'not-there'
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[exhibit, other])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.ass | ertIs(banner.exhibits[0], exhibit)
def test_exhibit_with_url(self):
# available exhibit
exhibit = Mock()
exhibit.package_names = ''
exhibit.click_url = 'http://example.com'
exhibit.banner_urls = [ | 'banner']
exhibit.title_translated = ''
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[exhibit])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
# add the banners
self.lobby._append_banner_ads()
# fake click
alloc = self.lobby.exhibit_banner.get_allocation()
mock_event = Mock()
mock_event.x = alloc.x
mock_event.y = alloc.y
with patch.object(self.lobby.exhibit_banner, 'emit') as mock_emit:
self.lobby.exhibit_banner.on_button_press(None, mock_event)
self.lobby.exhibit_banner.on_button_release(None, mock_event)
mock_emit.assert_called()
signal_name = mock_emit.call_args[0][0]
call_exhibit = mock_emit.call_args[0][1]
self.assertEqual(signal_name, "show-exhibits-clicked")
self.assertEqual(call_exhibit.click_url, "http://example.com")
def test_exhibit_with_featured_exhibit(self):
""" regression test for bug #1023777 """
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[lobbyview.FeaturedExhibit()])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
# add the banners
self.lobby._append_banner_ads()
# fake click
alloc = self.lobby.exhibit_banner.get_allocation()
mock_event = Mock()
mock_event.x = alloc.x
mock_event.y = alloc.y
with patch.object(self.lobby, 'emit') as mock_emit:
self.lobby.exhibit_banner.on_button_press(None, mock_event)
self.lobby.exhibit_banner.on_button_release(None, mock_event)
mock_emit.assert_called()
signal_name = mock_emit.call_args[0][0]
call_category = mock_emit.call_args[0][1]
self.assertEqual(signal_name, "category-selected")
self.assertEqual(call_category.name, "Our star apps")
class HtmlRendererTestCase(unittest.TestCase):
    # Tests for the _HtmlRenderer banner-image handling.

    def test_multiple_images(self):
        # Fake downloader: "completes" immediately, reporting only the
        # basename of the requested URL as the downloaded file.
        downloader = ObjectWithSignals()
        downloader.download_file = lambda *args, **kwargs: downloader.emit(
            "file-download-complete", downloader, os.path.basename(args[0]))
        with patch("softwarecenter.ui.gtk3.widgets.exhibits."
                   "SimpleFileDownloader", lambda: downloader):
            renderer = _HtmlRenderer()
            mock_exhibit = Mock()
            mock_exhibit.banner_urls = [
                "http://example.com/path1/banner1.png",
                "http://example.com/path2/banner2.png",
            ]
            # HTML references the images by their URL paths; after download
            # they must be rewritten to the local basenames.
            mock_exhibit.html = "url('/path1/banner1.png')#"\
                "url('/path2/banner2.png')"
            renderer.set_exhibit(mock_exhibit)
            # assert the stuff we expected to get downloaded got downloaded
            self.assertEqual(
                renderer._downloaded_banner_images,
                ["banner1.png", "banner2.png"])
            # test that the path mangling worked
            self.assertEqual(
                mock_exhibit.html, "url('banner1.png')#url('banner2.png')")
if __name__ == "__main__":
unittest.main()
|
jakeh12/hackisu2018-flopper | basestation/track_chunk.py | Python | mit | 264 | 0.003788 | from helpers import HexArrayToDecimal
from track_data import TrackDa | ta
class TrackChunk(object):
    """One track chunk parsed from a raw byte array.

    Layout of ``raw_data`` as read here: bytes 4-7 hold the payload
    length, bytes 8+ hold the track payload.  Bytes 0-3 are presumably a
    chunk identifier/header -- TODO confirm against the producer.
    """

    def __init__(self, raw_data):
        # Keep the untouched input around for callers that need it.
        self.raw_data = raw_data
        # Declared payload length, bytes 4-7.
        self.length = HexArrayToDecimal(raw_data[4:8])
        # Everything after the 8-byte header is the track payload.
        self.data = TrackData(raw_data[8:])
|
mikeireland/pymfe | doc/conf.py | Python | mit | 9,303 | 0.005912 | # -*- coding: utf-8 -*-
#
# pymfe documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 11 12:35:11 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_ | sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named | 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymfe'
copyright = u'2015, Michael J. Ireland'
author = u'Michael J. Ireland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymfedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymfe.tex', u'pymfe Documentation',
u'Michael J. Ireland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is |
Kronuz/Xapiand | contrib/python/cuuid/mertwis.py | Python | mit | 495 | 0 | # -*- coding: utf-8 -*-
# Mersenne Twister implementation
from _random import Random
import six
class MT19937(object):
    """Mersenne Twister (MT19937) with the reference seeding algorithm.

    CPython's ``_random.Random`` implements the MT19937 generator but
    seeds it differently from the reference implementation.  This class
    reproduces the reference ``init_genrand`` seeding and injects the
    resulting state into ``_random.Random``, so outputs match other
    standard MT19937 implementations for the same 32-bit seed.
    """

    def __init__(self, seed):
        # Reference init_genrand(): mt[0] = seed;
        # mt[i] = (1812433253 * (mt[i-1] ^ (mt[i-1] >> 30)) + i) mod 2**32
        mt = [0] * 624
        mt[0] = p = seed & 0xffffffff
        for mti in range(1, 624):
            mt[mti] = p = (1812433253 * (p ^ (p >> 30)) + mti) & 0xffffffff
        # The 625th state word is the output index; 624 forces a full
        # twist before the first extraction, as in the reference code.
        mt.append(624)
        self.random = Random()
        self.random.setstate(tuple(mt))

    def __call__(self):
        # Next tempered 32-bit output word.
        return self.random.getrandbits(32)
|
hcrlab/access_teleop | cse481wi18/perception/src/perception/mock_camera.py | Python | mit | 858 | 0 | import rosbag
from sensor_msgs.msg import PointCloud2
def pc_filter(topic, datatype, md5sum, msg_def, header):
    """rosbag connection filter keeping only sensor_msgs/PointCloud2 streams.

    Only ``datatype`` is inspected; the other arguments are part of the
    rosbag ``connection_filter`` callback signature and are ignored.
    """
    return datatype == 'sensor_msgs/PointCloud2'
class MockCamera(object):
    """A MockCamera reads saved point clouds from bag files.
    """

    def __init__(self):
        pass

    def read_cloud(self, path):
        """Returns the sensor_msgs/PointCloud2 in the given bag file.

        Args:
            path: string, the path to a bag file with a single
                sensor_msgs/PointCloud2 in it.

        Returns: A sensor_msgs/PointCloud2 message, or None if there were no
            PointCloud2 messages in the bag file.
        """
        bag = rosbag.Bag(path)
        try:
            # pc_filter restricts iteration to PointCloud2 connections,
            # so the first message yielded (if any) is what we want.
            for topic, msg, time in bag.read_messages(
                    connection_filter=pc_filter):
                return msg
            return None
        finally:
            # The original called bag.close() after the `return`, making it
            # unreachable; `finally` guarantees the bag is always closed.
            bag.close()
|
TuSimple/simpledet | config/ms_r50v1_fpn_1x.py | Python | apache-2.0 | 10,314 | 0.003975 | from symbol.builder import add_anchor_to_arg
from models.FPN.builder import MSRAResNet50V1FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
from models.msrcnn.builder import MaskScoringFasterRcnn as Detector
from models.msrcnn.builder import MaskFPNRpnHead as RpnHead
from models.msrcnn.builder import MaskFasterRcnn4ConvHead as MaskHead
from models.maskrcnn.builder import BboxPostProcessor
from models.maskrcnn.process_output import process_output
from models.msrcnn.builder import MaskIoUConvHead as MaskIoUHead
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (8,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
image_anchor = 256
max_side = 1400
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class MaskParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
resolution = 28
dim_reduced = 256
num_fg_roi = int(RpnParam.subsample_proposal.image_roi * RpnParam.subsample_proposal.fg_fraction)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
img_roi = 1000
class MaskRoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 14
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
img_roi = 100
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
mult = 1
begin_epoch = 0
end_epoch = 6 * mult
lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: process_output(x, y)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam, MaskParam)
| roi_extractor = | RoiExtractor(RoiParam)
mask_roi_extractor = RoiExtractor(MaskRoiParam)
bbox_head = BboxHead(BboxParam)
mask_head = MaskHead(BboxParam, MaskParam, MaskRoiParam)
bbox_post_processer = BboxPostProcessor(TestParam)
maskiou_head = MaskIoUHead(TestParam, BboxParam, MaskParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head, bbox_post_processer)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet-v1-50"
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["mask_fcn"]
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
# data processing
class NormParam:
mean = (122.7717, 115.9465, 102.9801) # RGB order
std = (1.0, 1.0, 1.0)
# data processing
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
max_len_gt_poly = 2500
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (200, 100, 50, 25, 13)
self.long = (334, 167, 84, 42, 21)
scales = (8)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage, Pad2DImage
from models.maskrcnn.input import PreprocessGtPoly, EncodeGtPoly, \
Resize2DImageBboxMask, Flip2DImageBboxMask, Pad2DImageBboxMask
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(No |
jd23/py-deps | lib/python/snakefood/flatten.py | Python | gpl-2.0 | 535 | 0.003738 | """
Read a snakefood dependencies file and output the list of all files.
"""
# This file i | s part of the Snakefood open source package.
# See http://furius.ca/snakefood/ for licensing details.
import sys
from os.path import join
from snakefood.depends import read_depends, flatten_depends
def main():
    # Entry point: read a snakefood dependency dump from stdin and print
    # every file (root joined with its relative path), one per line.
    # NOTE(review): `print` statement and optparse imply Python 2.
    import optparse
    parser = optparse.OptionParser(__doc__.strip())
    opts, args = parser.parse_args()
    depends = read_depends(sys.stdin)
    for droot, drel in flatten_depends(depends):
        print join(droot, drel)
|
dennisdarwis/dugem-backend | api/serializers.py | Python | apache-2.0 | 3,888 | 0.005916 | from datetime import datetime
from rest_framework import serializers
from rest_framework.settings import api_settings
from api.models import VenueList, EventList
class VenueListSerializer(serializers.Serializer):
    """Manual (non-Model) serializer for VenueList rows."""
    id = serializers.IntegerField(read_only=True)
    venue_name = serializers.CharField(max_length=255, allow_blank=False)
    venue_url = serializers.CharField(max_length=255, allow_blank=False)
    venue_address = serializers.CharField(max_length=255, allow_blank=False)
    venue_lat_long = serializers.CharField(max_length=255, allow_blank=False)
    venue_contact = serializers.CharField(max_length=255, allow_blank=False)
    venue_details = serializers.CharField(max_length=255, allow_blank=False)
    venue_city = serializers.CharField(max_length=255, allow_blank=False)

    def create(self, validated_data):
        """
        Create and return a new `VenueList` instance, given the validated data.
        """
        return VenueList.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """
        Update and return an existing `VenueList` instance, given the
        validated data.  Fields absent from ``validated_data`` keep their
        current value.
        """
        for field in ('venue_name', 'venue_url', 'venue_address',
                      'venue_lat_long', 'venue_contact', 'venue_details',
                      'venue_city'):
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class EventListSerializer(serializers.Serializer):
    """Manual (non-Model) serializer for EventList rows."""
    id = serializers.IntegerField(read_only=True)
    venue_id = serializers.IntegerField(allow_null=False)
    event_name = serializers.CharField(max_length=255, allow_blank=False)
    event_price = serializers.DecimalField(allow_null=False, max_digits=6, decimal_places=2)
    event_detail = serializers.CharField(max_length=255, allow_blank=False)
    event_time_start = serializers.TimeField(format="%H:%M", input_formats=None)
    event_time_end = serializers.TimeField(format="%H:%M", input_formats=None)
    event_url = serializers.CharField(max_length=255, allow_blank=False)
    event_img_url = serializers.CharField(max_length=255, allow_blank=False)
    event_date_time = serializers.DateField(allow_null=False)

    def create(self, validated_data):
        """
        Create and return a new `EventList` instance, given the validated data.
        """
        return EventList.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """
        Update and return an existing `EventList` instance, given the
        validated data.  Fields absent from ``validated_data`` keep their
        current value.
        """
        for field in ('venue_id', 'event_name', 'event_price', 'event_detail',
                      'event_time_start', 'event_time_end', 'event_url',
                      'event_img_url', 'event_date_time'):
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
Zolertia/openthread | tests/scripts/thread-cert/Cert_5_2_05_AddressQuery.py | Python | bsd-3-clause | 5,385 | 0.000557 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
ED1 = 1
BR1 = 2
LEADER = 3
ROUTER2 = 4
REED = 5
ED2 = 6
ED3 = 7
class Cert_5_2_5_AddressQuery(unittest.TestCase):
    """Thread cert test 5.2.5: end devices reach the REED's addresses.

    Topology built via whitelists: LEADER links BR1, ROUTER2 and REED;
    ED1 attaches to BR1; ED2 and ED3 attach to ROUTER2; REED is held as a
    child (router upgrade threshold 0).
    """

    def setUp(self):
        self.nodes = {}
        for i in range(1, 8):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[BR1].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[REED].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[BR1].set_panid(0xface)
        self.nodes[BR1].set_mode('rsdn')
        self.nodes[BR1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[BR1].add_whitelist(self.nodes[ED1].get_addr64())
        self.nodes[BR1].enable_whitelist()
        self.nodes[BR1].set_router_selection_jitter(1)

        self.nodes[ED1].set_panid(0xface)
        self.nodes[ED1].set_mode('rsn')
        self.nodes[ED1].add_whitelist(self.nodes[BR1].get_addr64())
        self.nodes[ED1].enable_whitelist()

        self.nodes[REED].set_panid(0xface)
        self.nodes[REED].set_mode('rsdn')
        self.nodes[REED].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[REED].add_whitelist(self.nodes[ROUTER2].get_addr64())
        # Threshold 0 keeps the REED from promoting itself to router.
        self.nodes[REED].set_router_upgrade_threshold(0)
        self.nodes[REED].enable_whitelist()

        self.nodes[ROUTER2].set_panid(0xface)
        self.nodes[ROUTER2].set_mode('rsdn')
        self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER2].add_whitelist(self.nodes[REED].get_addr64())
        self.nodes[ROUTER2].add_whitelist(self.nodes[ED2].get_addr64())
        self.nodes[ROUTER2].add_whitelist(self.nodes[ED3].get_addr64())
        self.nodes[ROUTER2].enable_whitelist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)

        self.nodes[ED2].set_panid(0xface)
        self.nodes[ED2].set_mode('rsn')
        self.nodes[ED2].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[ED2].enable_whitelist()

        self.nodes[ED3].set_panid(0xface)
        self.nodes[ED3].set_mode('rsn')
        self.nodes[ED3].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[ED3].enable_whitelist()

    def tearDown(self):
        # Loop variable renamed: the original `for node in ...` shadowed
        # the imported `node` module inside this method.
        for n in list(self.nodes.values()):
            n.stop()
        del self.nodes

    def test(self):
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[BR1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[BR1].get_state(), 'router')
        self.nodes[BR1].add_prefix('2001:2:0:3::/64', 'paros')
        self.nodes[BR1].add_prefix('2001:2:0:4::/64', 'paros')
        self.nodes[BR1].register_netdata()

        self.nodes[ED1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')

        self.nodes[REED].start()
        time.sleep(5)
        self.assertEqual(self.nodes[REED].get_state(), 'child')

        self.nodes[ROUTER2].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')

        self.nodes[ED2].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED2].get_state(), 'child')

        self.nodes[ED3].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED3].get_state(), 'child')

        # ED2 must be able to ping every non-link-local address of the REED,
        # which exercises the address-query mechanism across routers.
        addrs = self.nodes[REED].get_addrs()
        for addr in addrs:
            if addr[0:4] != 'fe80':
                self.assertTrue(self.nodes[ED2].ping(addr))
                time.sleep(1)
if __name__ == '__main__':
unittest.main()
|
Flamacue/pretix | src/tests/base/test_settings.py | Python | apache-2.0 | 11,458 | 0.000785 | from datetime import date, datetime, time
from decimal import Decimal
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils.timezone import now
from i18nfield.strings import LazyI18nString
from pretix.base import settings
from pretix.base.models import Event, Organizer, User
from pretix.base.settings import SettingsSandbox
from pretix.control.forms.global_settings import GlobalSettingsObject
class SettingsTestCase(TestCase):
def setUp(self):
settings.DEFAULTS['test_default'] = {
'default': 'def',
'type': str
}
self.global_settings = GlobalSettingsObject()
self.global_settings.settings._flush()
self.organizer = Organizer.objects.create(name='Dummy', slug='dummy')
self.organizer.settings._flush()
self.event = Event.objects.create(
organizer=self.organizer, name='Dummy', slug='dummy',
date_from=now(),
)
self.event.settings._flush()
def test_global_set_explicit(self):
self.global_settings.settings.test = 'foo'
self.assert | Equal(s | elf.global_settings.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
def test_organizer_set_explicit(self):
self.organizer.settings.test = 'foo'
self.assertEqual(self.organizer.settings.test, 'foo')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertEqual(self.organizer.settings.test, 'foo')
def test_event_set_explicit(self):
self.event.settings.test = 'foo'
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
def test_event_set_twice(self):
self.event.settings.test = 'bar'
self.event.settings.test = 'foo'
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
def test_organizer_set_on_global(self):
self.global_settings.settings.test = 'foo'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.organizer.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.organizer.settings.test, 'foo')
def test_event_set_on_global(self):
self.global_settings.settings.test = 'foo'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
def test_event_set_on_organizer(self):
self.organizer.settings.test = 'foo'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
def test_event_override_organizer(self):
self.organizer.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
def test_event_override_global(self):
self.global_settings.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
# Reload object
self.global_settings = GlobalSettingsObject()
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
def test_default(self):
self.assertEqual(self.global_settings.settings.test_default, 'def')
self.assertEqual(self.organizer.settings.test_default, 'def')
self.assertEqual(self.event.settings.test_default, 'def')
self.assertEqual(self.event.settings.get('nonexistant', default='abc'), 'abc')
def test_default_typing(self):
self.assertIs(type(self.event.settings.get('nonexistant', as_type=Decimal, default=0)), Decimal)
def test_item_access(self):
self.event.settings['foo'] = 'abc'
self.assertEqual(self.event.settings['foo'], 'abc')
del self.event.settings['foo']
self.assertIsNone(self.event.settings['foo'])
def test_delete(self):
self.organizer.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
del self.event.settings.test
self.assertEqual(self.event.settings.test, 'foo')
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
del self.organizer.settings.test
self.assertIsNone(self.organizer.settings.test)
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertIsNone(self.organizer.settings.test)
def test_serialize_str(self):
self._test_serialization('ABC', as_type=str)
def test_serialize_float(self):
self._test_serialization(2.3, float)
def test_serialize_int(self):
self._test_serialization(2, int)
def test_serialize_datetime(self):
self._test_serialization(now(), datetime)
def test_serialize_time(self):
self._test_serialization(now().time(), time)
def test_serialize_date(self):
self._test_serialization(now().date(), date)
def test_serialize_decimal(self):
self._test_serialization(Decimal('2.3'), Decimal)
def test_serialize_dict(self):
self._test_serialization({'a': 'b', 'c': 'd'}, dict)
def test_serialize_list(self):
self._test_serialization([1, 2, 'a'], list)
def test_serialize_lazyi18nstring(self):
self._test_serialization(LazyI18nString({'de': 'Hallo', 'en': 'Hello'}), LazyI18nString)
def test_serialize_bool(self):
self._test_serialization(True, bool)
self._test_serialization(False, bool)
def test_serialize_bool_implicit(self):
self.event.settings.set('test', True)
self.event.settings._flush()
self.assertIs(self.event.settings.get('test', as_type=None), True)
self.event.settings.set('test', False)
self.event.settings._flush()
self.assertIs(self.event.settings.get('test', as_type=None), False)
def test_serialize_versionable(self):
self._test_serialization(self.event, Event)
def test_serialize_model(self):
self._test_serialization(User.objects.create_user('dummy@dummy.dummy', 'dummy'), User)
def test_serialize_unknown(self):
class Type:
pass
try:
self._test_serialization(Type(), Type)
self.assertTrue(False, 'No exception thrown!')
except TypeError:
pass
def test_serialize_file(self):
val = SimpleUploadedFile("sample_invalid_image.jpg", b"file_content", content_type="image/jpeg")
default_storage.save(val.name, val)
val.close()
self.event.settings.set('test', val)
s |
saukrIppl/seahub | seahub/api2/endpoints/share_links.py | Python | apache-2.0 | 9,155 | 0.000983 | import logging
from constance import config
from dateutil.relativedelta import relativedelta
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.utils import timezone
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from pysearpc import SearpcError
from seahub.api2.utils import api_error
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.share.models import FileShare, OrgFileShare
from seahub.utils import gen_shared_link, is_org_context
from seahub.views import check_folder_permission
logger = logging.getLogger(__name__)
def get_share_link_info(fileshare):
data = {}
token = fileshare.token
data['repo_id'] = fileshare.repo_id
data['path'] = fileshare.path
data['ctime'] = fileshare.ctime
data['view_cnt'] = fileshare.view_cnt
data['link'] = gen_shared_link(token, fileshare.s_type)
data['token'] = token
data['expire_date'] = fileshare.expire_date
data['is_expired'] = fileshare.is_expired()
data['username'] = fileshare.username
return data
class ShareLinks(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def _generate_obj_id_and_type_by_path(self, repo_id, path):
file_id = seafile_api.get_file_id_by_path(repo_id, path)
if file_id:
return (file_id, 'f')
dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
if dir_id:
return (dir_id, 'd')
return (None, None)
def get(self, request):
""" get share links.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check if args invalid
repo_id = request.GET.get('repo_id', None)
if repo_id:
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# repo level permission check
if not check_folder_permission(request, repo_id, '/'):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
path = request.GET.get('path', None)
if path:
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# folder/path permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
fileshares = FileShare.objects.filter(username=username)
# filter result by args
if repo_id:
fileshares = filter(lambda fs: fs.repo_id == repo_id, fileshares)
if path:
if s_type == 'd' and path[-1] != '/':
path = path + '/'
fileshares = filter(lambda fs: fs.path == path, fileshares)
result = []
for fs in fileshares:
link_info = get_share_link_info(fs)
result.append(link_info)
if len(result) == 1:
result = result[0]
return Response(result)
def post(self, request):
""" create share link.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_id = request.data.get('repo_id', None)
if not repo_id:
error_msg = 'repo_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
path = request.data.get('path', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
password = request.data.get('password', None)
if password and len(password) < config.SHARE_LINK_PASSWORD_MIN_LENGTH:
error_msg = _('Password is too short.')
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
expire_days = int(request.data.get('expire_days', 0))
except ValueError:
expire_days = 0
if expire_days <= 0:
expire_date = None
else:
expire_date = timezone.now() + relativedelta(days=expire_days)
username = request.user.username
if s_type == 'f':
fs = FileShare.objects.get_file_link_by_path(username, repo_id, path)
if not fs:
fs = File | Share.objects.create_file_link(username, repo_id, path,
| password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
elif s_type == 'd':
fs = FileShare.objects.get_dir_link_by_path(username, repo_id, path)
if not fs:
fs = FileShare.objects.create_dir_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
link_info = get_share_link_info(fs)
return Response(link_info)
class ShareLink(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def get(self, request, token):
try:
fs = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'token %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
link_info = get_share_link_info(fs)
|
woogers/volatility | volatility/plugins/registry/registryapi.py | Python | gpl-2.0 | 12,244 | 0.009801 | # Volatility
# Copyright (C) 2008-2013 Volatility Foundation
# Copyright (C) 2011 Jamie Levy (Gleeda) <jamie.levy@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Jamie Levy (Gleeda)
@license: GNU General Public License 2.0
@contact: jamie.levy@gmail.com
@organization: Volatility Foundation
"""
import volatility.win32.hive as hivemod
import volatility.win32.rawreg as rawreg
import volatility.win32.hashdump as hashdump
import volatility.utils as utils
import volatility.plugins.registry.hivelist as hl
from heapq import nlargest
class RegistryApi(object):
"""A wrapper several highly used Registry functions"""
def __init__(self, config):
self._config = config
self.addr_space = utils.load_as(self._config)
self.all_offsets = {}
self.current_offsets = {}
self.populate_offsets()
def print_offsets(self):
'''
this is just in case we want to check our offsets and which hive(s) was/were chosen
'''
for item in self.all_offsets:
print "0x{0:x}".format(item), self.all_offsets[item]
for item in self.current_offsets:
print 'current', "0x{0:x}".format(item), self.current_offsets[item]
def populate_offsets(self):
'''
get all hive offsets so we don't have to scan again...
'''
hive_offsets = []
hiveroot = hl.HiveList(self._config).calculate()
for hive in hiveroot:
if hive.obj_offset not in hive_offsets:
hive_offsets.append(hive.obj_offset)
try:
name = hive.FileFullPath.v() or hive.FileUserName.v() or hive.HiveRootPath.v() or "[no name] | "
# What exception are we expecting here?
except:
name = "[no name]"
self.all_offsets[hive.obj_offset] = name
def reg_get_currentcontrolset(self, fullname = True):
'''
get the CurrentControlSet
If fullname is not specified, we only get the number like "1" or "2" etc
The default is ControlSet00{#} so we can append it to the desired key path
We return None if it fails, so you | need to verify before using.
'''
for offset in self.all_offsets:
name = self.all_offsets[offset] + " "
if name.lower().find("\\system ") != -1:
sysaddr = hivemod.HiveAddressSpace(self.addr_space, self._config, offset)
if fullname:
return "ControlSet00{0}".format(hashdump.find_control_set(sysaddr))
else:
return hashdump.find_control_set(sysaddr)
return None
def set_current(self, hive_name = None, user = None):
'''
if we find a hive that fits the given criteria, save its offset
so we don't have to scan again. this can be reset using reset_current
if context changes
'''
for item in self.all_offsets:
name = self.all_offsets[item] + " "
if user == None and hive_name == None:
#no particular preference: all hives
self.current_offsets[item] = name
elif user != None and name.lower().find('\\' + user.lower() + '\\') != -1 and name.lower().find("\\" + "ntuser.dat ") != -1:
#user's NTUSER.DAT hive
self.current_offsets[item] = name
elif hive_name != None and hive_name.lower() == 'hklm' \
and (name.lower().find("\\security ") != -1 or name.lower().find("\\system ") != -1 \
or name.lower().find("\\software ") != -1 or name.lower().find("\\sam ") != -1):
#any HKLM hive
self.current_offsets[item] = name
elif hive_name != None and name.lower().find("\\" + hive_name.lower() + " ") != -1 and user == None:
#a particular hive indicated by hive_name
if hive_name.lower() == "system" and name.lower().find("\\syscache.hve ") == -1:
self.current_offsets[item] = name
elif hive_name.lower() != "system":
self.current_offsets[item] = name
def reset_current(self):
'''
this is in case we switch to a different hive/user/context
'''
self.current_offsets = {}
def reg_get_key(self, hive_name, key, user = None, given_root = None):
'''
Returns a key from a requested hive; assumes this is from a single hive
if more than one hive is specified, the hive/key found is returned
'''
if self.all_offsets == {}:
self.populate_offsets()
if self.current_offsets == {}:
self.set_current(hive_name, user)
if key:
for offset in self.current_offsets:
if given_root == None:
h = hivemod.HiveAddressSpace(self.addr_space, self._config, offset)
root = rawreg.get_root(h)
else:
root = given_root
if root != None:
k = rawreg.open_key(root, key.split('\\'))
if k:
return k
return None
def reg_yield_key(self, hive_name, key, user = None, given_root = None):
'''
Use this function if you are collecting keys from more than one hive
'''
if self.all_offsets == {}:
self.populate_offsets()
if self.current_offsets == {}:
self.set_current(hive_name, user)
if key:
for offset in self.current_offsets:
name = self.current_offsets[offset]
if given_root == None:
h = hivemod.HiveAddressSpace(self.addr_space, self._config, offset)
root = rawreg.get_root(h)
else:
root = given_root
if root != None:
k = rawreg.open_key(root, key.split('\\'))
if k:
yield k, name
def reg_enum_key(self, hive_name, key, user = None):
'''
This function enumerates the requested key
'''
k = self.reg_get_key(hive_name, key, user)
if k:
for s in rawreg.subkeys(k):
if s.Name:
item = key + '\\' + s.Name
yield item
def reg_get_all_subkeys(self, hive_name, key, user = None, given_root = None):
'''
This function enumerates the subkeys of the requested key
'''
k = given_root if given_root != None else self.reg_get_key(hive_name, key)
if k:
for s in rawreg.subkeys(k):
if s.Name:
yield s
def reg_yield_values(self, hive_name, key, thetype = None, given_root = None):
'''
This function yields all values for a requested registry key
'''
if key:
h = given_root if given_root != None else self.reg_get_key(hive_name, key)
if h != None:
for v in rawreg.values(h):
tp, dat = rawreg.value_data(v)
if thetype == None or tp == thetype:
yield v.Name, dat
def reg_get_value(self, hive_name, key, value, strcmp = None, given_root = None):
'''
This function returns the requested value of a registry key
'''
if key and value:
h = given_root if given_root |
opentrials/opentrials-airflow | dags/operators/postgres_to_s3_transfer.py | Python | mpl-2.0 | 3,091 | 0.001618 | from urllib.parse import urlparse
import subprocess
impo | rt logging
im | port boto3
import airflow.hooks.base_hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import utils.helpers as helpers
class PostgresToS3Transfer(BaseOperator):
'''Dumps a Postgres database to a S3 key
:param url: URL to download. (templated)
:type url: str
:param postgres_conn_id: Postgres Connection's ID.
:type postgres_conn_id: str
:param tables: List of tables to export (optional, default exports all
tables).
:type tables: list of str
:param s3_conn_id: S3 Connection's ID. It needs a JSON in the `extra` field
with `aws_access_key_id` and `aws_secret_access_key`
:type s3_conn_id: str
:param s3_url: S3 url (e.g. `s3://my_bucket/my_key.zip`) (templated)
:type s3_url: str
'''
template_fields = ('s3_url',)
@apply_defaults
def __init__(self, postgres_conn_id, s3_conn_id, s3_url, tables=None, *args, **kwargs):
super(PostgresToS3Transfer, self).__init__(*args, **kwargs)
self.postgres_conn_id = postgres_conn_id
self.tables = tables
self.s3_conn_id = s3_conn_id
self.s3_url = s3_url
def execute(self, context):
s3 = self._load_s3_connection(self.s3_conn_id)
s3_bucket, s3_key = self._parse_s3_url(self.s3_url)
command = [
'pg_dump',
'-Fc',
]
if self.tables:
tables_params = ['--table={}'.format(table) for table in self.tables]
command.extend(tables_params)
logging.info('Dumping database "%s" into "%s"', self.postgres_conn_id, self.s3_url)
logging.info('Command: %s <POSTGRES_URI>', ' '.join(command))
command.append(helpers.get_postgres_uri(self.postgres_conn_id))
with subprocess.Popen(command, stdout=subprocess.PIPE).stdout as dump_file:
s3.Bucket(s3_bucket) \
.upload_fileobj(dump_file, s3_key)
@staticmethod
def _parse_s3_url(s3_url):
parsed_url = urlparse(s3_url)
if not parsed_url.netloc:
raise airflow.exceptions.AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
def _load_s3_connection(self, conn_id):
'''
Parses the S3 connection and returns a Boto3 resource.
This should be implementing using the S3Hook, but it currently uses
boto (not boto3) which doesn't allow streaming.
:return: Boto3 resource
:rtype: boto3.resources.factory.s3.ServiceResource
'''
conn = airflow.hooks.base_hook.BaseHook.get_connection(conn_id)
extra_dejson = conn.extra_dejson
key_id = extra_dejson['aws_access_key_id']
access_key = extra_dejson['aws_secret_access_key']
s3 = boto3.resource(
's3',
aws_access_key_id=key_id,
aws_secret_access_key=access_key
)
return s3
|
sputnick-dev/weboob | contrib/plugin.video.videoobmc/resources/lib/test/common_test.py | Python | agpl-3.0 | 2,860 | 0.002105 | # -*- coding: utf-8 -*-
from __future__ import print_function
import urllib
def get_addon():
pass
def get_translation(key):
translation = {'30000': 'Recherche',
'30001': 'Recherche :',
'30100': 'Télécharger',
'30110': 'Information',
'30200': 'Erreur!',
'30300': 'Information',
'30301': 'Lancement du téléchargement',
'30302': 'Fichier téléchargé avec succès',
'30551': 'Debut de la mise à jour',
'30552': 'Weboob est maintenant à jour'}
return translation.get(key)
def get_addon_dir():
return '/home/benjamin'
def get_settings(key):
settings = {'downloadPath': get_addon_dir(),
'nbVideoPerBackend': '0',
'nsfw': 'False'}
return settings.get(key)
def display_error(error):
print("%s: %s" % ("ERROR", error))
def display_info(msg):
print("%s: %s" % ("INFO", msg))
def parse_params(paramStr):
paramDic = {}
# Parameters are on the 3rd arg passed to the script
if len(paramStr) > 1:
paramStr = paramStr.replace('?', '')
# Ignore last char if it is a '/'
if paramStr[len(paramStr) - 1] == '/':
paramStr = paramStr[0:len(paramStr) - 2]
# Processing each parameter splited on '&'
for param in paramStr.split('&'):
try:
# Spliting couple key/value
key, value = param.split('=')
except:
key = param
value = ''
key = urllib.unquote_plus(key)
value = urllib.unquote_plus(value)
# Filling dictionnary
paramDic[key] = value
return paramDic
def ask_user(content, title):
return raw_input(title)
def create_param_url(paramsDic, quote_plus=False):
#url = sys.argv[0]
url = ''
sep = '?'
try:
for param in paramsDic:
if quote_plus:
url = url + sep + urllib.quote_plus(param) + '=' + urllib.quot | e_plus(paramsDic[param])
else:
url = "%s%s%s=%s" % (url, sep, param, paramsDic[param])
sep = '&'
except Exception as msg:
display_error("create_param_url %s" % msg)
url = None
return url
def add_menu_item(params={}):
| print('%s => "%s"' % (params.get('name'), create_param_url(params)))
def add_menu_link(params={}):
print('[%s] %s (%s)' % (params.get('id'), params.get('name'), params.get('url')))
#print params.get('itemInfoLabels')
#print params.get('c_items')
def end_of_directory(update=False):
print('******************************************************')
def download_video(url, name, dir='./'):
print('Downlaod a video %s from %s' % (name, url))
|
ebursztein/SiteFab | SiteFab/parser/mistune.py | Python | gpl-3.0 | 35,424 | 0.000056 | # coding: utf-8
"""
mistune
~~~~~~~
The fastest markdown parser in pure Python with renderer feature.
:copyright: (c) 2014 - 2016 by Hsiaoming Yang.
"""
import re
import inspect
__version__ = '0.7.3'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
'BlockGrammar', 'BlockLexer',
'InlineGrammar', 'InlineLexer',
'Renderer', 'Markdown',
'markdown', 'escape',
]
_key_pattern = re.compile(r'\s+')
_nonalpha_pattern = re.compile(r'\W')
_escape_pattern = re.compile(r'&(?!#?\w+;)')
_newline_pattern = re.compile(r'\r\n|\r')
_block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
_block_code_leading_pattern = re.compile(r'^ {4}', re.M)
_inline_tags = [
'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data',
'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark',
'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del',
'img', 'font',
]
_pre_tags = ['pre', 'script', 'style']
_valid_end = r'(?!:/|[^\w\s@]*@)\b'
_valid_attr = r'''\s*[a-zA-Z\-](?:\=(?:"[^"]*"|'[^']*'|\d+))*'''
_block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end)
_scheme_blacklist = ('javascript:', 'vbscript:')
def _pure_pattern(regex):
pattern = regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
return pattern
def _keyify(key):
return _key_pattern.sub(' ', key.lower())
def escape(text, quote=False, smart_amp=True):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences.
The original cgi.escape will always escape "&", but you can control
this one for a smart escape amp.
:param quote: if set to True, " and ' will be escaped.
:param smart_amp: if set to False, & will always be escaped.
"""
if smart_amp:
text = _escape_pattern.sub('&', text)
else:
text = text.replace('&', '&')
text = text.replace('<', '<')
text = text.replace('>', '>')
if quote:
text = text.replace('"', '"')
text = text.replace("'", ''')
return text
def escape_link(url):
"""Remove dangerous URL schemes like javascript: and escape afterwards."""
lower_url = url.lower().strip('\x00\x1a \n\r\t')
for scheme in _scheme_blacklist:
if lower_url.startswith(scheme):
return ''
return escape(url, quote=True, smart_amp=False)
def preprocessing(text, tab=4):
text = _newline_pattern.sub('\n', text)
text = text.expandtabs(tab)
text = text.replace('\u00a0', ' ')
text = text.replace('\u2424', '\n')
pattern = re.compile(r'^ +$', re.M)
return pattern.sub('', text)
class BlockGrammar(object):
"""Grammars for block level tokens."""
def_links = re.compile(
r'^ *\[([^^\]]+)\]: *' # [key]:
r'<?([^\s>]+)>?' # <link> or link
r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
)
def_footnotes = re.compile(
r'^\[\^([^\]]+)\]: *('
r'[^\n]*(?:\n+|$)' # [^key]:
r'(?: {1,}[^\n]*(?:\n+|$))*'
r')'
)
newline = re.compile(r'^\n+')
block_code = re.compile(r'^( {4}[^\n]+\n*)+')
fences = re.compile(
r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang
r'([\s\S]+?)\s*'
r'\1 *(?:\n+|$)' # ```
)
hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
list_block = re.compile(
r'^( *)([*+-]|\d+\.) [\s\S]+?'
r'(?:'
r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
r'|\n+(?=%s)' # def links
r'|\n+(?=%s)' # def footnotes
r'|\n{2,}'
r'(?! )'
r'(?!\1(?:[*+-]|\d+\.) )\n*'
r'|'
r'\s*$)' % (
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
)
)
list_item = re.compile(
r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
flags=re.M
)
list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
paragraph = re.compile(
r'^((?:[^\n]+\n?(?!'
r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
r'))+)\n*' % (
_pure_pattern(fences).replace(r'\1', r'\2'),
_pure_pattern(list_block).replace(r'\1', r'\3'),
_pure_pattern(hrule),
_pure_pattern(heading),
_pure_pattern(lheading),
_pure_pattern(block_quote),
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
'<' + _block_tag,
)
)
block_html = re.compile(
r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
r'<!--[\s\S]*?-->',
r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr),
r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr),
)
)
table = re.compile(
r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
)
nptable = re.compile(
r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
)
text = re.compile(r'^[^\n]+')
class BlockLexer(object):
"""Block level lexer for block grammars."""
grammar_class = BlockGrammar
default_rules = [
'newline', 'hrule', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'block_quote',
'list_block', 'block_html', 'def_links',
'def_footnotes', 'table', 'paragraph', 'text'
]
list_rules = (
'newline', 'block_code', 'fences', 'lheading', 'hrule',
'block_quote', 'list_block', 'block_html', 'text',
)
footnote_rules = (
'newline', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'hrule', 'block_quote',
'list_block', 'block_html', 'table', 'paragraph', 'text'
)
def __init__(self, rules=None, **kwargs):
self.tokens = []
self.def_links = {}
self.def_footnotes = {}
if not rules:
rules = self.grammar_class()
self.rules = rules
def __call__(self, text, rules=None):
return self.parse(text, rules)
def parse(self, text, rules=None):
text = text.rstrip('\n')
if not r | ules:
rules = self.default_rules
def manipulate(text):
for key in rules:
rule = getattr(self.rules, key)
m = rul | e.match(text)
if not m:
continue
getattr(self, 'parse_%s' % key)(m)
return m
return False # pragma: no cover
while text:
m = manipulate(text)
if m is not False:
text = text[len(m.group(0)):]
continue
if text: # pragma: no cover
raise RuntimeError('Infinite loop at: %s' % text)
return self.tokens
def parse_newline(self, m):
length = len(m.group(0))
if length > 1:
self.tokens.append({'type': 'newline'})
def parse_block_code(self, m):
# clean leading whitespace
code = _block_code_leading_pattern.sub('', m.group(0))
self.tokens.append({
'type': 'code',
'lang': None,
'text': code,
})
def parse_fences(self, m):
self.tokens.append({
'type': 'code',
'lang': m.group(2),
'text': m.group(3),
})
def parse_heading(self, m):
self.tokens.append({
'type': 'heading',
'level': len(m.group(1)),
'text': m.group(2),
})
def parse_lheading(self, m):
"""Parse setext heading."""
self.tokens.append({
'type': 'heading',
'level': 1 if m.group(2) == '=' else 2,
'text': m.group(1),
})
def parse_hrule(self, m):
self.tokens.append({'type': 'hrule'})
def parse_list_block(self, m):
bull = m.group(2)
self.tokens.append({
'type': 'list_start',
'ordered': '.' in bull,
})
cap = m.group(0)
self._process_list_item |
jianghuaw/nova | nova/objects/service.py | Python | apache-2.0 | 22,275 | 0.000045 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova import db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 22
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
# along with any pertinent data. For things that we can programatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
# bump so that we can drive version pins from it. We could include other
# service RPC versions at some point, minimum object versions, etc.
#
# The TestServiceVersion test will fail if the calculated set of
# things differs from the value in the last item of the list below,
# indicating that a version bump is needed.
#
# Also note that there are other reasons we may want to bump this,
# which will not be caught by the test. An example of this would be
# triggering (or disabling) an online data migration once all services
# in the cluster are at the same level.
#
# If a version bump is required for something mechanical, just document
# that generic thing here (like compute RPC version bumps). No need to
# replicate the details from compute/rpcapi.py here. However, for more
# complex service interactions, extra detail should be provided
SERVICE_VERSION_HISTORY = (
# Version 0: Pre-history
{'compute_rpc': '4.0'},
# Version 1: Introduction of SERVICE_VERSION
{'compute_rpc': '4.4'},
# Version 2: Compute RPC version 4.5
{'compute_rpc': '4.5'},
# Version 3: Compute RPC version 4.6
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
# Version 5: Compute RPC version 4.7
{'compute_rpc': '4.7'},
# Version 6: Compute RPC version 4.8
{'compute_rpc': '4.8'},
# Version 7: Compute RPC version 4.9
{'compute_rpc': '4.9'},
# Version 8: Compute RPC version 4.10
{'compute_rpc': '4.10'},
# Version 9: Compute RPC version 4.11
{'compute_rpc': '4.11'},
# Version 10: Compute node conversion to Inventories
{'compute_rpc': '4.11'},
# Version 11: Compute RPC version 4.12
{'compute_rpc': '4.12'},
# Version 12: The network APIs and compute manager support a NetworkRequest
# object where the network_id value is 'auto' or 'none'. BuildRequest
# objects are populated by nova-api during instance boot.
{'compute_rpc': '4.12'},
# Version 13: Compute RPC version 4.13
{'compute_rpc': '4.13'},
# Version 14: The compute manager supports setting device tags.
{'compute_rpc': '4.13'},
# Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
# is deleted before RPC to nova-compute.
{'compute_rpc': '4.13'},
# Version 16: Indicate that nova-compute will refuse to start if it doesn't
# have a placement section configured.
{'compute_rpc': '4.13'},
# Version 17: Add 'reserve_volume' to the boot from volume flow and
# remove 'check_attach'. The service version bump is needed to fall back to
# the old check in the API as the old computes fail if the volume is moved
# to 'attaching' state by reserve.
{'compute_rpc': '4.13'},
# Version 18: Compute RPC version 4.14
{'compute_rpc': '4.14'},
# Version 19: Compute RPC version 4.15
{'compute_rpc': '4.15'},
# Version 20: Compute RPC version 4.16
{'compute_rpc': '4.16'},
# Version 21: Compute RPC version 4.17
{'compute_rpc': '4.17'},
# Version 22: A marker for the behaviour change of auto-healing code on the
# compute host regarding allocations against an instance
{'compute_rpc': '4.17'},
)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
# Version 1.13: Added last_seen_up
# Version 1.14: Added forced_down
# Version 1.15: ComputeNode version 1.12
# Version 1.16: Added version
# Version 1.17: ComputeNode version 1.13
# Version 1.18: ComputeNode version 1.14
# Version 1.19: Added get_minimum_v | ersion()
# Version 1.20: Added get_ | minimum_version_multi()
# Version 1.21: Added uuid
# Version 1.22: Added get_by_uuid()
VERSION = '1.22'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
'last_seen_up': fields.DateTimeField(nullable=True),
'forced_down': fields.BooleanField(),
'version': fields.IntegerField(),
}
_MIN_VERSION_CACHE = {}
_SERVICE_VERSION_CACHING = False
def __init__(self, *args, **kwargs):
# NOTE(danms): We're going against the rules here and overriding
# init. The reason is that we want to *ensure* that we're always
# setting the current service version on our objects, overriding
# whatever else might be set in the database, or otherwise (which
# is the normal reason not to override init).
#
# We also need to do this here so that it's set on the client side
# all the time, such that create() and save() operations will
# include the current service version.
if 'version' in kwargs:
raise exception.ObjectActionError(
action='init',
reason='Version field is immutable')
super(Service, self).__init__(*args, **kwargs)
self.version = SERVICE_VERSION
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
super(Service, self).obj_make_compatible_from_manifest(
primitive, target_version, version_manifest)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 21) and 'uuid' in primitive:
del primitive['uuid']
if _target_version < (1, 16) and 'v |
nsdont/taiga-docker | events/conf.py | Python | mit | 216 | 0 | ho | st = "0.0.0.0"
secret_key = "mysecret"
repo_conf = {
"kwargs": {"dsn": "dbname=taiga"}
}
queue_conf = {
"path": "taiga_events.queues.pg.EventsQueue",
"kwargs": {
| "dsn": "dbname=taiga"
}
}
|
tusharmakkar08/TRIE_Data_Structure | serialize.py | Python | mit | 1,972 | 0.040061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# serialize.py
#
# Copyright 2013 tusharmakkar08 <tusharmakkar08@tusharmakkar08-Satellite-C660>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it | will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA | 02110-1301, USA.
#
#
# Importing modules
import cPickle
_end = '_end_'
# Dictionary-based trie: every node is a plain dict keyed by single
# characters, and the sentinel key _end marks a complete word.
def make_trie(words):
    """Build and return a nested-dict trie containing every word in *words*."""
    root = {}
    for word in words:
        node = root
        for ch in word:
            node = node.setdefault(ch, {})
        node[_end] = _end
    return root
def in_trie(trie, word):
    """Return True iff *word* was inserted into *trie* as a complete word."""
    node = trie
    for ch in word:
        if ch not in node:
            return False
        node = node[ch]
    # A word only counts when the final node carries the end sentinel.
    return _end in node
# Build-once, cache-to-disk flow (Python 2: cPickle / print statement / raw_input).
try:
    # Getting Data Back from Secondary file So avoids makin trie again and again
    newt=open("result.txt",'r')
    print "Fetching from secondary memory ... "
    TR=cPickle.load(newt)
    newt.close()
except IOError:
    # Opening Data File
    # Cache miss: build the trie from the word list, then pickle it for next run.
    print "Making of trie"
    List =open("20million.txt").readlines()
    inp=[]
    # Code to remove \n from the file
    for i in List:
        k=i.strip()
        if k!='':
            inp.append(k.lower())
    TR=make_trie(inp)
    # Dumping data into file
    newres=open("result.txt",'w')
    cPickle.dump(TR,newres)
    newres.close()
# Interactive lookup loop; '-1' exits, everything else is looked up lowercased.
while 1:
    r=raw_input("Enter string or -1 to exit\n")
    if r=="-1":
        break
    else:
        kit=r.lower()
        print in_trie(TR,kit)
|
postatum/nefertari-sqla | nefertari_sqla/fields.py | Python | apache-2.0 | 18,442 | 0.000108 | from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import Column, ForeignKey
# Since SQLAlchemy 1.0.0
# from sqlalchemy.types import MatchType
from .types import (
LimitedString,
LimitedText,
LimitedUnicode,
LimitedBigInteger,
LimitedInteger,
LimitedSmallInteger,
LimitedFloat,
LimitedNumeric,
LimitedUnicodeText,
DateTime,
Boolean,
Date,
Interval,
LargeBinary,
PickleType,
Time,
Choice,
Dict,
ChoiceArray,
)
class ProcessableMixin(object):
    """ Mixin that runs configured callables on a value being set on a field.

    Two processor groups are recognised:
        before_validation -- run before session.flush()
        after_validation  -- run after session.flush() but before
                             session.commit()
    """
    def __init__(self, *args, **kwargs):
        """ Pop the processor sequences off kwargs before delegating up. """
        self.before_validation = kwargs.pop('before_validation', ())
        self.after_validation = kwargs.pop('after_validation', ())
        super(ProcessableMixin, self).__init__(*args, **kwargs)
    def apply_processors(self, instance, new_value,
                         before=False, after=False):
        """ Feed *new_value* through the selected processor chains in order. """
        chain = []
        if before:
            chain.extend(self.before_validation)
        if after:
            chain.extend(self.after_validation)
        for processor in chain:
            new_value = processor(instance=instance, new_value=new_value)
        return new_value
class BaseField(Column):
    """ Base plain column that otherwise would be created as
    sqlalchemy.Column(sqlalchemy.Type())

    Attributes:
        _sqla_type_cls: SQLAlchemy type class used to instantiate the column type.
        _type_unchanged_kwargs: sequence of strings that represent arguments
            received by `_sqla_type_cls`, the names of which have not been
            changed. Values of field init arguments with these names will
            be extracted from field init kwargs and passed to Type init
            as is.
        _column_valid_kwargs: sequence of string names of valid kwargs that
            a Column may receive.
    """
    _sqla_type_cls = None
    _type_unchanged_kwargs = ()
    # Whitelist of kwargs sqlalchemy.Column accepts; anything else is
    # silently dropped by _drop_invalid_kwargs().
    _column_valid_kwargs = (
        'name', 'type_', 'autoincrement', 'default', 'doc', 'key', 'index',
        'info', 'nullable', 'onupdate', 'primary_key', 'server_default',
        'server_onupdate', 'quote', 'unique', 'system', '_proxies')
    def __init__(self, *args, **kwargs):
        """ Responsible for:
        * Filter out type-specific kwargs and init Type using these.
        * Filter out column-specific kwargs and init column using them.
        * If `args` are provided, that means a column proxy is being
          created; in this case the Type does not need to be created.
        """
        type_args, type_kw, cleaned_kw = self.process_type_args(kwargs)
        col_kw = self.process_column_args(cleaned_kw)
        # Column proxy is created by declarative extension
        if args:
            col_kw['name'], col_kw['type_'] = args
        # Column init when defining a schema
        else:
            col_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw)
        super(BaseField, self).__init__(**col_kw)
    def __setattr__(self, key, value):
        """ Store column name on 'self.type'
        This allows error messages in custom types' validation be more
        explicit.
        """
        if value is not None and key == 'name':
            self.type._column_name = value
        return super(BaseField, self).__setattr__(key, value)
    def process_type_args(self, kwargs):
        """ Process arguments of a sqla Type.
        http://docs.sqlalchemy.org/en/rel_0_9/core/type_basics.html#generic-types
        Process `kwargs` to extract type-specific arguments.
        If some arguments' names should be changed, extend this method
        with a manual args processor.
        Returns:
            * type_args: sequence of type-specific positional arguments
            * type_kw: dict of type-specific kwargs
            * cleaned_kw: input kwargs cleaned from type-specific args
        """
        type_kw = dict()
        type_args = ()
        cleaned_kw = kwargs.copy()
        # Move the unchanged-name type kwargs out of the column kwargs.
        for arg in self._type_unchanged_kwargs:
            if arg in cleaned_kw:
                type_kw[arg] = cleaned_kw.pop(arg)
        return type_args, type_kw, cleaned_kw
    def _drop_invalid_kwargs(self, kwargs):
        """ Drop keys from `kwargs` that are not present in
        `self._column_valid_kwargs`, thus are not valid kwargs to
        be passed to Column.
        """
        return {k: v for k, v in kwargs.items() if
                k in self._column_valid_kwargs}
    def process_column_args(self, kwargs):
        """ Process/extract/rename Column arguments.
        http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#column-table-metadata-api
        Changed:
            required -> nullable
            help_text -> doc
        """
        col_kw = kwargs.copy()
        # 'required' is the inverse of SQLAlchemy's 'nullable'.
        col_kw['nullable'] = not col_kw.pop('required', False)
        col_kw['doc'] = col_kw.pop('help_text', None)
        col_kw = self._drop_invalid_kwargs(col_kw)
        return col_kw
    @property
    def _constructor(self):
        # Return the subclass itself so column copies/proxies keep this type.
        return self.__class__
class BigIntegerField(ProcessableMixin, BaseField):
    """ BIGINT column; forwards 'min_value'/'max_value' range kwargs. """
    _sqla_type_cls = LimitedBigInteger
    _type_unchanged_kwargs = ('min_value', 'max_value')
class BooleanField(ProcessableMixin, BaseField):
    """ BOOLEAN column.

    Forwards the 'create_constraint' type kwarg and renames the field
    kwarg 'constraint_name' to the SQLAlchemy Boolean 'name' argument.
    """
    _sqla_type_cls = Boolean
    # BUG FIX: was ('create_constraint') -- a plain string, not a tuple --
    # so BaseField.process_type_args iterated its characters and the
    # 'create_constraint' kwarg was never forwarded to the type.
    _type_unchanged_kwargs = ('create_constraint',)
    def process_type_args(self, kwargs):
        """
        Changed:
            constraint_name -> name
        """
        type_args, type_kw, cleaned_kw = super(
            BooleanField, self).process_type_args(kwargs)
        type_kw.update({
            'name': cleaned_kw.pop('constraint_name', None),
        })
        return type_args, type_kw, cleaned_kw
class DateField(ProcessableMixin, BaseField):
    """ DATE column; takes no type-specific kwargs. """
    _sqla_type_cls = Date
    _type_unchanged_kwargs = ()
class DateTimeField(ProcessableMixin, BaseField):
    """ DATETIME column; forwards the 'timezone' type kwarg. """
    _sqla_type_cls = DateTime
    _type_unchanged_kwargs = ('timezone',)
class ChoiceField(ProcessableMixin, BaseField):
    """ String column restricted to the values given via 'choices'. """
    _sqla_type_cls = Choice
    _type_unchanged_kwargs = (
        'collation', 'convert_unicode', 'unicode_error',
        '_warn_on_bytestring', 'choices')
class FloatField(ProcessableMixin, BaseField):
    """ FLOAT column; precision/decimal kwargs plus min/max limits. """
    _sqla_type_cls = LimitedFloat
    _type_unchanged_kwargs = (
        'precision', 'asdecimal', 'decimal_return_scale',
        'min_value', 'max_value')
class IntegerField(ProcessableMixin, BaseField):
    """ INTEGER column; forwards 'min_value'/'max_value' range kwargs. """
    _sqla_type_cls = LimitedInteger
    _type_unchanged_kwargs = ('min_value', 'max_value')
class IdField(IntegerField):
    """ Just a subclass of IntegerField that must be used for fields
    that represent database-specific 'id' field.

    (No extra behaviour is added here; only the distinct type matters.)
    """
    pass
class IntervalField(ProcessableMixin, BaseField):
    """ INTERVAL column; forwards native/second_precision/day_precision. """
    _sqla_type_cls = Interval
    _type_unchanged_kwargs = (
        'native', 'second_precision', 'day_precision')
class BinaryField(ProcessableMixin, BaseField):
    """ Large binary (BLOB) column; forwards the 'length' type kwarg. """
    _sqla_type_cls = LargeBinary
    # FIX: the attribute name was garbled in the source
    # ('_type_unchanged_kw | args'); restored to match the sibling fields.
    _type_unchanged_kwargs = ('length',)
# Since SQLAlchemy 1.0.0
# class MatchField(BooleanField):
# _sqla_type_cls = MatchType
class DecimalField(ProcessableMixin, BaseField):
    """ NUMERIC/DECIMAL column; precision/scale kwargs plus min/max limits. """
    _sqla_type_cls = LimitedNumeric
    _type_unchanged_kwargs = (
        'precision', 'scale', 'decimal_return_scale', 'asdecimal',
        'min_value', 'max_value')
class PickleField(ProcessableMixin, BaseField):
    """ Column storing pickled Python objects.

    NOTE(review): PickleType deserializes arbitrary pickle data; never
    store untrusted input in this column.
    """
    # FIX: removed a stray ' | ' delimiter that corrupted this line.
    _sqla_type_cls = PickleType
    _type_unchanged_kwargs = (
        'protocol', 'pickler', 'comparator')
class SmallIntegerField(ProcessableMixin, BaseField):
    """ SMALLINT column; forwards 'min_value'/'max_value' range kwargs. """
    _sqla_type_cls = LimitedSmallInteger
    _type_unchanged_kwargs = ('min_value', 'max_value')
class StringField(ProcessableMixin, BaseField):
_sqla_type_cls = LimitedString
_type_unchanged_kwargs = (
'collation', 'convert_unicode', 'unicode_error',
'_warn_on_bytestring', 'min_length', 'max_length')
def process_type_args(self, kwargs):
"""
Changed:
max_ |
Eulercoder/fabulous | setup.py | Python | gpl-3.0 | 1,273 | 0.002357 | __author__ = 'vikesh'
import os
import sys
try:
from setuptools import | setup
except ImportError:
from distutils.core import setup
PYTHON3 = sys.version_info[0] > 2
required = []
if not PYTHON3:
required += ['importlib>=1.0.4']
packages = ['fabulous', 'fabulous.services']
try:
longdesc = open('README.md').read()
except:
longdesc = ''
setup(
    name='fabulous',
    version='0.0.1',
    description='Answer to all your queries right inside Slack!',
    # 'longdesc' was computed above but never passed to setup(); wire it in.
    long_description=longdesc,
    author='Eulercoder',
    author_email='hi@eulercoder.me',
    url='https://github.com/Eulercoder/fabulous',
    packages=packages,
    scripts=['bin/fabulous'],
    # BUG FIX: the dict literal used the key '' twice, so the first entry
    # (LICENSE) was silently dropped; merged into a single key.
    package_data={'': ['LICENSE', 'fabulous/services/*.py']},
    include_package_data=True,
    install_requires=required,
    # NOTE(review): 'license' says BSD-3-Clause but the classifier below
    # declares MIT -- confirm which one is intended.
    license='BSD-3-Clause',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
|
sckoh/cloudfire | python/cfcapp.py | Python | mit | 5,964 | 0.032864 | #
import time
from flask import Flask, json, request
from flask.app import setupmethod
from threading import Thread
class DaemonThread(Thread):
    """A Thread that always flags itself as a daemon before starting."""
    def start(self):
        # Daemon threads don't keep the interpreter alive at shutdown.
        self.daemon = True
        Thread.start(self)
class WSConnection(object):
    """ One live websocket connection.

    wsid identifies this particular socket; fid appears to identify the
    client session (ws_kill fans out by fid) -- confirm against LUA side.
    """
    def __init__(self, app, wsid, fid):
        self.app = app
        self.fid = fid
        self.wsid = wsid
    def __repr__(self):
        # Only the last 4 chars of fid, to keep logs short.
        return '<WSConnection %s fid=..%s>' % (self.wsid, self.fid[-4:])
    def created(self):
        # do something when first connected
        print "%r: created" % self
        pass
    def closed(self):
        # do something after being closed, but cannot send messages at this point
        print "%r: closed" % self
        pass
    def tx(self, msg):
        # Send a message to just this connection (delegates to the app).
        self.app.tx(msg, conn=self)
class CFCContextVars(object):
    # This class is put into the context of all templates as "CFC"
    # (see the context_processor registered in CFCFlask.__init__).
    @property
    def WEBSOCKET_URL(self):
        " Provide a URL for the websocket to be used "
        # ws:// for plain http, wss:// otherwise; /__W__ is the
        # websocket endpoint on the same host as the current request.
        scheme = request.environ['wsgi.url_scheme']
        return '%s://%s/__W__' % ('ws' if scheme == 'http' else 'wss', request.host)
class CFCFlask(Flask):
    ''' Extensions to Flask() object to support app needs for CFC frontend

        Adds websocket plumbing on top of Flask: connection tracking,
        Redis pub/sub based transmit helpers, rx-handler registration
        and long-lived background worker threads.
    '''
    #
    # override this -- what is the domain we're associated with
    # in the front end?
    # lh = localhost/127.0.0.1
    # none = no host given (ie. default)
    # example.com = traffic for example.com
    #
    # Your app can still handle other traffic, but websocket stuff should be on these domains.
    #
    # how often to do websocket-level keepalive on sockets.
    ping_rate = 15 # seconds
    def __init__(self, *a, **kws):
        # List of functions that want to receive data from websocket clients
        # Extend this using the decorator app.ws_rx_handler
        self.ws_rx_handlers = []
        # map of all current connections (wsid -> WSConnection)
        self.ws_connections = {}
        # Domains we are implementing today; lowercase, canonical names only.
        # you can still redirect www. variations and such, but don't include them
        # in this list.
        self.my_vhosts = kws.pop('vhosts', ['lh', 'none'])
        # We need some threads. You can add yours too, by decorating with
        # app.background_task
        self.ws_background_tasks = [ self.pinger, self.rxer ]
        super(CFCFlask, self).__init__(*a, **kws)
        @self.context_processor
        def extra_ctx():
            # expose the CFC helper object to every rendered template
            return dict(CFC = CFCContextVars())
    def pinger(self):
        # Keep all connections alive with some minimal traffic
        # (runs forever; meant to be started via start_bg_tasks).
        RDB = self.redis
        #RDB.publish('bcast', 'RESTART')
        while 1:
            RDB.publish('bcast', 'PING')
            time.sleep(self.ping_rate)
    def rxer(self):
        # Listen for all traffic from the clients to us. Forward upwards to app
        RDB = self.redis
        endpoints = ['rx|'+v for v in self.my_vhosts]
        while 1:
            # block on read from a few lists...
            vhost, here = RDB.blpop(endpoints)
            # name of list which provides the value is the vhost source
            assert vhost.startswith('rx|')
            vhost = vhost[3:]
            assert vhost in self.my_vhosts, "Unexpended hostname: %s" % vhost
            # This data from WS is already wrapped as JSON by LUA code. Trustable.
            try:
                here = json.loads(here)
            except:
                self.logger.error('Badly wrapped WS message? %s' % here, exc_info=1)
                continue
            assert 'fid' in here
            assert 'wsid' in here
            wsid = here['wsid']
            fid = here['fid']
            # Socket state changes will "state" set but not "msg"
            if 'state' in here:
                sc = here['state']
                if sc == 'OPEN':
                    self.ws_new_connection(wsid, fid)
                elif sc == 'CLOSE':
                    conn = self.ws_connections.pop(wsid, None)
                    if conn:
                        conn.closed()
                # end of processing.
                continue
            assert 'msg' in here
            conn = self.ws_connections.get(wsid, None)
            if not conn:
                # this will happen if you restart python while the nginx/lua stays up
                self.logger.warn('Existing/unexpected WSID')
                conn = self.ws_new_connection(wsid, fid)
            # Important: do not trust "msg" here as it comes
            # unverified from browser-side code. Could be nasty junk.
            msg = here.get('msg', None)
            if msg[0] == '{' and msg[-1] == '}':
                # looks like json
                try:
                    msg = json.loads(msg)
                except:
                    self.logger.debug('RX[%s] got bad JSON: %r' % (vhost, msg))
            for handler in self.ws_rx_handlers:
                handler(vhost, conn, msg)
            if not self.ws_rx_handlers:
                self.logger.debug('RX[%s] %r' % (vhost, msg))
    def ws_new_connection(self, wsid, fid):
        ''' New WS connection, track it.
        '''
        self.ws_connections[wsid] = rv = WSConnection(self, wsid, fid)
        rv.created()
        return rv
    def tx(self, msg, conn=None, fid=None, wsid=None, bcast=False):
        '''
        Send a message via websocket to a specific browser, specific tab (wsid) or all
        'msg' can be text, but should probably be JSON in most applications.
        '''
        assert conn or fid or wsid or bcast, "Must provide a destination"
        if conn:
            chan = 'wsid|' + conn.wsid
        elif wsid:
            chan = 'wsid|' + wsid
        elif fid:
            chan = 'fid|' + fid
        elif bcast:
            chan = 'bcast'
        # NOTE: Python 2 only ('basestring'); non-strings are JSON-encoded.
        if not isinstance(msg, basestring):
            # convert into json, if not already
            msg = json.dumps(msg)
        self.redis.publish(chan, msg)
    def ws_close(self, wsid_or_conn):
        '''
        Close a specific web socket from server side.
        LUA code detects this message and kills it's connection.
        Accepts either a WSConnection or a bare wsid string.
        '''
        self.tx('CLOSE', wsid=getattr(wsid_or_conn, 'wsid', wsid_or_conn))
    def ws_kill(self, conn):
        '''
        Close all web sockets from server side; because user mis-behaved, and
        also kill it's session on CFC. User will have to wait for javascript POW.
        '''
        self.tx('KILL', fid=conn.fid)
    @setupmethod
    def ws_rx_handler(self, f):
        """
        Registers a function to be called when traffic is received via web sockets
        """
        self.ws_rx_handlers.append(f)
        return f
    @setupmethod
    def background_task(self, f):
        """
        Registers a function to be run as a background thread
        """
        self.ws_background_tasks.append(f)
        return f
    def start_bg_tasks(self):
        ''' start long-lived background threads '''
        for fn in self.ws_background_tasks:
            DaemonThread(name=fn.__name__, target=fn, args=[]).start()
|
matevzmihalic/wlansi-store | wlansi_store/models.py | Python | agpl-3.0 | 1,854 | 0.007012 | from django.conf import settings
from django.db import models as django_models
from django.utils.translation import ugettext_lazy as _
from cms import models | as cms_models
from djangocms_utils import fields as cms_fields
from shop import models as shop_models
from shop.util import fields as shop_fields
from simple_translation import actions
CMSPLUGIN_BLOG_PLACEHOLDERS = getattr(settings, 'CMSPLUGIN_BLOG_PLACEHOLDERS', ('excerpt', 'content'))
class Product(shop_models.Product):
    """Shop product with CMS placeholder content attached."""
    # FIX: removed a stray ' | ' delimiter that had split the identifier
    # SimpleTranslationPlaceholderActions in the source line.
    placeholders = cms_fields.M2MPlaceholderField(
        actions=actions.SimpleTranslationPlaceholderActions(),
        placeholders=CMSPLUGIN_BLOG_PLACEHOLDERS)
    class Meta:
        pass
    def get_price(self):
        """Return the sum of attached Price rows, or unit_price if none exist."""
        # Single aggregate query instead of count() + aggregate().
        total = self.price_set.aggregate(
            django_models.Sum('price')).get('price__sum')
        return total if total is not None else self.unit_price
class ProductTitle(django_models.Model):
    # Per-language name/slug pair for a Product.
    product = django_models.ForeignKey(Product)
    language = django_models.CharField(max_length=2, choices=settings.LANGUAGES)
    name = django_models.CharField(max_length=255)
    slug = django_models.SlugField()
    def __unicode__(self):
        # Python 2 human-readable representation.
        return self.name
    class Meta:
        # A slug may repeat across languages but not within one.
        unique_together = ('language', 'slug')
class Item(django_models.Model):
    # Component line of a Product (name + quantity).
    product = django_models.ForeignKey(Product)
    item = django_models.CharField(max_length=255)
    quantity = django_models.IntegerField(default=1)
    # NOTE(review): BooleanField without an explicit default -- newer
    # Django versions require one; confirm the intended default.
    has_nodewatcher_firmware = django_models.BooleanField()
class Price(django_models.Model):
    # One price component of a Product; Product.get_price() sums these.
    product = django_models.ForeignKey(Product)
    price = shop_fields.CurrencyField()
    price_type = django_models.CharField(max_length=255, choices=((_('Purchase price'), _('Purchase price')), (_('Import tax'), _('Import tax')), ))
class ProductPlugin(cms_models.CMSPlugin):
    # CMS plugin instance that renders a single Product.
    product = django_models.ForeignKey(Product)
ogrady/GoodMorning | util.py | Python | gpl-3.0 | 6,532 | 0.008114 | import pygame
import time
import functools
from pygame import locals
from threading import Thread
from enum import Enum
'''
Utility classes and exceptions.
version: 1.0
author: Daniel O'Grady
'''
# CONSTANTS -- well-known file names used across the application
ALARMS_FILE = "alarms.json"
CONFIG_FILE = "config.cfg"
LOG_FILE = "goodmorning.log"
# CONFIG DEFAULTS
C_DEVELOPMENT = False
# CONFIG SECTIONS AND KEYS (section/key names inside config.cfg)
CS_GENERAL = "GENERAL"
CK_DEVELOPMENT = "development"
CS_NETWORK = "NETWORK"
CK_HOST = "host"
CK_PORT = "port"
class GoodMorningException(Exception):
    """Root of the application's exception hierarchy."""
    def __init__(self, message):
        super(GoodMorningException, self).__init__(message)
class AudioException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(AudioException, self).__init__(message)
class DisplayException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(DisplayException, self).__init__(message)
class AlarmException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(AlarmException, self).__init__(message)
class SchedulerException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(SchedulerException, self).__init__(message)
class DispatcherException(GoodMorningException):
    """Raised by EventDispatcher; adds no behaviour of its own."""
    def __init__(self, message):
        super(DispatcherException, self).__init__(message)
class ConfigException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(ConfigException, self).__init__(message)
class CommandException(GoodMorningException):
    """Domain-specific GoodMorningException subtype; adds no behaviour."""
    def __init__(self, message):
        super(CommandException, self).__init__(message)
class Singleton:
    '''
    A non-thread-safe helper class to ease implementing singletons,
    applied as a decorator (not a metaclass) to the target class.

    The decorated class may define an `__init__` taking only `self`,
    and cannot be inherited from. Access the single instance via the
    `instance` property; calling the decorated name directly raises
    TypeError.

    Adapted from https://stackoverflow.com/a/7346105
    '''
    def __init__(self, decorated):
        self._decorated = decorated
    @property
    def instance(self):
        '''
        Lazily create the instance on first access, then always return
        that same object.
        '''
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance
    def __call__(self):
        raise TypeError('Singletons must be accessed through `instance`.')
    def __instancecheck__(self, inst):
        # isinstance(x, DecoratedName) checks against the wrapped class.
        return isinstance(inst, self._decorated)
class Event(Enum):
    '''
    PyGame has this great thing where they use magic values
    to queue all events into one big queue. And users are kindly
    allowed to create custom events in a certain range. Screw them.
    '''
    # Custom pygame event ids; they must lie strictly between
    # pygame.locals.USEREVENT and NUMEVENTS (checked by the module-level
    # assert right below this class).
    KEYSTROKE = pygame.locals.USEREVENT + 1
    SOUND_ENDED = pygame.locals.USEREVENT + 2
# This is a special kind of stupid!
# While PyGame allows custom event types, they require you
# to be between their reserved values USEREVENT and NUMEVENTS,
# which gives you 9 values in total.
# But sometimes you have no choice but to identify the source
# of an event via the event-type-id (mixer.set_endevent).
# So here you go. Making sure all our constants are valid,
# looking forward to conflicting constant values when a user
# chooses use more than 5 channels... smh
# FIXME: incorporate maximum channels in Scene.__init__
assert functools.reduce((lambda o,n: o and n), map(lambda x: pygame.locals.USEREVENT < x < pygame.locals.NUMEVENTS, [e.value for e in Event]), True) , "all user events must be between USEREVENT (%d) and NUMEVENTS (%d)" % (p | ygame.locals.USEREVENT, pygame.locals.NUMEVENTS)
class EventDispatcher(object):
    '''
    Listener-style dispatcher for arbitrary events.

    Keeps a list of listeners; each listener must expose the method named
    at construction time, and dispatch() forwards every event to that
    method on every registered listener.
    '''
    def __init__(self, notify_method):
        self._notify_method = notify_method
        self._listeners = []
    def add_listener(self, listener):
        # Fail fast when the listener can't actually receive events.
        if hasattr(listener, self._notify_method):
            self._listeners.append(listener)
        else:
            raise DispatcherException("Listener of type '%s' does not offer a method named '%s'" % (type(listener), self._notify_method))
    def remove_listener(self, listener):
        self._listeners.remove(listener)
    def clear_listeners(self):
        self._listeners = []
    def dispatch(self, event):
        for subscriber in self._listeners:
            getattr(subscriber, self._notify_method)(event)
@Singleton
class PygameEventListener(object):
    """Polls the pygame event queue on every TimeTicker tick and
    re-dispatches each pygame event to its own listeners."""
    def __init__(self):
        TimeTicker.instance.dispatcher.add_listener(self)
        self.running = True
        self.dispatcher = EventDispatcher("on_pygame_event")
        pygame.init()
    def on_tick(self, elapsed):
        # Called by TimeTicker; drains the pygame event queue.
        try:
            for e in pygame.event.get():
                self.dispatcher.dispatch(e)
        except:
            # make sure the loop keeps running even if pygame errors out!
            # Errors may occur due to not having any actualy display.
            # But that would skip shutdown routines
            # FIXME: dispatch as special event?
            pass
    def stop(self):
        # NOTE(review): self.running is never set to False here, so a
        # second stop() call re-runs removal -- confirm this is intended.
        if self.running:
            self.dispatcher.clear_listeners()
            TimeTicker.instance.dispatcher.remove_listener(self)
@Singleton
class TimeTicker(Thread):
    """Background thread dispatching the elapsed wall-clock seconds to
    its listeners roughly every `delay` seconds."""
    def __init__(self, delay = 0.5):
        Thread.__init__(self, target = self.tick)
        self.delay = delay
        self.running = False
        self.dispatcher = EventDispatcher("on_tick")
        # starts ticking immediately upon construction
        self.start()
    def stop(self):
        # NOTE(review): no effect until tick() has set running=True, so a
        # stop() racing thread start-up is silently ignored.
        if self.running:
            self.dispatcher.clear_listeners()
            self.running = False
    def tick(self):
        # Thread body: dispatch time elapsed since the previous tick.
        self.running = True
        timestamp = time.time()
        while self.running:
            time.sleep(self.delay)
            now = time.time()
            self.dispatcher.dispatch(now - timestamp)
            timestamp = now
pygame.init()
|
isshe/Language | Python/20161127/11_unit_test_Dict.py | Python | gpl-3.0 | 868 | 0.008065 |
import unittest
from mydict import Dict
class TestDict(unittest.TestCase):
    """Unit tests for the attribute-accessible Dict class."""
    def test_init(self):
        d = Dict(a=1, b='test')
        self.assertEqual(d.a, 1)
        self.assertEqual(d.b, 'test')
        self.assertTrue(isinstance(d, dict))
    def test_key(self):
        d = Dict()
        d['key'] = 'value'
        self.assertEqual(d.key, 'value')
    def test_attr(self):
        d = Dict()
        d.key = 'value'
        self.assertTrue('key' in d)
        # BUG FIX: was assertTrue(d['key'], 'value'), which treats 'value'
        # as the failure message and never compares the two values.
        self.assertEqual(d['key'], 'value')
    def test_keyerror(self):
        d = Dict()
        with self.assertRaises(KeyError):
            value = d['empty']
    def test_attrerror(self):
        d = Dict()
        with self.assertRaises(AttributeError):
            value = d.empty
if __name__ == '__main__':
    unittest.main()
Triv90/SwiftUml | swift/common/middleware/bulk.py | Python | apache-2.0 | 17,862 | 0 | # Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
from urllib import quote, unquote
from xml.sax import saxutils
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
wsgify
from swift.common.utils import json, TRUE_VALUES
from swift.common.constraints import check_utf8, MAX_FILE_SIZE
from swift.common.http import HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, \
HTTP_NOT_FOUND
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \
MAX_CONTAINER_NAME_LENGTH
MAX_PATH_LENGTH = MAX_OBJECT_NAME_LENGTH + MAX_CONTAINER_NAME_LENGTH + 2
class CreateContainerError(Exception):
    """Raised when a container PUT subrequest fails.

    Carries the failed response's numeric status code and status line so
    the caller can report them per-file.
    """
    def __init__(self, msg, status_int, status):
        super(CreateContainerError, self).__init__(msg)
        self.status_int = status_int
        self.status = status
# Response formats the bulk middleware can render (see get_response_body).
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
                      'text/xml']
def get_response_body(data_format, data_dict, error_list):
    """
    Render bulk-operation results in the requested representation.

    :param data_format: MIME type of the desired output
    :param data_dict: generated summary data keyed by human-readable label
    :param error_list: (quoted filename, status) pairs for failed items
    :raises HTTPNotAcceptable: for any unsupported format
    """
    if data_format == 'text/plain':
        lines = ['%s: %s\n' % (key, data_dict[key])
                 for key in sorted(data_dict)]
        lines.append('Errors:\n')
        failures = '\n'.join('%s, %s' % (name, status)
                             for name, status in error_list)
        return ''.join(lines) + failures
    if data_format == 'application/json':
        # Note: mutates the caller's dict, matching long-standing behaviour.
        data_dict['Errors'] = error_list
        return json.dumps(data_dict)
    if data_format.endswith('/xml'):
        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n<delete>\n']
        for key in sorted(data_dict):
            tag = key.replace(' ', '_').lower()
            parts.append('<%s>%s</%s>\n' % (tag, data_dict[key], tag))
        parts.append('<errors>\n')
        parts.append('\n'.join(
            '<object><name>%s</name><status>%s</status></object>'
            % (saxutils.escape(name), status)
            for name, status in error_list))
        parts.append('</errors>\n</delete>\n')
        return ''.join(parts)
    raise HTTPNotAcceptable('Invalid output type')
class Bulk(object):
"""
Middleware that will do many operations on a single request.
Extract Archive:
Expand tar files into a swift account. Request must be a PUT with the
query parameter ?extract-archive=format specifying the format of archive
file. Accepted formats are tar, tar.gz, and tar.bz2.
For a PUT to the following url:
/v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
container, a pseudo-directory within a container, or an empty string. The
destination of a file in the archive will be built as follows:
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
Where FILE_PATH is the file name from the listing in the tar file.
If the UPLOAD_PATH is an empty string, containers will be auto created
accordingly and files in the tar that would not map to any container (files
in the base directory) will be ignored.
Only regular files will be uploaded. Empty directories, symlinks, etc will
not be uploaded.
If all valid files were uploaded successfully will return an HTTPCreated
response. If any files failed to be created will return an HTTPBadGateway
response. In both cases the response body will specify the number of files
successfully uploaded and a list of the files that failed. The return body
will be formatted in the way specified in the request's Accept header.
Acceptable formats are text/plain, application/json, application/xml, and
text/xml.
There are proxy logs created for each file (which becomes a subrequest) in
the tar. The subrequest's proxy log will have a swift.source set to "EA"
the log's content length will reflect the unzipped size of the file. If
double proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the unexpanded size of the tar.gz).
Bulk Delete:
Will delete multiple objects or containers from their account with a
single request. Responds to DELETE requests with query parameter
?bulk-delete set. The Content-Type should be set to text/plain.
The body of the DELETE request will be a newline separated list of url
encoded objects to delete. You can only delete 1000 (configurable) objects
per request. The objects specified in the DELETE request body must be URL
encoded and in the form:
/container_name/obj_name
or for a container (which must be empty at time of delete)
/container_name
If all items were successfully deleted (or did not exist), will return an
HTTPOk. If any failed to delete, will return an HTTPBadGateway. In
both cases the response body will specify the number of items
successfully deleted, not found, and a list of those that failed.
The return body will be formatted in the way specified in the request's
Accept header. Acceptable formats are text/plain, application/json,
application/xml, and text/xml.
There are proxy logs created for each object or container (which becomes a
subrequest) that is deleted. The subrequest's proxy log will have a
swift.source set to "BD" the log's content length of 0. If double
proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the list of objects/containers to be deleted).
"""
def __init__(self, app, conf):
    """Wire up the middleware and read its tunables from *conf*.

    :param app: the downstream WSGI application
    :param conf: middleware configuration dict (string values)
    """
    self.app = app

    def _limit(key, default):
        # All limits are stored as plain ints.
        return int(conf.get(key, default))

    self.max_containers = _limit('max_containers_per_extraction', 10000)
    self.max_failed_extractions = _limit('max_failed_extractions', 1000)
    self.max_deletes_per_request = _limit('max_deletes_per_request', 1000)
def create_container(self, req, container_path):
    """
    Makes a subrequest to create a new container.

    :params container_path: an unquoted path to a container to be created
    :returns: None on success
    :raises: CreateContainerError on creation error
    """
    new_env = req.environ.copy()
    new_env['PATH_INFO'] = container_path
    # Tag the subrequest so proxy logging attributes it to Extract Archive.
    new_env['swift.source'] = 'EA'
    create_cont_req = Request.blank(container_path, environ=new_env)
    resp = create_cont_req.get_response(self.app)
    # Any non-2xx status means the container could not be created.
    if resp.status_int // 100 != 2:
        raise CreateContainerError(
            "Create Container Failed: " + container_path,
            resp.status_int, resp.status)
def get_objs_to_delete(self, req):
"""
Will populate objs_to_delete with data from request input.
:params req: a Swob request
:returns: a list of the contents of req.body when separated by newline.
:raises: HTTPException on failures
"""
line = ''
data_remaini | ng = True
objs_to_delete = []
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPBadRequest('Invalid request: no content sent.')
while data_remaining:
if len(objs_to_delete) > self.max_deletes_per_ | reques |
mnach/suds-py3k | suds/transport/options.py | Python | lgpl-3.0 | 2,197 | 0.002276 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for transport options.
"""
from suds.transport import *
from suds.properties import *
class Options(Skin):
    """
    Transport options (see L{suds.properties} for the Skin mechanics).

    Options:
        - B{proxy} - An http proxy to be specified on requests.
             The proxy is defined as {protocol:proxy,}
                - type: I{dict}
                - I{str} B{http} - The I{http} protocol proxy URL.
                - I{str} B{https} - The I{https} protocol proxy URL.
                - default: {}
        - B{timeout} - Set the url open timeout (seconds).
                - type: I{float}
                - default: 90
        - B{headers} - Extra HTTP headers.
                - type: I{dict}
                - default: {}
        - B{username} - The username used for http authentication.
                - type: I{str}
                - default: None
        - B{password} - The password used for http authentication.
                - type: I{str}
                - default: None
    """
    def __init__(self, **kwargs):
        # Property domain is this module's dotted name.
        domain = __name__
        definitions = [
            Definition('proxy', dict, {}),
            Definition('timeout', (int,float), 90),
            Definition('headers', dict, {}),
            Definition('username', str, None),
            Definition('password', str, None),
        ]
        Skin.__init__(self, domain, definitions, kwargs)
jedie/django-cms-tools | django_cms_tools/plugin_landing_page/app_settings.py | Python | gpl-3.0 | 878 | 0.005695 |
"""
:create: 2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from django.conf import settings
from django.utils.translation import ugettext_lazy as _

# Template used to render one landing page:
# (reconstructed: the assignment line was garbled in the source)
LANDING_PAGE_TEMPLATE = getattr(settings, "LANDING_PAGE_TEMPLATE", "landing_page/landing_page.html")

# redirect user, e.g.: /en/langing_page/ -> /
LANDING_PAGE_HIDE_INDEX_VIEW = getattr(settings, "LANDING_PAGE_HIDE_INDEX_VIEW", True)

# Always expand toolbar or all links only if current page is the landing page app?
LANDING_PAGE_ALWAYS_ADD_TOOLBAR = getattr(settings, "LANDING_PAGE_ALWAYS_ADD_TOOLBAR", True)

# Menu Text in cms toolbar:
LANDING_PAGE_TOOLBAR_VERBOSE_NAME = getattr(settings, "LANDING_PAGE_TOOLBAR_VERBOSE_NAME", _("Landing Pages"))
|
wwj718/ANALYSE | common/djangoapps/terrain/stubs/lti.py | Python | agpl-3.0 | 12,436 | 0.002493 | """
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at the same time. It is
not possible to have this LTI multiple times on a single page in LMS.
"""
from uuid import uuid4
import textwrap
import urllib
import re
from oauthlib.oauth1.rfc5849 import signature, parameters
import oauthlib.oauth1
import hashlib
import base64
import mock
import requests
from http import StubHttpRequestHandler, StubHttpService
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://127.0.0.1:{port}/'
def do_GET(self):
    """Answer any GET with a plain-text banner.

    Used by tests to verify that the stub LTI Provider started correctly.
    """
    headers = {'Content-type': 'text/plain'}
    self.send_response(200, 'This is LTI Provider.', headers)
def do_POST(self):
    """
    Handle a POST request from the client and sends response back.
    """
    # Grade callbacks: forward the grade to the consumer, echo its reply.
    if 'grade' in self.path and self._send_graded_result().status_code == 200:
        status_message = 'LTI consumer (edX) responded with XML content:<br>' + self.server.grade_data['TC answer']
        content = self._create_content(status_message)
        self.send_response(200, content)
    elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
        status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
            self.server.grade_data['status_code'])
        content = self._create_content(status_message)
        self.send_response(200, content)
    elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
        status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
            self.server.grade_data['status_code'])
        content = self._create_content(status_message)
        self.send_response(200, content)
    # Respond to request with correct lti endpoint
    elif self._is_correct_lti_request():
        # Verify the OAuth signature over all params except the signature.
        params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
        if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
            status_message = "This is LTI tool. Success."
            # Set data for grades what need to be stored as server data
            if 'lis_outcome_service_url' in self.post_dict:
                # https is downgraded to http because the stub has no TLS.
                self.server.grade_data = {
                    'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
                    'sourcedId': self.post_dict.get('lis_result_sourcedid')
                }
            submit_url = '//{}:{}'.format(*self.server.server_address)
            content = self._create_content(status_message, submit_url)
            self.send_response(200, content)
        else:
            content = self._create_content("Wrong LTI signature")
            self.send_response(200, content)
    else:
        content = self._create_content("Invalid request URL")
        self.send_response(500, content)
def _send_graded_result(self):
    """
    POST a replaceResult request (hard-coded grade 0.5) to the consumer's
    outcome service URL and return the ``requests`` response.

    Stores the consumer's raw reply in ``self.server.grade_data['TC answer']``.
    """
    values = {
        'textString': 0.5,
        'sourcedId': self.server.grade_data['sourcedId'],
        'imsx_messageIdentifier': uuid4().hex,
    }
    payload = textwrap.dedent("""
        <?xml version = "1.0" encoding = "UTF-8"?>
        <imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
          <imsx_POXHeader>
            <imsx_POXRequestHeaderInfo>
              <imsx_version>V1.0</imsx_version>
              <imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
            </imsx_POXRequestHeaderInfo>
          </imsx_POXHeader>
          <imsx_POXBody>
            <replaceResultRequest>
              <resultRecord>
                <sourcedGUID>
                  <sourcedId>{sourcedId}</sourcedId>
                </sourcedGUID>
                <result>
                  <resultScore>
                    <language>en-us</language>
                    <textString>{textString}</textString>
                  </resultScore>
                </result>
              </resultRecord>
            </replaceResultRequest>
          </imsx_POXBody>
        </imsx_POXEnvelopeRequest>
        """)
    data = payload.format(**values)
    url = self.server.grade_data['callback_url']
    headers = {
        'Content-Type': 'application/xml',
        'X-Requested-With': 'XMLHttpRequest',
        'Authorization': self._oauth_sign(url, data)
    }
    # Send request ignoring verification of the SSL certificate.
    # (reconstructed: this assignment was garbled in the source dump)
    response = requests.post(url, data=data, headers=headers, verify=False)
    self.server.grade_data['TC answer'] = response.content
    return response
def _send_lti2_outcome(self):
    """
    Send a grade (hard-coded score 0.8) back to the consumer via the
    LTI 2.0 JSON result service.
    """
    # Doubled braces survive str.format; only {score} is substituted.
    payload = textwrap.dedent("""
        {{
            "@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
            "@type" : "Result",
            "resultScore" : {score},
            "comment" : "This is awesome."
        }}
        """)
    data = payload.format(score=0.8)
    return self._send_lti2(data)
def _send_lti2_delete(self):
    """Send an LTI 2.0 result-delete payload back to the consumer."""
    delete_body = textwrap.dedent("""
        {
            "@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
            "@type" : "Result"
        }
        """)
    return self._send_lti2(delete_body)
def _send_lti2(self, payload):
    """
    Send lti2 json result service request.

    PUTs *payload* to the LTI 2.0 result REST endpoint derived from the
    launch callback URL, records status/content on the server, and
    returns the ``requests`` response.
    """
    # We compute the LTI V2.0 service endpoint from the callback_url
    # (which is set by the launch call).
    url = self.server.grade_data['callback_url']
    url_parts = url.split('/')
    url_parts[-1] = "lti_2_0_result_rest_handler"
    # The anonymous user id is the last colon-separated component of the
    # sourcedId.
    anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
    url_parts.extend(["user", anon_id])
    new_url = '/'.join(url_parts)
    content_type = 'application/vnd.ims.lis.v2.result+json'
    headers = {
        'Content-Type': content_type,
        'Authorization': self._oauth_sign(new_url, payload,
                                          method='PUT',
                                          content_type=content_type)
    }
    # Send request ignoring verification of the SSL certificate.
    response = requests.put(new_url, data=payload, headers=headers, verify=False)
    self.server.grade_data['status_code'] = response.status_code
    self.server.grade_data['TC answer'] = response.content
    return response
def _create_content(self, response_text, submit_url=None):
"""
Return content (str) either for launch, send grade or get result from TC.
"""
if submit_url:
submit_form = textwrap.dedent("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit">
</form>
<form action="{submit_url}/lti2_outcome" method="post">
<input type="submit" name="submit-lti2-button" value="Submit">
</form>
<form action="{submit_url}/lti2_delete" method="post">
<input type="submit" name="submit-lti2-delete-button" value="Submit">
</form>
""").format(submit_url=submit_url)
else:
submit_form = ''
# Show roles only for LTI launch.
if self.post_dict.get('role |
ghchinoy/tensorflow | tensorflow/compiler/tests/sparse_to_dense_op_test.py | Python | apache-2.0 | 4,522 | 0.007077 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices,
                   output_size,
                   sparse_values,
                   default_value,
                   validate_indices=True):
  """Evaluate sparse_to_dense with *sparse_indices* fed via a placeholder."""
  indices_ph = array_ops.placeholder(dtypes.int32)
  dense = sparse_ops.sparse_to_dense(
      indices_ph,
      output_size,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices)
  return dense.eval(feed_dict={indices_ph: sparse_indices})
class SparseToDenseTest(xla_test.XLATestCase):
  """Exercises sparse_to_dense under XLA compilation.

  Two lines garbled in the source dump are reconstructed below
  (testZeroDefault's session call and testDegenerateIndexMatrix's
  index matrix).
  """

  def testInt(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, 0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testFloat(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
      self.assertAllClose(np_ans, tf_ans)

  def testSetValue(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
      np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testSetSingleValue(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, -1)
      np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def test2d(self):
    # pylint: disable=bad-whitespace
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
      np_ans = np.array([[-1, -1, -1, -1],
                         [-1, -1, -1,  1],
                         [ 1, -1, -1, -1]]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testZeroDefault(self):
    with self.session():
      x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
      self.assertAllEqual(x, [0, 0, 7, 0])

  def test3d(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
      np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
      np_ans[1, 3, 0] = 1
      np_ans[2, 0, 1] = 1
      self.assertAllClose(np_ans, tf_ans)

  def testDegenerateIndexMatrix(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[2], [3], [4], [5], [6], [7], [8], [9]], [10],
                              [1, 2, 3, 4, 5, 6, 7, 8], -1)
      self.assertAllClose([-1, -1, 1, 2, 3, 4, 5, 6, 7, 8], tf_ans)

  def testBadShape(self):
    with self.session(), self.test_scope():
      with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
        _SparseToDense([1, 3], [[5], [3]], 1, -1)

  def testBadValue(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[2,1\], "
          r"should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [[5], [3]], -1)

  def testBadNumValues(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [1, 2, 3], -1)

  def testBadDefault(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError("default_value should be a scalar"):
        _SparseToDense([1, 3], [5], [1, 2], [0])
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
|
rhg/Qwibber | gwibber/microblog/plugins/statusnet/__init__.py | Python | gpl-2.0 | 9,075 | 0.012011 | import re
from gwibber.microblog import network, util
import gnomekeyring
from oauth import oauth
from gwibber.microblog.util import log, resources
from gettext import lgettext as _
log.logger.name = "StatusNet"
PROTOCOL_INFO = {
"name": "StatusNet",
"version": 1.1,
"config": [
"private:secret_token",
"access_token",
"username",
"site_display_name",
"url_prefix",
"color",
"receive_enabled",
"send_enabled",
],
"authtype": "oauth1a",
"color": "#4E9A06",
"features": [
"send",
"receive",
"search",
"tag",
"reply",
"responses",
"private",
"public",
"delete",
"retweet",
"like",
"send_thread",
"send_private",
"user_messages",
"sinceid",
],
"default_streams": [
"receive",
"images",
"responses",
"private",
"public",
],
}
class Client:
def __init__(self, acct):
    """Bind the client to a StatusNet account dict (Python 2 code)."""
    # Normalize the configured URL prefix: default to https:// when the
    # account value does not already carry a scheme.
    if acct.has_key("url_prefix"):
        pref = "" if acct["url_prefix"].startswith("http") else "https://"
        self.url_prefix = pref + acct["url_prefix"]
    # Drop settings superseded by OAuth tokens / url_prefix.
    # NOTE: this mutates the caller's dict in place.
    if acct.has_key("secret_token") and acct.has_key("password"): acct.pop("password")
    if not acct.has_key("url_prefix") and acct.has_key("domain"): acct.pop("domain")
    self.account = acct
def _common(self, data):
    """Build the gwibber message dict fields shared by all entry types.

    Returns a (possibly partially filled) dict; on any parsing failure
    the error is logged and whatever was populated so far is returned.
    """
    m = {}
    try:
        m["mid"] = str(data["id"])
        m["service"] = "statusnet"
        m["account"] = self.account["id"]
        m["time"] = util.parsetime(data["created_at"])
        m["source"] = data.get("source", False)
        m["text"] = data["text"]
        m["to_me"] = ("@%s" % self.account["username"]) in data["text"]
        # Two renderings: "html" links back to the StatusNet site,
        # "content" links through gwibber's internal URI scheme.
        m["html"] = util.linkify(m["text"],
            ((util.PARSE_HASH, '#<a class="hash" href="%s#search?q=\\1">\\1</a>' % self.account["url_prefix"]),
            (util.PARSE_NICK, '@<a class="nick" href="%s/\\1">\\1</a>' % self.account["url_prefix"])))
        m["content"] = util.linkify(m["text"],
            ((util.PARSE_HASH, '#<a class="hash" href="gwibber:/tag?acct=%s&query=\\1">\\1</a>' % m["account"]),
            (util.PARSE_NICK, '@<a class="nick" href="gwibber:/user?acct=%s&name=\\1">\\1</a>' % m["account"])))
        # Collect image attachments plus any previews found in the text.
        images = []
        if data.get("attachments", 0):
            for a in data["attachments"]:
                mime = a.get("mimetype", "")
                if mime and mime.startswith("image") and a.get("url", 0):
                    images.append({"src": a["url"], "url": a["url"]})
        images.extend(util.imgpreview(m["text"]))
        if images:
            m["images"] = images
            m["type"] = "photo"
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are not swallowed. One malformed entry must not break the
        # whole stream: log and return the partially-built message.
        log.logger.error("%s failure - %s", PROTOCOL_INFO["name"], data)
    return m
def _message(self, data):
    """Convert a status/timeline entry into a gwibber message dict."""
    m = self._common(data)
    # Record what this status replies to, when present.
    if data.has_key("in_reply_to_status_id"):
        if data["in_reply_to_status_id"]:
            m["reply"] = {}
            m["reply"]["id"] = data["in_reply_to_status_id"]
            m["reply"]["nick"] = data["in_reply_to_screen_name"]
            m["reply"]["url"] = "/".join((self.account["url_prefix"], "notice", str(m["reply"]["id"])))
    # Timeline entries carry "user"; direct messages carry "sender".
    user = data.get("user", data.get("sender", 0))
    m["sender"] = {}
    m["sender"]["name"] = user["name"]
    m["sender"]["nick"] = user["screen_name"]
    m["sender"]["id"] = user["id"]
    m["sender"]["location"] = user["location"]
    m["sender"]["followers"] = user["followers_count"]
    m["sender"]["image"] = user["profile_image_url"]
    m["sender"]["url"] = "/".join((self.account["url_prefix"], m["sender"]["nick"]))
    m["sender"]["is_me"] = m["sender"]["nick"] == self.account["username"]
    m["url"] = "/".join((self.account["url_prefix"], "notice", m["mid"]))
    return m
def _private(self, data):
    """Convert a direct-message entry: mark it private and fill recipient."""
    m = self._message(data)
    m["private"] = True
    m["recipient"] = {}
    m["recipient"]["name"] = data["recipient"]["name"]
    m["recipient"]["nick"] = data["recipient"]["screen_name"]
    m["recipient"]["id"] = data["recipient"]["id"]
    m["recipient"]["image"] = data["recipient"]["profile_image_url"]
    m["recipient"]["location"] = data["recipient"]["location"]
    m["recipient"]["url"] = "/".join((self.account["url_prefix"], m["recipient"]["nick"]))
    # Case-insensitive comparison against the configured account name.
    # (reconstructed: this line was garbled in the source dump)
    m["recipient"]["is_me"] = m["recipient"]["nick"].lower() == self.account["username"].lower()
    m["to_me"] = m["recipient"]["is_me"]
    return m
def _result(self, data):
    """Convert a search-API result entry into a gwibber message dict.

    Search results use flat from_user/to_user fields instead of the
    nested "user" object the timeline API returns.
    """
    m = self._common(data)
    if data["to_user_id"]:
        m["reply"] = {}
        m["reply"]["id"] = data["to_user_id"]
        m["reply"]["nick"] = data["to_user"]
    m["sender"] = {}
    # (reconstructed: this assignment was garbled in the source dump)
    m["sender"]["nick"] = data["from_user"]
    m["sender"]["id"] = data["from_user_id"]
    m["sender"]["image"] = data["profile_image_url"]
    m["sender"]["url"] = "/".join((self.account["url_prefix"], m["sender"]["nick"]))
    m["url"] = "/".join((self.account["url_prefix"], "notice", str(m["mid"])))
    return m
def _get(self, path, parse="message", post=False, single=False, **args):
    """Perform an OAuth-signed StatusNet API call and parse the reply.

    ``parse`` names the ``_<parse>`` converter applied to each entry;
    ``single`` means the reply is one object rather than a list.
    Errors are returned as a one-element list of error dicts.
    """
    if not self.account.has_key("access_token") and not self.account.has_key("secret_token"):
        log.logger.error("%s unexpected result - %s", PROTOCOL_INFO["name"], _("Account needs to be re-authorized"))
        return [{"error": {"type": "auth", "account": self.account, "message": _("Account needs to be re-authorized")}}]
    url = "/".join((self.account["url_prefix"], "api", path))
    # StatusNet accepts an "anonymous" consumer; only the token matters.
    self.sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
    self.consumer = oauth.OAuthConsumer("anonymous", "anonymous")
    self.token = oauth.OAuthToken(self.account["access_token"], self.account["secret_token"])
    parameters = util.compact(args)
    request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, self.token,
        http_method=post and "POST" or "GET", http_url=url, parameters=parameters)
    request.sign_request(self.sigmethod, self.consumer, self.token)
    if post:
        data = network.Download(request.to_url(), parameters, post).get_json()
    else:
        data = network.Download(request.to_url(), None, post).get_json()
    # Keep a copy of the raw reply for debugging.
    resources.dump(self.account["service"], self.account["id"], data)
    if isinstance(data, dict) and data.get("error", 0):
        log.logger.error("%s failure - %s", PROTOCOL_INFO["name"], data["error"])
        if "authenticate" in data["error"]:
            return [{"error": {"type": "auth", "account": self.account, "message": data["error"]}}]
        else:
            return [{"error": {"type": "unknown", "account": self.account, "message": data["error"]}}]
    elif isinstance(data, str):
        log.logger.error("%s unexpected result - %s", PROTOCOL_INFO["name"], data)
        return [{"error": {"type": "unknown", "account": self.account, "message": data}}]
    if single: return [getattr(self, "_%s" % parse)(data)]
    if parse: return [getattr(self, "_%s" % parse)(m) for m in data]
    else: return []
    # NOTE(review): unreachable -- both branches above return; kept for
    # fidelity with the original source.
    return [self._result(m) for m in data]
def _search(self, **args):
    """Hit the StatusNet search API and parse each hit via _result."""
    search_url = "%s/api/search.json" % self.account["url_prefix"]
    response = network.Download(search_url, util.compact(args)).get_json()
    return [self._result(hit) for hit in response["results"]]
def __call__(self, opname, **args):
    """Dispatch an operation by name, e.g. ``client("receive", count=20)``."""
    operation = getattr(self, opname)
    return operation(**args)
def receive(self, count=util.COUNT, since=None):
    """Fetch the home (friends) timeline for the account."""
    return self._get("statuses/friends_timeline.json", count=count, since_id=since, source="Gwibber")
def user_messages(self, id=None, count=util.COUNT, since=None):
    """Fetch the timeline of a single user identified by ``id``."""
    return self._get("statuses/user_timeline.json", id=id, count=count, since_id=since, source="Gwibber")
def responses(self, count=util.COUNT, since=None):
    """Fetch mentions of the account."""
    return self._get("statuses/mentions.json", count=count, since_id=since, source="Gwibber")
def private(self, count=util.COUNT, since=None):
    """Fetch received and sent direct messages, merged into one list."""
    private = self._get("direct_messages.json", "private", count=count, since_id=since, source="Gwibber") or []
    private_sent = self._get("direct_messages/sent.json", "private", count=count, since_id=since, source="Gwibber") or []
    return private + private_sent
def public(self, count=util.COUNT, since=None):
    """Fetch the public timeline.

    NOTE(review): ``count``/``since`` are accepted but not forwarded to
    the API call -- confirm whether that is intentional.
    """
    return self._get("statuses/public_timeline.json", source="Gwibber")
def search(self, query, count=util.COUNT, since=None):
    """Full-text search of notices; ``rpp`` is results-per-page."""
    return self._search(q=query, rpp=count, since_id=since, source="Gwibber")
def tag(self, query, count=util.COUNT, since=None):
return |
badboy99tw/mass | mass/scheduler/swf/__init__.py | Python | apache-2.0 | 12,898 | 0.001085 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module implements mass worker by AWS SWF.
"""
# built-in modules
from __future__ import print_function
from functools import reduce, wraps
from multiprocessing import Event, Process, Queue
import json
import signal
import socket
import sys
import time
import traceback
# 3rd-party modules
from botocore.client import Config
import boto3
# local modules
from mass.exception import TaskError, TaskWait
from mass.input_handler import InputHandler
from mass.scheduler.swf import config
from mass.scheduler.swf.decider import Decider
from mass.scheduler.swf.step import StepHandler, ChildWorkflowExecution, ActivityTask
from mass.scheduler.worker import BaseWorker
def get_priority(root, root_priority, target_index):
    """Compute the SWF priority for the child at *target_index* of *root*.

    *root* is a one-key dict ({'Job': spec} or {'Task': spec}) whose spec
    has 'children' (list of {'Task': ...} / {'Action': ...} nodes) and an
    optional 'parallel' flag.

    Parallel children (or the first serial child) run right after the root,
    so they get ``root_priority + 1``.  A later serial child is delayed by
    the maximum number of serial steps of every preceding sibling.

    Fixes: the original crashed (bare ``reduce``/``max`` over an empty
    sequence) on a Task node with an empty children list; such nodes now
    contribute zero serial steps.
    """
    def count_max_serial_children(task):
        # Serial steps contributed by *task*; an Action has no nested steps.
        if 'Action' in task:
            return 0
        spec = task['Task']
        costs = [count_max_serial_children(c) + 1 for c in spec['children']]
        if spec.get('parallel', False):
            # Parallel children overlap: only the longest chain counts.
            return max(costs) if costs else 0
        # Serial children run back to back: their chains add up.
        return sum(costs)

    type_ = next(iter(root))
    is_parallel = root[type_].get('parallel', False)
    brothers = root[type_]['children'][:target_index]
    if is_parallel or not brothers:
        # Parallel siblings, or the very first child: nothing to wait for.
        return root_priority + 1
    return sum(count_max_serial_children(b) + 1 for b in brothers) + root_priority + 1
class SWFDecider(Decider):
    """SWF decider: replays workflow history and schedules the next steps.

    Walks the Job/Task tree stored in the workflow input, starting child
    workflows for sub-tasks and activity tasks for actions.
    """

    def run(self, task_list):
        """Poll one decision task from SWF and process it.

        Completes, fails, or suspends the workflow depending on how the
        replayed execution turns out.
        """
        events = self.poll(task_list)
        if not events:
            return
        self.handler = StepHandler(
            events,
            activity_max_retry=config.ACTIVITY_MAX_RETRY,
            workflow_max_retry=config.WORKFLOW_MAX_RETRY)
        try:
            result = self.execute()
            if self.handler.is_waiting():
                raise TaskWait
        except TaskWait:
            self.suspend()
        except TaskError as error:
            self.fail(error.reason, error.details)
        except Exception as error:
            # Unexpected failure: record the repr plus the full traceback.
            # (reconstructed: this call was garbled in the source dump;
            # also narrowed from a bare `except:`)
            self.fail(repr(error), json.dumps(traceback.format_exc()))
        else:
            self.complete(result)

    def execute(self):
        """Execute input of SWF workflow.

        Serial children are scheduled and waited on one by one; parallel
        children are all scheduled first and then waited on together.
        """
        type_ = 'Job' if 'Job' in self.handler.input else 'Task'
        parallel = self.handler.input[type_].get('parallel', False)
        # (reconstructed: the loop header was garbled in the source dump)
        for i, child in enumerate(self.handler.input[type_]['children']):
            priority = get_priority(self.handler.input, self.handler.priority, i)
            if 'Task' in child:
                self.execute_task(child, priority)
            elif 'Action' in child and not child['Action']['_whenerror']:
                self.execute_action(child, priority)
            if not parallel:
                self.wait()
        if parallel:
            for child in self.handler.input[type_]['children']:
                self.wait()

    def execute_task(self, task, priority):
        """Schedule task to SWF as child workflow and wait. If the task is
        not completed, raise TaskWait.
        """
        if self.handler.is_waiting():
            raise TaskWait
        elif self.handler.is_scheduled():
            # Already started in a previous decision; nothing to do.
            return
        else:
            handler = InputHandler(self.handler.protocol)
            ChildWorkflowExecution.start(
                decisions=self.decisions,
                name=self.handler.get_next_workflow_name(task['Task']['title']),
                input_data={
                    'protocol': self.handler.protocol,
                    'body': handler.save(
                        data=task,
                        genealogy=self.handler.tag_list + [task['Task']['title']])
                },
                tag_list=self.handler.tag_list + [task['Task']['title']],
                priority=priority)

    def execute_action(self, action, priority):
        """Schedule action to SWF as activity task and wait. If action is
        not completed, raise TaskWait.
        """
        if self.handler.is_waiting():
            raise TaskWait
        elif self.handler.is_scheduled():
            return
        else:
            handler = InputHandler(self.handler.protocol)
            action_name = self.handler.get_next_activity_name()
            ActivityTask.schedule(
                self.decisions,
                name=action_name,
                input_data={
                    'protocol': self.handler.protocol,
                    'body': handler.save(
                        data=action,
                        genealogy=self.handler.tag_list + ['Action%s' % action_name])
                },
                # Route to the action's role-specific task list if set.
                task_list=action['Action'].get('_role', config.ACTIVITY_TASK_LIST),
                priority=priority
            )

    def fail(self, reason, details):
        """Run error-handler (_whenerror) actions, then fail the workflow.

        Reason/details are truncated to the SWF API size limits.
        """
        try:
            type_ = 'Job' if 'Job' in self.handler.input else 'Task'
            for i, child in enumerate(self.handler.input[type_]['children']):
                if 'Action' not in child:
                    continue
                if child['Action']['_whenerror'] is False:
                    continue
                priority = get_priority(self.handler.input, self.handler.priority, i)
                self.execute_action(child, priority)
                self.wait()
        except TaskWait:
            self.suspend()
        except TaskError as error:
            super(SWFDecider, self).fail(
                error.reason[:config.MAX_REASON_SIZE] if error.reason else error.reason,
                error.details[:config.MAX_DETAIL_SIZE] if error.details else error.details)
        except Exception as error:
            # Narrowed from a bare `except:`.
            super(SWFDecider, self).fail(
                repr(error)[:config.MAX_REASON_SIZE],
                traceback.format_exc()[:config.MAX_DETAIL_SIZE])
        else:
            super(SWFDecider, self).fail(
                reason[:config.MAX_REASON_SIZE] if reason else reason,
                details[:config.MAX_DETAIL_SIZE] if details else details)

    def wait(self):
        """Check if the next step could be processed. If the previous step
        is submitted to SWF, processed and successful, return its result.
        """
        if self.decisions._data:
            # Decisions already queued this round: stop and submit them.
            raise TaskWait
        with self.handler.pop() as step:
            if not step:
                return
            if step.status() in ['Failed', 'TimedOut']:
                if step.should_retry():
                    step.retry(self.decisions)
                    raise TaskWait
                else:
                    error = step.error()
                    step.is_checked = True
                    raise TaskError(error.reason, error.details)
            else:
                return step.result()
def execute_action_proc(execute, action, event, queue):
    """Run ``execute(action)`` in a worker process and report via *queue*.

    Puts exactly one status dict on *queue* -- 'completed' (with result and
    wall-clock execution_time) or 'failed' (with reason/details) -- and
    always sets *event* so the parent knows the attempt finished.
    """
    try:
        start_time = time.time()
        result = execute(action)
        execution_time = time.time() - start_time
        queue.put({
            'status': 'completed',
            'result': result,
            'execution_time': execution_time
        })
    except TaskError as err:
        # Task-level failures carry a structured reason/details pair.
        queue.put({
            'status': 'failed',
            'reason': err.reason,
            'details': err.details
        })
    except Exception as e:
        # Unexpected failures: report repr + traceback for diagnosis.
        # (removed an unused `sys.exc_info()` unpack from the original)
        queue.put({
            'status': 'failed',
            'reason': repr(e),
            'details': traceback.format_exc()
        })
    finally:
        event.set()
class SWFWorker(BaseWorker):
def __init__(self, domain=None, region=None):
super(SWFWorker, self).__init__()
self.domain = |
lpantano/bcbio-nextgen | bcbio/variation/vardict.py | Python | mit | 15,244 | 0.004395 | """Sensitive variant calling using VarDict.
Defaults to using the faster, equally sensitive Java port:
https://github.com/AstraZeneca-NGS/VarDictJava
if 'vardict' or 'vardict-java' is specified in the configuration. To use the
VarDict perl version:
https://github.com/AstraZeneca-NGS/VarDict
specify 'vardict-perl'.
"""
import os
import itertools
import sys
import toolz as tz
import pybedtools
from bcbio import broad, utils
from bcbio.bam import highdepth
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bamprep, vcfutils
def _is_bed_file(target):
    # Truthy when *target* is a string naming an existing file on disk;
    # short-circuits to target's own falsy value when target is empty/None.
    # NOTE(review): uses `basestring`, i.e. this module targets Python 2.
    return target and isinstance(target, basestring) and os.path.isfile(target)
def _vardict_options_from_config(items, config, out_file, target=None):
    """Assemble VarDict command line options for a call region.

    *target* must be an existing BED file; it is cleaned of problem
    regions and appended as the final positional argument.
    """
    # Column options: chrom=1, start=2, end=3, gene/annotation=4.
    opts = ["-c 1", "-S 2", "-E 3", "-g 4"]
    # ["-z", "-F", "-c", "1", "-S", "2", "-E", "3", "-g", "4", "-x", "0",
    # "-k", "3", "-r", "4", "-m", "8"]
    resources = config_utils.get_resources("vardict", config)
    if resources.get("options"):
        opts += resources["options"]
    assert _is_bed_file(target)
    # Whole genome runs: drop extreme-depth and low-complexity regions
    # before calling to avoid pathological runtimes.
    if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
           for x in items):
        target = shared.remove_highdepth_regions(target, items)
        target = shared.remove_lcr_regions(target, items)
    target = _enforce_max_region_size(target, items[0])
    opts += [target]  # this must be the last option
    return opts
def _enforce_max_region_size(in_file, data):
    """Ensure we don't have any chunks in the region greater than 1Mb.

    Larger sections have high memory usage on VarDictJava and failures
    on VarDict. This creates minimum windows from the input BED file
    to avoid these issues. Downstream VarDict merging sorts out any
    variants across windows.
    """
    max_size = 1e6
    # Windows overlap by 250bp so variants at window edges are not missed.
    overlap_size = 250
    def _has_larger_regions(f):
        return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
    out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        if _has_larger_regions(in_file):
            # Re-window the BED into <= 1Mb chunks with overlap.
            with file_transaction(data, out_file) as tx_out_file:
                pybedtools.BedTool().window_maker(w=max_size,
                                                  s=max_size - overlap_size,
                                                  b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
        else:
            # Nothing oversized: just point at the original file.
            utils.symlink_plus(in_file, out_file)
    return out_file
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return ""
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in itertools.izip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
strandbias = "teststrandbias.R"
var2vcf = "var2vcf_valid.pl"
opts = (" ".join(_vardict_options_from_config(items, config, out_file, target))
if _is_bed_file(target) else "")
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
# for deep targeted panels, require 50 worth of coverage
var2vcf_opts = " -v 50 " if highdepth.get_median_coverage(items[0]) > 5000 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
remove_dup = vcfutils.remove_dup_cl()
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
r_setup = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
cmd = ("{r_setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {sample} -b {bamfile} {opts} "
"| {strandbias}"
"| {var2vcf} -N {sample} -E -f {freq} {var2vcf_opts} "
"| {fix_ambig} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {}) |
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd | += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
out_file = (annotation.add_dbsnp(out_file, assoc_files["dbsnp"], config)
if assoc_files.get("dbsnp") else out_file)
return out_file
def _safe_to_float(x):
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
of < 13bp for heterogygote calls, but generalized. Within these calls filters if a
calls has:
- Low mapping quali |
moyogo/vanilla | Lib/vanilla/vanillaBox.py | Python | mit | 4,498 | 0.002001 | from AppKit import NSBox, NSColor, NSFont, NSSmallControlSize, NSNoTitle, NSLineBorder, NSBoxSeparator
from vanilla.vanillaBase import VanillaBaseObject, _breakCycles, osVersionCurrent, osVersion10_10
class Box(VanillaBaseObject):
"""
A bordered container for other controls.
To add a control to a box, simply set it as an attribute of the box.::
from vani | lla import *
class BoxDemo(object):
def __init__(self):
| self.w = Window((150, 70))
self.w.box = Box((10, 10, -10, -10))
self.w.box.text = TextBox((10, 10, -10, -10), "This is a box")
self.w.open()
BoxDemo()
No special naming is required for the attributes. However, each attribute must have a unique name.
**posSize** Tuple of form *(left, top, width, height)* representing the position and size of the box.
**title** The title to be displayed dabove the box. Pass *None* if no title is desired.
"""
allFrameAdjustments = {
# Box does not have sizeStyle, but the
# adjustment is differeent based on the
# presence of a title.
"Box-Titled": (-3, -4, 6, 4),
"Box-None": (-3, -4, 6, 6)
}
nsBoxClass = NSBox
def __init__(self, posSize, title=None):
self._setupView(self.nsBoxClass, posSize)
if title:
self._nsObject.setTitle_(title)
if osVersionCurrent < osVersion10_10:
self._nsObject.titleCell().setTextColor_(NSColor.blackColor())
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(NSSmallControlSize))
self._nsObject.setTitleFont_(font)
else:
self._nsObject.setTitlePosition_(NSNoTitle)
def getNSBox(self):
"""
Return the *NSBox* that this object wraps.
"""
return self._nsObject
def _adjustPosSize(self, frame):
# skip subclasses
if self.__class__.__name__ == "Box":
pos = self._nsObject.titlePosition()
if pos != NSNoTitle:
title = "Titled"
else:
title = "None"
boxType = "Box-" + title
self.frameAdjustments = self.allFrameAdjustments[boxType]
return super(Box, self)._adjustPosSize(frame)
def _getContentView(self):
return self._nsObject.contentView()
def _breakCycles(self):
super(Box, self)._breakCycles()
view = self._getContentView()
if view is not None:
_breakCycles(view)
def setTitle(self, title):
"""
Set the title of the box.
"""
self._nsObject.setTitle_(title)
def getTitle(self):
"""
Get the title of the box.
"""
return self._nsObject.title()
class _Line(Box):
nsBoxClass = NSBox
def __init__(self, posSize):
self._setupView(self.nsBoxClass, posSize)
self._nsObject.setBorderType_(NSLineBorder)
self._nsObject.setBoxType_(NSBoxSeparator)
self._nsObject.setTitlePosition_(NSNoTitle)
class HorizontalLine(_Line):
"""
A horizontal line.::
from vanilla import *
class HorizontalLineDemo(object):
def __init__(self):
self.w = Window((100, 20))
self.w.line = HorizontalLine((10, 10, -10, 1))
self.w.open()
HorizontalLineDemo()
**posSize** Tuple of form *(left, top, width, height)* representing the position and size of the line.
+-------------------------+
| **Standard Dimensions** |
+---+---------------------+
| H | 1 |
+---+---------------------+
"""
def __init__(self, posSize):
super(HorizontalLine, self).__init__(posSize)
class VerticalLine(_Line):
"""
A vertical line.::
from vanilla import *
class VerticalLineDemo(object):
def __init__(self):
self.w = Window((80, 100))
self.w.line = VerticalLine((40, 10, 1, -10))
self.w.open()
VerticalLineDemo()
**posSize** Tuple of form *(left, top, width, height)* representing the position and size of the line.
+-------------------------+
| **Standard Dimensions** |
+---+---------------------+
| V | 1 |
+---+---------------------+
"""
def __init__(self, posSize):
super(VerticalLine, self).__init__(posSize)
|
candidtim/vagrant-appindicator | vgapplet/machineindex.py | Python | gpl-3.0 | 4,627 | 0.00389 | # Copyright 2014, candidtim (https://github.com/candidtim)
#
# This file is part of Vagrant AppIndicator for Ubuntu.
#
# Vagrant AppIndicator for Ubuntu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the G | NU General Public License along with Foobar.
# If not, see <http://www.gnu.org/licenses/>.
'''
Parsers for Vagrant machine-index file
'''
import os
import json
from gi.repository import Gio as gio
__VAGRNAT_HOME_VAR = "VAGRANT_HOME"
__MACHINE_INDEX_PATH = "data/machine-index/index"
# module's public interfa | ce
class Machine(object):
def __init__(self, id, state, directory, name):
self.id = id
self.state = state
self.directory = directory
self.name = name
def isPoweroff(self):
return self.state == "poweroff"
def isRunning(self):
return self.state == "running"
def isSaved(self):
return self.state == "saved"
def __str__(self):
return "id=%s state=%s directory=%s name=%s" % \
(self.id, self.state, self.directory, self.name)
def __eq__(self, other):
return self.id == other.id
def _changed_state_since(self, other):
assert self == other
return self.state != other.state
class MachineIndexNotFoundError(Exception):
pass
def get_machineindex():
machineindex_path = _resolve_machineindex_path()
with open(machineindex_path, 'r') as machineindex_file:
return _parse_machineindex(machineindex_file)
def diff_machineindexes(new_index, old_index):
'''Returns tuple of 3 items:
(list of new machines, list of removed machines, list of machines that changed state)
'''
new_machines = [machine for machine in new_index if machine not in old_index]
removed_machines = [machine for machine in old_index if machine not in new_index]
changed_machines = [machine for machine in new_index
if machine in old_index and machine._changed_state_since(old_index[old_index.index(machine)])]
went_running = [machine for machine in changed_machines if machine.isRunning()]
return (new_machines, removed_machines, changed_machines)
active_monitors = {}
def subscribe(listener):
def on_machineindex_change(mon, f, o, event):
if event == gio.FileMonitorEvent.CHANGES_DONE_HINT:
listener(get_machineindex())
machineindex_path = _resolve_machineindex_path()
file_to_monitor = gio.File.new_for_path(machineindex_path)
monitor = file_to_monitor.monitor_file(gio.FileMonitorFlags.NONE, None)
handler_id = monitor.connect("changed", on_machineindex_change)
active_monitors[handler_id] = monitor
def unsubscribe_all():
global active_monitors
for handler_id in active_monitors:
monitor = active_monitors[handler_id]
monitor.disconnect(handler_id)
active_monitors = {}
# private implementation
def _resolve_machineindex_path():
vagrant_home = os.getenv(__VAGRNAT_HOME_VAR, "~/.vagrant.d")
machineindex_path = os.path.expanduser(os.path.join(vagrant_home, __MACHINE_INDEX_PATH))
if not os.path.isfile(machineindex_path):
raise MachineIndexNotFoundError(
"Vagrant machine index not found. Is Vagrant installed and at least one VM created?")
return machineindex_path
def _parse_machineindex(machineindex_file):
machineindex_json = json.load(machineindex_file)
version = machineindex_json["version"]
# currently, only one parser version is available:
parser = __MachineIndexParserV1()
return parser.parse(machineindex_json)
class __MachineIndexParser(object):
def parse(self, machineindex_json):
raise NotImplementedError()
class __MachineIndexParserV1(__MachineIndexParser):
def parse(self, machineindex_json):
machineindex = []
machines_json = machineindex_json["machines"]
for machine_id in machines_json:
machine_json = machines_json[machine_id]
machine = Machine(machine_id, machine_json["state"],
machine_json["vagrantfile_path"], machine_json["name"])
machineindex.append(machine)
return tuple(machineindex)
|
kubeflow/kfctl | py/kubeflow/kfctl/testing/pytests/kfctl_upgrade_test.py | Python | apache-2.0 | 583 | 0.006861 | import logging
import os
import pytest
from kubernetes import client as k8s_client
from kubeflow.kfctl.testing.util import kfctl_go_test_utils as kfctl_util
from | kubeflow.testing import util
def test_upgrade_kubeflow(record_xml_attribute, app_path, kfctl_path, upgrade_spec_path):
"""Test that we can run upgrade on a Kubeflow cluster.
Args:
app_path: The app dir of kubeflow deployment.
kfctl_path: The path to kfctl binary.
upgr | ade_spec_path: The path to the upgrade spec file.
"""
kfctl_util.kfctl_upgrade_kubeflow(app_path, kfctl_path, upgrade_spec_path)
|
vlegoff/mud | menu/validate_account.py | Python | bsd-3-clause | 1,181 | 0.000848 | """
This module contains the 'validate_account' menu node.
"""
from textwrap import dedent
from menu.character import _options_choose_characters
def validate_account(caller, input):
"""Prompt the user to enter the received validation code."""
text = ""
options = (
{
"key": "b",
"desc": "Go back to the e-mail address menu.",
"goto": "email_address",
},
{
"key": "_default",
"desc": "Enter the validation code.",
"goto": "validate_account",
},
)
player = caller.db._player
if player.db.validation_code != input.strip():
text = dedent("""
|rSorry, the specified validation code {} doesn't match
the one stored for this account. Is it the code you
received by e-mail? You can try to enter it again, or
enter |y | b|n to choose a different e-mail address.
""".strip("\n")).format(input.strip())
else:
player.db.valid = True
player.attributes.remove("validation_code")
text = ""
options = _options_choose_characters(player)
return text, optio | ns
|
Geosyntec/pygridtools | pygridtools/__init__.py | Python | bsd-3-clause | 140 | 0 | from .misc import *
from .core import *
from . impor | t iotoo | ls
from . import viz
from . import validate
from .tests import test, teststrict
|
avelino/bottle-boilerplate | setup.py | Python | mit | 1,673 | 0.000598 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from setuptools import setup
REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()]
classifiers = [
"Framework :: Bottle",
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development']
description = "Boilerplate code for new Bottle projects"
try:
long_description = open('README.rst').read()
except:
long_description = description
url = 'https://github.com/bottlepy/bottle-boilerplate'
setup(name='bottle-boilerplate',
version=0.3,
description=description,
long_description | =long_description,
classifiers=classifiers,
keywords='bottle boilerplate startproject doc',
author="Thiago Avelino",
author_email="thiago@avelino.xxx",
url=url,
download_url="{0}/tarball/master".format(url),
license="MIT",
install_requires=REQUIREMENTS,
entry_points={
'console_scripts': ["bottle-boilerplate = bottle_boilerplate:main"]
},
py_modules=['bottle_boilerplate'],
scripts=['bottle_b | oilerplate.py'],
include_package_data=True,
zip_safe=False)
|
xguse/easygv | easygv/cli/__init__.py | Python | mit | 5,459 | 0.000733 | #!/usr/bin/env python
"""Provide command line interface to easygv."""
# Imports
import logzero
from logzero import logger as log
import os
from pathlib import Path
import appdirs
from munch import Munch
import click
import graphviz as gv
from easygv.cli import config as _config
from easygv import easygv
# Metadata
__author__ = "Gus Dunn"
__email__ = "w.gus.dunn@gmail.com"
FACTORY_RESETS = (Path(os.path.realpath(__file__)).parent / 'factory_resets/').resolve()
USER_CONFIG_DIR = Path(appdirs.user_config_dir())
USER_APP_DIR = USER_CONFIG_DIR / 'easygv'
verbosity_levels = {'debug': 10,
'normal': 20,
'quiet': 30}
# logzero.loglevel(0)
# log.debug('debug')
# log.info('info')
# log.warning('warning')
# log.error('error')
@click.group()
@click.option('-v', '--verbosity',
type=click.Choice(verbosity_levels.keys()),
help="How much do you want to know about what I am doing?",
show_default=True,
default='normal')
@click.pass_context
def main(ctx=None, verbosity=None):
"""Command interface to easygv.
Define nodes and edges in an excel file and graph-style attributes in a yaml file with inheritence.
For command specific help text, call the specific
command followed by the --help option.
"""
ctx.obj = Munch()
# NOTE: Not sure I need to store this or just set it here.
ctx.obj.LOGLVL = verbosity_levels[verbosity]
logzero.loglevel(level=ctx.obj.LOGLVL, update_custom_handlers=True)
log.debug("loglevel set at: {lvl}".format(lvl=ctx.obj.LOGLVL))
@main.command()
@click.option('-g', '--generate-config',
is_flag=True,
help="Copy one or more of the 'factory default' config files to the users "
"config directory ({user_config_dir}). Back ups will be made of any existing config files.".format(user_config_dir=USER_APP_DIR),
show_default=True,
default=False)
@click.option('-k', '--kind',
type=click.Choice(['attrs']),
help="Which type of config should we replace?",
show_default=True,
| default='attrs')
@click.option('-p', '--prefix',
type=click.STRING,
help="""A prefix to identify the new config file(s).""", |
show_default=True,
default=None)
@click.pass_context
def config(ctx, generate_config, kind, prefix):
"""Manage configuration values and files."""
factory_resets = FACTORY_RESETS
default_files = {"attrs": factory_resets / 'attrs.yaml'}
if generate_config:
if kind == 'all':
for p in default_files['all']:
_config.replace_config(name=p.name,
factory_resets=factory_resets,
user_conf_dir=USER_APP_DIR,
prefix=prefix)
else:
p = default_files[kind]
_config.replace_config(name=p.name,
factory_resets=factory_resets,
user_conf_dir=USER_APP_DIR,
prefix=prefix)
draw_formats = ['all', 'pdf', 'png', 'svg']
draw_layouts = ["dot", "neato", "fdp", "sfdp", "twopi", "circo"]
@main.command('draw', short_help='Draw and save your graph.')
@click.option('-f', '--formats',
type=click.Choice(draw_formats),
help="Which type of format should we produce?",
show_default=True,
default='all')
@click.option('-d', '--directory',
type=click.Path(exists=True, file_okay=False),
help="""Path to a directory to write out the files.""",
show_default=True,
default=None)
@click.option('-n', '--name',
type=click.STRING,
help="""A name for your figure.""",
show_default=True,
default=None)
@click.option('-l', '--layout',
type=click.Choice(draw_layouts),
help="""Which layout program?""",
show_default=True,
default='dot')
@click.argument('definition', type=click.Path(exists=True, dir_okay=False))
@click.argument('attr_config', type=click.Path(exists=True, dir_okay=False))
@click.pass_context
def draw(ctx, formats, directory, name, layout, definition, attr_config): # noqa: D301
"""Produce your graph and save results based on your input.
\b
DEFINITION = Excel file containing the definition of your nodes and edges
ATTR_CONFIG = YAML file containing the attribute information for your
graph, node-, and edge-types
"""
log.info("Preparing your graph.")
directory = Path(directory)
definition = Path(definition)
attr_config = Path(attr_config)
if name is None:
name = 'easygv'
if formats == 'all':
formats = draw_formats[1:]
else:
formats = [formats]
graph_input = easygv.load_graph_input(path=definition)
attrs = easygv.process_attrs(attr_config)
g = easygv.build_graph(graph_input=graph_input, attrs=attrs)
gvg = gv.Source(g.string(), engine=layout)
for f in formats:
gvg.format = f
fig_path = gvg.render(directory / '{name}.gv'.format(name=name))
log.info("Created: {p}".format(p=fig_path))
# Business
if __name__ == '__main__':
main(obj=Munch())
|
rosarior/mayan | apps/sources/__init__.py | Python | gpl-3.0 | 7,263 | 0.005783 | from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from common.utils import encapsulate
from documents.models import Document
from documents.permissions import (PERMISSION_DOCUMENT_NEW_VERSION,
PERMISSION_DOCUMENT_CREATE)
from navigation.api import register_links, register_model_list_columns
from project_setup.api import register_setup
from .staging import StagingFile
from .models import (WebForm, StagingFolder, SourceTransformation,
WatchFolder)
from .widgets import staging_file_thumbnail
from .permissions import (PERMISSION_SOURCES_SETUP_VIEW,
PERMISSION_SOURCES_SETUP_EDIT, PERMISSION_SOURCES_SETUP_DELETE,
PERMISSION_SOURCES_SETUP_CREATE)
document_create_multiple = {'text': _(u'upload new documents'), 'view': 'document_create_multiple', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_CREATE], 'children_view_regex': [r'upload_interactive']}
document_create_siblings = {'text': _(u'clone metadata'), 'view': 'document_create_siblings', 'args': 'object.id', 'famfam': 'page_copy', 'permissions': [PERMISSION_DOCUMENT_CREATE]}
staging_file_preview = {'text': _(u'preview'), 'class': 'fancybox-noscaling', 'view': 'staging_file_preview', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'zoom', 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
staging_file_delete = {'text': _(u'delete'), 'view': 'staging_file_delete', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'delete', 'keep_query': True, 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
setup_sources = {'text': _(u'sources'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW], 'children_view_regex': [r'setup_web_form', r'setup_staging_folder', r'setup_source_']}
setup_web_form_list = {'text': _(u'web forms'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_staging_folder_list = {'text': _(u'staging folders'), 'view': 'setup_staging_folder_list', 'famfam': 'folder_camera', 'children_classes': [StagingFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_watch_folder_list = {'text': _(u'watch folders'), 'view': 'setup_watch_folder_list', 'famfam': 'folder_magnify', 'children_classes': [WatchFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_source_edit = {'text': _(u'edit'), 'view': 'setup_source_edit', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_delete = {'text': _(u'delete'), 'view': 'setup_source_delete', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_delete', 'permissions': [PERMISSION_SOURCES_SETUP_DELETE]}
setup_source_create = {'text': _(u'add new source'), 'view': 'setup_source_create', 'args': 'source_type', 'famfam': 'application_form_add', 'permissions': [PERMISSION_SOURCES_SETUP_CREATE]}
setup_source_transformation_list = {'text': _(u'transformations'), 'view': 'setup_source_transformation_list', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_move_front', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_create = {'text': _(u'add transformation'), 'view': 'setup_source_transformation_create', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_square_add', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_edit = {'text': _(u'edit'), 'view': 'setup_source_transformation_edit', 'args': 'transformation.pk', 'famfam': 'shape_square_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_delete = {'text': _(u'delete'), 'view': 'setup_source_transformation_delete', 'args': 'transformation.pk', 'famfam': 'shape_square_delete', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
source_list = {'text': _(u'Document sources'), 'view': 'setup_web_for | m_list', 'famfam': 'page_add', 'children_url_regex': [r'sources/setup'], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
upload_version = {'text': _(u'upload new version'), 'view': 'upload_version', 'args': 'object.pk', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_NEW_VERS | ION]}
register_links(StagingFile, [staging_file_delete])
register_links(SourceTransformation, [setup_source_transformation_edit, setup_source_transformation_delete])
#register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
#register_links(WebForm, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WebForm, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(WebForm, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create'], [setup_sources, setup_source_create], menu_name='sidebar')
#register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(WatchFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WatchFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
# Document version
register_links(['document_version_list', 'upload_version', 'document_version_revert'], [upload_version], menu_name='sidebar')
register_links(['setup_source_transformation_create', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_list'], [setup_source_transformation_create], menu_name='sidebar')
source_views = ['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create', 'setup_source_transformation_list', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_create']
register_model_list_columns(StagingFile, [
{'name':_(u'thumbnail'), 'attribute':
encapsulate(lambda x: staging_file_thumbnail(x))
},
])
register_setup(setup_sources)
register_links(['document_list_recent', 'document_list', 'document_create', 'document_create_multiple', 'upload_interactive', 'staging_file_delete'], [document_create_multiple], menu_name='secondary_menu')
register_links(Document, [document_create_siblings])
|
HEP-DL/root2hdf5 | root2hdf5/__init__.py | Python | gpl-3.0 | 117 | 0 | # -*- coding: utf-8 -*-
__author__ = """Kevin Wierman"""
__email__ = 'kevin.wierman@ | pnnl.gov'
__version__ = | '0.1.0'
|
akeym/cyder | cyder/api/v1/endpoints/dhcp/static_interface/__init__.py | Python | bsd-3-clause | 61 | 0 | f | rom cyder.api.v1.endpoints.dhcp.static_interfac | e import api
|
Fizzadar/pyinfra | pyinfra/operations/gem.py | Python | mit | 1,033 | 0 | '''
Manage Ruby gem packages. (see https://rubygems.org/ )
'''
from pyinfra.api import operation
from pyinfra.facts.gem import GemPackages
from .util.packaging import ensure_packages
@operation
def packages(packages=None, present=True, latest=False, state=None, host=None):
'''
Add/remove/update gem packages.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
Versions:
Package versions can be pinned like gem: ``<pkg>:<version>``.
Example:
.. code:: python
# Note: A | ssumes that 'gem' is installed.
gem.packages(
name='Install rspec',
packages=['rspec'],
)
'''
yield ensure_packages(
host, packages, host.get_fact(GemPackages), present,
install_command='gem insta | ll',
uninstall_command='gem uninstall',
upgrade_command='gem update',
version_join=':',
latest=latest,
)
|
crisely09/horton | horton/io/molpro.py | Python | gpl-3.0 | 5,611 | 0.00303 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Molpro 2012 FCIDUMP format.
.. note ::
One- and two-electron integrals are stored in chemists' notation in an
FCIDUMP file while HORTON internally uses Physicist's notation.
'''
__all__ = ['load_fcidump', 'dump_fcidump']
def load_fcidump(filename, lf):
'''Read one- and two-electron integrals from a Molpro 2012 FCIDUMP file.
Works only for restricted wavefunctions.
Keep in mind that the FCIDUMP format changed in Molpro 2012, so files
generated with older versions are not supported.
**Arguments:**
filename
The filename of the fcidump file.
lf
A LinalgFactory instance.
**Returns**: A dictionary with keys: ``lf``, ``nelec``, ``ms2``,
``one_mo``, ``two_mo``, ``core_energy``
'''
with open(filename) as f:
# check header
line = f.next()
if not line.startswith(' &FCI NORB='):
raise IOError('Error in FCIDUMP file header')
# read info from header
words = line[5:].split(',')
header_info = {}
for word in words:
if word.count('=') == 1:
key, value = word.split('=')
header_info[key.strip()] = value.strip()
nbasis = int(header_info['NORB'])
nelec = int(header_info['NELEC'])
ms2 = int(header_info['MS2'])
if lf.default_nbasis is not None and lf.default_nbasis != nbasis:
raise TypeError('The value of lf.default_nbasis does not match NORB reported in the FCIDUMP file.')
lf.default_nbasis = nbasis
# skip rest of header
for line in f:
words = line.split()
if words[0] == "&END" or words[0] == "/END" or words[0]=="/":
break
# read the integrals
one_mo = lf.create_two_index()
two_mo = lf.create_four_index()
core_energy = 0.0
for line in f:
words = line.split()
if len(words) != 5:
raise IOError('Expecting 5 fields on each data line in FCIDUMP')
if words[3] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
ik = int(words[3])-1
il = int(words[4])-1
# Uncomment the following line if you want to assert that the
# FCIDUMP file does not contain duplicate 4-index entries.
#assert two_mo.get_element(ii,ik,ij,il) == 0.0
two_mo.set_element(ii,ik,ij,il,float(words[0]))
elif words[1] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
one_mo.set_element(ii,ij,float(words[0]))
else:
core_energy = float(words[0])
return {
'lf': lf,
'nelec': nelec,
'ms2': ms2,
'one_mo': one_mo,
'two_mo': two_mo,
'core_energy': core_energy,
}
def dump_fcidump(filename, data):
'''Write one- and two-electron integrals in the Molpro 2012 FCIDUMP format.
Works only for restricted wavefunctions.
Keep in mind that the FCIDUMP format changed in Molpro 2012, so files
written with this function cannot be used with older versions of Molpro
filename
The filename of the FCIDUMP file. This is usually "FCIDUMP".
data
An IOData instance. Must contain ``one_mo``, ``two_mo``.
May contain ``core_energy``, ``nelec`` and ``ms``
'''
with open(filename, 'w') as f:
one_mo = data.one_mo
two_mo = data.two_mo
nactive = one_mo.nbasis
core_energy = getattr(data, 'core_energy', 0.0)
nelec = getattr(data, 'nelec', 0)
ms2 = getattr(data, 'ms2', 0)
# Write header
print >> f, ' &FCI NORB=%i,NELEC=%i,MS2=%i,' % (nactive, nelec, ms2)
| print >> f, ' ORBSYM= '+",".join(str(1) for v in xrange(nactive))+","
print >> f, ' ISYM=1'
print >> f, ' &END'
# Write integrals and core energy
for i in xrange(nactive):
for j in xrange(i+1):
for k in xrange(nactive):
for l in xrange(k+1):
if (i*(i+1))/2+j >= (k*(k+1))/2+l:
| value = two_mo.get_element(i,k,j,l)
if value != 0.0:
print >> f, '%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, k+1, l+1)
for i in xrange(nactive):
for j in xrange(i+1):
value = one_mo.get_element(i,j)
if value != 0.0:
print >> f, '%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, 0, 0)
if core_energy != 0.0:
print >> f, '%23.16e %4i %4i %4i %4i' % (core_energy, 0, 0, 0, 0)
|
pinntech/flask-logex | tests/base.py | Python | mit | 1,058 | 0 | """Test Logex Initization and Error Handling"""
import subprocess
from unittest import TestCase
from samples import app, api, logex
from samples import bp_app, api_v1, api_v2, bp_logex
class BaseTestCase(TestCase):
DEBUG = True
__blueprints__ = False
@classmethod
def setUpClass(cls):
cls.app = app
cls.api = api
cls.logex = logex
if cls.__blueprints__:
cl | s.app = bp_app
cls.api = [api_v1, api_v2]
cls.logex = bp_logex
# App test client, config, and context
cls.log_name = cls.app.name + ".log"
cls.app.config['DEBUG'] | = cls.DEBUG
cls.ac = cls.app.app_context()
cls.test_client = cls.app.test_client()
cls.test_client.testing = True
cls.ctx = cls.app.test_request_context()
cls.ctx.push()
@classmethod
def tearDownClass(cls):
subprocess.call(['rm', '-rf', 'logs'])
def setUp(self):
with self.ac:
self.logs = self.logex.logs
def tearDown(self):
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.