| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
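The column names and length ranges above are the signature of a fill-in-the-middle (FIM) code dataset: each row below is a Python file from the named repository, stored as a prefix, a middle span, and a suffix, and concatenating the three spans recovers the original source. A minimal sketch of how such a row could be loaded and turned into a FIM training prompt follows; the `rows.jsonl` file name and the `<fim_*>` sentinel tokens are assumptions for illustration, not part of this dataset.

```python
# Minimal sketch, assuming a JSON-lines export of this table; the file
# name "rows.jsonl" and the <fim_*> sentinels are illustrative, not
# part of the dataset itself.
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")

def reassemble(row):
    # prefix + middle + suffix are contiguous slices of one file,
    # so concatenating them recovers the original source.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    # Common prefix-suffix-middle layout: the model conditions on the
    # prefix and suffix, then generates the middle span.
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

row = ds[0]
assert row["middle"] in reassemble(row)
print(to_fim_prompt(row)[:200])
```

`reassemble` is mainly useful for sanity-checking a row, since the three spans are by construction contiguous slices of one file.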
rochapps/django-google-oauth | google_oauth/models.py | Python | bsd-3-clause | 545 | 0 |
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
class Flow(models.Model):
"""
class to save flow objects in a multitreaded environment
"""
id = models.ForeignKey(User, primary_key=True)
flow = FlowField()
class Credentials(models.Model):
"""
saves user oauth credentials for later use
"""
id = models.ForeignKey(User, primary_key=True)
credential = CredentialsField()
|
dobladov/NickCage-TelegramBot | main.py | Python | mit | 1,853 | 0.001079 |
#!/usr/bin/env python
# encoding: utf-8
import logging
import telegram
import random
import json
from giphypop import translate
with open('quotes.json') as data_file:
quotes = json.load(data_file)
quotes = quotes["quotes"]
def main():
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
bot = telegram.Bot(token='YOUR BOT AUTHORIZATION TOKEN')
try:
LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
except IndexError:
LAST_UPDATE_ID = None
while True:
for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=5):
text = update.message.text
chat_id = update.message.chat.id
update_id = update.update_id
if '/start' in text:
custom_keyboard = [["/quote", "/gif"]]
reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=True)
bot.sendMessage(chat_id=chat_id,
text="Chose.",
reply_markup=reply_markup)
LAST_UPDATE_ID = update_id + 1
elif '/quote' in text:
answer = quote()
bot.sendMessage(chat_id=chat_id,
text=answer)
LAST_UPDATE_ID = update_id + 1
elif '/gif' in text:
bot.sendMessage(chat_id=chat_id,
text="Nick is searching for an awesome gif.")
img = translate('nicolas cage')
bot.sendDocument(chat_id=chat_id,
document=img.fixed_height.url)
print "Enviar Gif " + img.fixed_height.url
LAST_UPDATE_ID = update_id + 1
def quote():
return random.choice(quotes)
if __name__ == '__main__':
main()
|
skoslowski/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | Python | gpl-3.0 | 6,303 | 0.003332 |
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import (create_connection, list_tables, get_results,
helper, timeit, format_results)
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplot
|
lib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can ref the table by its name and the data associated
# with a given kernel in name_reg by its name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
|
prats226/python-amazon-product-api-0.2.8 | tests/conftest.py | Python | bsd-3-clause | 5,765 | 0.004163 |
from ConfigParser import SafeConfigParser
import os.path
import pytest
import re
import textwrap
from amazonproduct import utils
def pytest_addoption(parser):
group = parser.getgroup('amazonproduct',
'custom options for testing python-amazon-product-api')
group._addoption('--locale', action='append', dest='locales',
metavar='LOCALE', help='Locale to use (e.g. "de" or "us"). This option '
'can be used more than once. Note that tests with specific locales '
'defined which do not match the ones specified by this option will '
'NOT be run.')
group._addoption('--api-version', action='append', dest='versions',
metavar='VERSION', help='API version to use (e.g. "2010-09-01"). This '
'option can be used more than once. Note that tests with specific '
'versions defined which do not match the ones specified by this '
'option will NOT be run.')
group._addoption('--refetch', action='store', type='choice', dest='fetch',
metavar='method', choices=['no', 'missing', 'outdated', 'all'],
default='no', help='Fetch responses from live server and overwrite '
'previously cached XML file: one of no (default)|missing|outdated|'
'all.')
group._addoption('--processor', action='append', dest='processors',
metavar='PROCESSOR', choices=['objectify', 'etree', 'elementtree', 'minidom'],
help='Result processor to use: one of objectify|etree|elementtree|minidom.')
def pytest_funcarg__server(request):
"""
Is the same as funcarg `httpserver` from plugin pytest-localserver with the
difference that it has a module-wide scope.
"""
def setup():
try:
localserver = request.config.pluginmanager.getplugin('localserver')
except KeyError:
raise pytest.skip('This test needs plugin pytest-localserver!')
server = localserver.http.Server()
server.start()
return server
def teardown(server):
server.stop()
return request.cached_setup(setup, teardown, 'module')
class DummyConfig (object):
"""
Dummy config to which you can add config files which in turn will
be created on the file system as temporary files.
"""
_file_counter = 0
def __init__(self, tmpdir):
self.tmpdir = tmpdir
self.files = []
def add_file(self, content, path):
"""
Writes one temporary file.
"""
if not path:
path = 'config-%i' % self._file_counter
self._file_counter += 1
p = self.tmpdir.ensure(os.path.expanduser(path))
p.write(textwrap.dedent(content))
self.files += [p.strpath]
_REG = re.compile(r'^#\s*file:\s+(.+?)\n', re.DOTALL | re.MULTILINE)
def load_from_string(self, content):
"""
Creates config files from string which is split up into file blocks and
written to temporary files.
"""
last = 0 # end of the last matching '# file: XXX'
path = None # path of the last matching '# file: XXX'
for m in self._REG.finditer(content):
if path is not None:
self.add_file(content[last:m.start()], path)
path = m.group(1)
last = m.end()
if path is not None:
self.add_file(content[last:], path)
else:
raise ValueError('Where are the file paths?')
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def __repr__(self):
return '<DummyConfig %s files=%r>' % (hex(id(self)), self.files)
def pytest_funcarg__configfiles(request):
"""
Returns a dummy config to which you can add config files which in turn will
be created on the file system as temporary files. You can use the following
methods:
To add a single config file use ``configfiles.add_file(content, path)``. If
you omit the ``path``, some arbitrary file name is used. ::
configfiles.add_file('''
[Credentials]
access_key = ABCDEFGH12345
secret_key = abcdegf43
locale = de''', path='/etc/amazon-product-api.cfg')
In order to add multiple config files at once, you can use the following
method::
configfiles.load_from_string('''
# file: /etc/boto.cfg
[Credentials]
aws_access_key_id = Hhdjksaiunkljfl
aws_secret_access_key = difioemLjdks02
# file: /home/user/.amazon-product-api
[Credentials]
locale = de
''')
"""
tmpdir = request.getfuncargvalue('tmpdir')
monkeypatch = request.getfuncargvalue('monkeypatch')
def prepend_tmppath(dir, files):
return [tmpdir.join(os.path.expanduser(fn)).strpath for fn in files]
monkeypatch.setattr(utils, 'CONFIG_FILES',
prepend_tmppath(tmpdir, utils.CONFIG_FILES))
cfg = DummyConfig(tmpdir)
return cfg
|
SukkoPera/Arduino-Sensoria | python/server3.py | Python | gpl-3.0 | 1,544 | 0.036269 |
#!/usr/bin/env python
import server
import time
from Sensoria.stereotypes.TimeControlData import TimeControlData
from Sensoria.stereotypes.InstantMessageData import InstantMessageData
class TemperatureSensor (server.TemperatureSensor):
def __init__ (self):
super (TemperatureSensor, self).__init__ ("HD", "Heater Temperature")
class HeaterController (server.ControlledRelayActuator):
def __init__ (self):
super (HeaterController, self).__init__ ("HC", "Heater Controller")
#TL1:10 TL2:18 TL3:21
class HeaterTimer (server.TimedActuator):
def __init__ (self):
super (HeaterTimer, self).__init__ ("HT", "Heater Timer")
initData = TimeControlData ()
initData.unmarshal ("PMO:000000001000000003222110 PTU:000000001000000003222110 PWE:000000001000000003222110 PTH:000000001000000003222110 PFR:000000001000000003222111 PSA:000000000322222222222211 PSU:000000000322222222222210")
ok, msg = self.write (initData)
print msg
assert ok
class HeaterSettings (server.ValueSetActuator):
def __init__ (self):
super (HeaterSettings, self).__init__ ("HS", "Heater Settings")
self.levels = [10, 18, 21]
@property
def values (self):
return self.levels
@values.setter
def values (self, v):
self.levels = v[0:3]
hd = TemperatureSensor ()
hc = HeaterController ()
ht = HeaterTimer ()
hs = HeaterSettings ()
listener = server.CommandListener ("HeatingSystem")
listener.register_sensor (hd)
listener.register_sensor (hc)
listener.register_sensor (ht)
listener.register_sensor (hs)
listener.start ()
while True:
time.sleep (1)
|
openelections/openelections-core | openelex/us/md/datasource.py | Python | mit | 12,263 | 0.001957 |
"""
Standardize names of data files on Maryland State Board of Elections.
File-name convention on MD site (2004-2012):
general election
precinct: countyname_by_precinct_year_general.csv
state leg. district: state_leg_districts_year_general.csv
county: countyname_party_year_general.csv
primary election
precinct: countyname_by_Precinct_party_year_Primary.csv
state leg. district: state_leg_districts_party_year_primary.csv
county: countyname_party_year_primary.csv
Exceptions: 2000 + 2002
To run mappings from a shell:
openelex datasource.mappings -s md
"""
import re
from openelex.base.datasource import BaseDatasource
class Datasource(BaseDatasource):
base_url = "http://www.elections.state.md.us/elections/%(year)s/election_data/"
# PUBLIC INTERFACE
def mappings(self, year=None):
"""Return array of dicts containing source url and
standardized filename for raw results file, along
with other pieces of metadata
"""
mappings = []
for yr, elecs in list(self.elections(year).items()):
mappings.extend(self._build_metadata(yr, elecs))
return mappings
def target_urls(self, year=None):
"Get list of source data urls, optionally filtered by year"
return [item['raw_url'] for item in self.mappings(year)]
def filename_url_pairs(self, year=None):
return [(item['generated_filename'], item['raw_url'])
for item in self.mappings(year)]
# PRIVATE METHODS
def _races_by_type(self, elections):
"Filter races by type and add election slug"
races = {
'special': None,
'primary': None,
'general': None,
}
for elec in elections:
rtype = self._race_type(elec)
elec['slug'] = self._election_slug(elec)
races[rtype] = elec
return races['general'], races['primary'], races['special']
def _race_type(self, election):
if election['special']:
return 'special'
return election['race_type'].lower()
def _build_metadata(self, year, elections):
year_int = int(year)
if year_int == 2002:
general, primary, special = self._races_by_type(elections)
meta = [
{
"generated_filename": "__".join((general['start_date'].replace('-',''), self.state, "general.txt")),
"raw_url": self._get_2002_source_urls('general'),
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": general['slug']
},
{
"generated_filename": "__".join((primary['start_date'].replace('-',''), self.state, "primary.txt")),
"raw_url": self._get_2002_source_urls('primary'),
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": primary['slug']
}
]
else:
meta = self._state_leg_meta(year, elections) + self._county_meta(year, elections)
if year_int == 2000:
general, primary, special = self._races_by_type(elections)
meta.append({
"generated_filename": "__".join((primary['start_date'].replace('-',''), self.state, "primary.csv")),
"raw_url": 'http://www.elections.state.md.us/elections/2000/results/prepaa.csv',
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": primary['slug']
})
elif year_int == 2008:
meta.append(self._special_meta_2008(elections))
return meta
def _state_leg_meta(self, year, elections):
payload = []
meta = {
'ocd_id': 'ocd-division/country:us/state:md/sldl:all',
'name': 'State Legislative Districts',
}
general, primary, special = self._races_by_type(elections)
if general is not None:
# Add General meta to payload
general_url = self._build_state_leg_url(year)
general_filename = self._generate_state_leg_filename(general_url, general['start_date'])
gen_meta = meta.copy()
gen_meta.update({
'raw_url': general_url,
'generated_filename': general_filename,
'election': general['slug']
})
payload.append(gen_meta)
# Add Primary meta to payload
if primary and int(year) > 2000:
for party in ['Democratic', 'Republican']:
pri_meta = meta.copy()
primary_url = self._build_state_leg_url(year, party)
primary_filename = self._generate_state_leg_filename(primary_url, primary['start_date'])
pri_meta.update({
'raw_url': primary_url,
'generated_filename': primary_filename,
'election': primary['slug']
})
payload.append(pri_meta)
return payload
def _build_state_leg_url(self, year, party=""):
tmplt = self.base_url + "State_Legislative_Districts"
kwargs = {'year': year}
year_int = int(year)
# PRIMARY
# Assume it's a primary if party is present
if party and year_int > 2000:
kwargs['party'] = party
if year_int == 2004:
tmplt += "_%(party)s_Primary_%(year)s"
else:
tmplt += "_%(party)s_%(year)s_Primary"
# GENERAL
else:
# 2000 and 2004 urls end in the 4-digit year
if year_int in (2000, 2004):
tmplt += "_General_%(year)s"
# All others have the year preceding the race type (General/Primary)
else:
tmplt += "_%(year)s_General"
tmplt += ".csv"
return tmplt % kwargs
def _generate_state_leg_filename(self, url, start_date):
bits = [
start_date.replace('-',''),
self.state.lower(),
]
matches = self._apply_party_racetype_regex(url)
if matches['party']:
bits.append(matches['party'].lower())
bits.extend([
matches['race_type'].lower(),
'state_legislative.csv',
])
name = "__".join(bits)
return name
def _county_meta(self, year, elections):
payload = []
general, primary, special = self._races_by_type(elections)
for jurisdiction in self._jurisdictions():
meta = {
'ocd_id': jurisdiction['ocd_id'],
'name': jurisdiction['name'],
}
county = jurisdiction['url_name']
# GENERALS
# Create countywide and precinct-level metadata for general
for precinct_val in (True, False):
if general is not None:
general_url = self._build_county_url(year, county, precinct=precinct_val)
general_filename = self._generate_county_filename(general_url, general['start_date'], jurisdiction)
gen_meta = meta.copy()
gen_meta.update({
'raw_url': general_url,
'generated_filename': general_filename,
'election': general['slug']
})
payload.append(gen_meta)
# PRIMARIES
# For each primary and party combo, generate countywide and precinct metadata
# Primary results not available in 2000
if primary and int(year) > 2000:
for party in ['Democratic', 'Republican']:
for precinct_val in (True, False):
pri_meta = meta.copy()
primary_url = self._build_county_url(year, county, party, precinct_val)
primary
|
littleghosty/forum | mysite/usercenter/views.py | Python | gpl-3.0 | 3,333 | 0.000627 |
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .models import ActivateCode
import uuid
import os
from django.http import HttpResponse
import datetime
def register(request):
error = ""
if request.method == "GET":
return render(request, "user_register.html")
else:
username = request.POST['username'].strip()
email = request.POST['email'].strip()
password = request.POST['password'].strip()
re_password = request.POST['re_password'].strip()
if not username or not password or not email:
error = "任何字段都不能为空"
if password != re_password:
error = "两次密码不一致"
if User.objects.filter(username=username).count() > 0:
error = "用户已存在"
if User.objects.filter(email=email).count() > 0:
error = "该邮箱已注册"
if not error:
user = User.objects.create_user(username=username,
email=email, password=password)
user.is_active = False
user.save()
new_code = str(uuid.uuid4()).replace("-", "")
expire_time = datetime.datetime.now() + datetime.timedelta(days=2)
code_record = ActivateCode(owner=user, code=new_code,
expire_timestamp=expire_time)
code_record.save()
activate_link = "http://%s%s" % (request.get_host(), reverse(
"user_activate", args=[new_code]))
send_mail('[python论坛]激活邮件',
'您的激活链接为: %s' % activate_link,
'huyuanxuan@163.com',
[email],
fail_silently=False)
else:
return render(request, "user_register.html", {"error": error})
return HttpResponse("请查收邮件激活帐户!")
def activate(request, code):
query = ActivateCode.objects.filter(code=code,
expire_timestamp__gte=datetime.datetime.now())
if query.count() > 0:
code_record = query[0]
code_record.owner.is_active = True
code_record.owner.save()
return HttpResponse("激活成功")
else:
return HttpResponse("激活失败")
@login_required
def upload_avatar(request):
if request.method == "GET":
return render(request, "upload_avatar.html")
else:
profile = request.user.userprofile
avatar_file = request.FILES.get("avatar", None)
if not avatar_file:
return HttpResponse("未选择文件")
file_name = request.user.username + avatar_file.name
if avatar_file.size > 50000:
return HttpResponse("图片大小不能超过500KB")
file_path = os.path.join("/usr/share/userres/avatar/", file_name)
with open(file_path, 'wb+') as destination:
for chunk in avatar_file.chunks():
destination.write(chunk)
url = "http://res.myforum.com/avatar/%s" % file_name
profile.avatar = url
profile.save()
return redirect("/")
|
mozman/ezdxf | src/ezdxf/proxygraphic.py | Python | mit | 30,574 | 0.000229 |
# Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
from typing import (
TYPE_CHECKING,
Optional,
Iterable,
Tuple,
List,
Set,
Dict,
cast,
Sequence,
Any,
)
import sys
import struct
import math
from enum import IntEnum
from itertools import repeat
from ezdxf.lldxf import const
from ezdxf.tools.binarydata import bytes_to_hexstr, ByteStream, BitStream
from ezdxf import colors
from ezdxf.math import (
Vec3,
Matrix44,
Z_AXIS,
ConstructionCircle,
ConstructionArc,
OCS,
UCS,
X_AXIS,
)
from ezdxf.entities import factory
import logging
if TYPE_CHECKING:
from ezdxf.eztypes import (
Tags,
TagWriter,
Drawing,
DXFGraphic,
Polymesh,
Polyface,
Polyline,
Hatch,
LWPolyline,
)
logger = logging.getLogger("ezdxf")
CHUNK_SIZE = 127
class ProxyGraphicError(Exception):
pass
def load_proxy_graphic(
tags: "Tags", length_code: int = 160, data_code: int = 310
) -> Optional[bytes]:
binary_data = [
tag.value
for tag in tags.pop_tags(codes=(length_code, data_code))
if tag.code == data_code
]
return b"".join(binary_data) if len(binary_data) else None # type: ignore
def export_proxy_graphic(
data: bytes,
tagwriter: "TagWriter",
length_code: int = 160,
data_code: int = 310,
) -> None:
# Do not export proxy graphic for DXF R12 files
assert tagwriter.dxfversion > const.DXF12
length = len(data)
if length == 0:
return
tagwriter.write_tag2(length_code, length)
index = 0
while index < length:
hex_str = bytes_to_hexstr(data[index : index + CHUNK_SIZE])
tagwriter.write_tag2(data_code, hex_str)
index += CHUNK_SIZE
def has_prim_traits(flags: int) -> bool:
return bool(flags & 0xFFFF)
def prims_have_colors(flags: int) -> bool:
return bool(flags & 0x0001)
def prims_have_layers(flags: int) -> bool:
return bool(flags & 0x0002)
def prims_have_linetypes(flags: int) -> bool:
return bool(flags & 0x0004)
def prims_have_markers(flags: int) -> bool:
return bool(flags & 0x0020)
def prims_have_visibilities(flags: int) -> bool:
return bool(flags & 0x0040)
def prims_have_normals(flags: int) -> bool:
return bool(flags & 0x0080)
def prims_have_orientation(flags: int) -> bool:
return bool(flags & 0x0400)
TRAIT_TESTER = {
"colors": (prims_have_colors, "RL"),
"layers": (prims_have_layers, "RL"),
"linetypes": (prims_have_linetypes, "RL"),
"markers": (prims_have_markers, "RL"),
"visibilities": (prims_have_visibilities, "RL"),
"normals": (prims_have_normals, "3RD"),
}
def read_prim_traits(
bs: ByteStream, types: Sequence[str], prim_flags: int, count: int
) -> Dict:
def read_float_list():
return [bs.read_long() for _ in range(count)]
def read_vertices():
return [Vec3(bs.read_vertex()) for _ in range(count)]
data = dict()
for t in types:
test_trait, data_type = TRAIT_TESTER[t]
if test_trait(prim_flags):
if data_type == "3RD":
data[t] = read_vertices()
elif data_type == "RL":
data[t] = read_float_list()
else:
raise TypeError(data_type)
return data
def read_mesh_traits(
bs: ByteStream, edge_count: int, face_count: int, vertex_count: int
):
# Traits data format:
# all entries are optional
# traits: Dict[str, Dict]
# "edges": Dict[str, List]
# "colors": List[int]
# "layers": List[int] as layer ids
# "linetypes": List[int] as linetype ids
# "markers": List[int]
# "visibilities": List[int]
# "faces": Dict[str, List]
# "colors": List[int]
# "layers": List[int] as layer ids
# "markers": List[int]
# "normals": List[Vec3]
# "visibilities": List[int]
# "vertices": Dict
# "normals": List[Vec3]
# "orientation": bool
traits = dict()
edge_flags = bs.read_long()
if has_prim_traits(edge_flags):
traits["edges"] = read_prim_traits(
bs,
["colors", "layers", "linetypes", "markers", "visibilities"],
edge_flags,
edge_count,
)
face_flags = bs.read_long()
if has_prim_traits(face_flags):
traits["faces"] = read_prim_traits(
bs,
["colors", "layers", "markers", "normals", "visibilities"],
face_flags,
face_count,
)
# Note: DXF entities PolyFaceMesh and Mesh do not support vertex normals!
# disable useless reading process by vertex_count = 0
if vertex_count > 0:
vertex_flags = bs.read_long()
if has_prim_traits(vertex_flags):
vertices = dict()
if prims_have_normals(vertex_flags):
vertices["normals"] = [
Vec3(bs.read_vertex()) for _ in range(vertex_count)
]
if prims_have_orientation(vertex_flags):
vertices["orientation"] = bool(bs.read_long()) # type: ignore
traits["vertices"] = vertices
return traits
class ProxyGraphicTypes(IntEnum):
EXTENTS = 1
CIRCLE = 2
CIRCLE_3P = 3
CIRCULAR_ARC = 4
CIRCULAR_ARC_3P = 5
POLYLINE = 6
POLYGON = 7
MESH = 8
SHELL = 9
TEXT = 10
TEXT2 = 11
XLINE = 12
RAY = 13
ATTRIBUTE_COLOR = 14
UNUSED_15 = 15
ATTRIBUTE_LAYER = 16
UNUSED_17 = 17
ATTRIBUTE_LINETYPE = 18
ATTRIBUTE_MARKER = 19
ATTRIBUTE_FILL = 20
UNUSED_21 = 21
ATTRIBUTE_TRUE_COLOR = 22
ATTRIBUTE_LINEWEIGHT = 23
ATTRIBUTE_LTSCALE = 24
ATTRIBUTE_THICKNESS = 25
ATTRIBUTE_PLOT_STYLE_NAME = 26
PUSH_CLIP = 27
POP_CLIP = 28
PUSH_MATRIX = 29
PUSH_MATRIX2 = 30
POP_MATRIX = 31
POLYLINE_WITH_NORMALS = 32
LWPOLYLINE = 33
ATTRIBUTE_MATERIAL = 34
ATTRIBUTE_MAPPER = 35
UNICODE_TEXT = 36
UNKNOWN_37 = 37
UNICODE_TEXT2 = 38
class ProxyGraphic:
def __init__(self, data: bytes, doc: "Drawing" = None):
self._doc = doc
self._factory = factory.new
self._buffer: bytes = data
self._index: int = 8
self.dxfversion = doc.dxfversion if doc else "AC1015"
self.color: int = const.BYLAYER
self.layer: str = "0"
self.linetype: str = "BYLAYER"
self.marker_index: int = 0
self.fill: bool = False
self.true_color: Optional[int] = None
self.lineweight: int = const.LINEWEIGHT_DEFAULT
self.ltscale: float = 1.0
self.thickness: float = 0.0
# Layer list in storage order
self.layers: List[str] = []
# Linetypes list in storage order
self.linetypes: List[str] = []
# List of text styles, with font name as key
self.textstyles: Dict[str, str] = dict()
self.required_fonts: Set[str] = set()
self.matrices: List[Matrix44] = []
if self._doc:
self.layers = list(layer.dxf.name for layer in self._doc.layers)
self.linetypes = list(
linetype.dxf.name for linetype in self._doc.linetypes
)
self.textstyles = {
style.dxf.font: style.dxf.name for style in self._doc.styles
}
def info(self) -> Iterable[Tuple[int, int, str]]:
index = self._index
buffer = self._buffer
while index < len(buffer):
size, type_ = struct.unpack_from("<2L", self._buffer, offset=index)
try:
name = ProxyGraphicTypes(type_).name
except ValueError:
name = f"UNKNOWN_TYPE_{type_}"
yield index, size, name
index += size
def virtual_entities(self) -> Iterable["DXFGraphic"]:
return self.__virtual_entities__()
def __virtual_entities__(self) -> Iterable["DXFGraphic"]:
"""Implements the SupportsVirtualEntities protocol."""
try:
yield from self.unsafe_virtual_entities()
exce
|
pmclanahan/django-mozilla-product-details | product_details/settings_defaults.py | Python | bsd-3-clause | 335 | 0 |
import logging
import os
# URL to clone product_details JSON files from.
# Include trailing slash.
PROD_DETAILS_URL = 'http://svn.mozilla.org/libs/product-details/json/'
# Target dir to drop JSON files into (must be writable)
PROD_DETAILS_DIR = os.path.join(os.path.dirname(__file__), 'json')
# log level.
LOG_LEVEL = logging.INFO
|
Orange9000/Codewars | Solutions/beta/beta_answer_the_students_questions.py | Python | mit | 293 | 0.040956 |
from collections import Counter
def answer(q,inf):
s = Counter(q.split(' ')); r = [-1,-1]
for i,j in enumerate(inf):
check = sum(s.get(w,0) for w in j.split(' '))
if check != 0 and check > r[1]: r = [i,check]
return None if r == [-1,-1] else inf[r[0]]
|
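To make the one-liner above concrete, here is an invented call: `answer` scores each info sentence by how many of its words occur in the question and returns the best-scoring sentence, or `None` when nothing overlaps. The inputs below are illustrative, not from the kata.

```python
info = ["Bob is a builder",
        "the sky is blue today",
        "pi is roughly 3.14"]
print(answer("what color is the sky", info))
# -> "the sky is blue today" ("the", "sky" and "is" all occur in the question)
print(answer("quaternions", info))
# -> None (no word overlap with any sentence)
```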
kvangent/PokeAlarm | PokeAlarm/Events/QuestEvent.py | Python | agpl-3.0 | 6,377 | 0 |
# Standard Library Imports
from datetime import datetime
# 3rd Party Imports
# Local Imports
from PokeAlarm import Unknown
from . import BaseEvent
from PokeAlarm.Utils import get_gmaps_link, get_applemaps_link, \
get_waze_link, get_dist_as_str, get_base_types, get_type_emoji
from PokeAlarm.Utilities.QuestUtils import reward_string, get_item_id, \
get_quest_image
class QuestEvent(BaseEvent):
""" Event representing the discovery of a Quest. """
def __init__(self, data):
""" Creates a new Quest Event based on the given dict. """
super(QuestEvent, self).__init__('quests')
check_for_none = BaseEvent.check_for_none
# Identification
self.stop_id = data['pokestop_id']
self.stop_name = check_for_none(
str, data.get('pokestop_name', data.get('name')), Unknown.REGULAR)
self.stop_image = check_for_none(
str, data.get('pokestop_url', data.get('url')), Unknown.REGULAR)
# Location
self.lat = float(data['latitude'])
self.lng = float(data['longitude'])
# Completed by Manager
self.distance = Unknown.SMALL
self.direction = Unknown.TINY
# Used to reject
self.name = self.stop_id
self.geofence = Unknown.REGULAR
self.custom_dts = {}
# Quest Details
self.quest_type_raw = data['quest_type']
self.quest_type_id = data.get('quest_type_raw')
self.quest_target = data.get('quest_target')
self.quest_task_raw = data.get('quest_task')
self.quest_condition_raw = data.get('quest_condition')
self.quest_template = data.get('quest_template')
self.last_modified = datetime.utcfromtimestamp(data['timestamp'])
# Reward Details
self.reward_type_id = data['quest_reward_type_raw']
self.reward_type_raw = data.get('quest_reward_type')
self.reward_amount = data.get('item_amount', 1)
# Monster Reward Details
self.monster_id = data.get('pokemon_id', 0)
self.monster_form_id = data.get('pokemon_form', 0)
self.monster_costume_id = data.get('pokemon_costume', 0)
self.monster_types = get_base_types(self.monster_id) \
if self.monster_id != 0 else [0, 0]
# Item Reward Details
self.item_amount = self.reward_amount
self.item_type = data.get('item_type')
self.item_id = data.get('item_id', 0)
def generate_dts(self, locale, timezone, units):
""" Return a dict with all the DTS for this event. """
form_name = locale.get_form_name(self.monster_id, self.monster_form_id)
costume_name = locale.get_costume_name(
self.monster_id, self.monster_costume_id)
type1 = locale.get_type_name(self.monster_types[0])
type2 = locale.get_type_name(self.monster_types[1])
dts = self.custom_dts.copy()
dts.update({
# Identification
'stop_id': self.stop_id,
'stop_name': self.stop_name,
'stop_image': self.stop_image,
# Location
'lat': self.lat,
'lng': self.lng,
'lat_5': "{:.5f}".format(self.lat),
'lng_5': "{:.5f}".format(self.lng),
'distance': (
get_dist_as_str(self.distance, units)
if Unknown.is_not(self.distance) else Unknown.SMALL),
'direction': self.direction,
'gmaps': get_gmaps_link(self.lat, self.lng),
'applemaps': get_applemaps_link(self.lat, self.lng),
'waze': get_waze_link(self.lat, self.lng),
'geofence': self.geofence,
# Quest Details
# ToDo: Interpret the `quest_condition` field and use that instead
# of `quest_type`
# Will be able to better serve manager specific locales
# also do this for `quest_task`
'quest_type': self.quest_type_raw,
'quest_type_id': self.quest_type_id,
'quest_target': self.quest_target,
'quest_task': self.quest_task_raw,
'quest_template': self.quest_template,
'last_modified': self.last_modified,
'quest_condition': self.quest_condition_raw,
'quest_image': get_quest_image(self),
# Reward Details
'reward_type_id': self.reward_type_id,
'reward_type': locale.get_quest_type_name(self.reward_type_id),
'reward_type_raw': self.reward_type_raw,
'reward_amount': self.item_amount,
'reward': reward_string(self, locale),
# Monster Reward Details
'mon_name': locale.get_pokemon_name(self.monster_id),
'mon_id': self.monster_id,
'mon_id_3': "{:03}".format(self.monster_id),
'form': form_name,
'form_or_empty': Unknown.or_empty(form_name),
'form_id': self.monster_form_id,
'form_id_2': "{:02d}".format(self.monster_form_id),
'form_id_3': "{:03d}".format(self.monster_form_id),
'costume': costume_name,
'costume_or_empty': Unknown.or_empty(costume_name),
'costume_id': self.monster_costume_id,
'costume_id_2': "{:02d}".format(self.monster_costume_id),
'costume_id_3': "{:03d}".format(self.monster_costume_id),
'type1': type1,
'type1_or_empty': Unknown.or_empty(type1),
'type1_emoji': Unknown.or_empty(get_type_emoji(
self.monster_types[0])),
'type2': type2,
'type2_or_empty': Unknown.or_empty(type2),
'type2_emoji': Unknown.or_empty(get_type_emoji(
self.monster_types[1])),
'types': (
"{}/{}".format(type1, type2)
if Unknown.is_not(type2) else type1),
'types_emoji': (
"{}{}".format(
get_type_emoji(self.monster_types[0]),
get_type_emoji(self.monster_types[1]))
if Unknown.is_not(type2)
else get_type_emoji(self.monster_types[0])),
# Item Reward Details
'raw_item_type': self.item_type,
'item': get_item_id(self.item_id),
'item_id': self.item_id,
'item_id_4': "{:04d}".format(self.item_id)
})
return dts
|
cpcloud/numba | numba/cuda/codegen.py | Python | bsd-2-clause | 12,947 | 0 |
from llvmlite import binding as ll
from llvmlite import ir
from warnings import warn
from numba.core import config, serialize
from numba.core.codegen import Codegen, CodeLibrary
from numba.core.errors import NumbaInvalidConfigWarning
from .cudadrv import devices, driver, nvvm
import ctypes
import numpy as np
import os
import subprocess
import tempfile
CUDA_TRIPLE = 'nvptx64-nvidia-cuda'
def disassemble_cubin(cubin):
# nvdisasm only accepts input from a file, so we need to write out to a
# temp file and clean up afterwards.
fd = None
fname = None
try:
fd, fname = tempfile.mkstemp()
with open(fname, 'wb') as f:
f.write(cubin)
try:
cp = subprocess.run(['nvdisasm', fname], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except FileNotFoundError as e:
if e.filename == 'nvdisasm':
msg = ("nvdisasm is required for SASS inspection, and has not "
"been found.\n\nYou may need to install the CUDA "
"toolkit and ensure that it is available on your "
"PATH.\n")
raise RuntimeError(msg)
return cp.stdout.decode('utf-8')
finally:
if fd is not None:
os.close(fd)
if fname is not None:
os.unlink(fname)
class CUDACodeLibrary(serialize.ReduceMixin, CodeLibrary):
"""
The CUDACodeLibrary generates PTX, SASS, cubins for multiple different
compute capabilities. It also loads cubins to multiple devices (via
get_cufunc), which may be of different compute capabilities.
"""
def __init__(self, codegen, name, entry_name=None, max_registers=None,
nvvm_options=None):
"""
codegen:
Codegen object.
name:
Name of the function in the source.
entry_name:
Name of the kernel function in the binary, if this is a global
kernel and not a device function.
max_registers:
The maximum register usage to aim for when linking.
nvvm_options:
Dict of options to pass to NVVM.
"""
super().__init__(codegen, name)
# The llvmlite module for this library.
self._module = None
# CodeLibrary objects that will be "linked" into this library. The
# modules within them are compiled from NVVM IR to PTX along with the
# IR from this module - in that sense they are "linked" by NVVM at PTX
# generation time, rather than at link time.
self._linking_libraries = set()
# Files to link with the generated PTX. These are linked using the
# Driver API at link time.
self._linking_files = set()
# Maps CC -> PTX string
self._ptx_cache = {}
# Maps CC -> cubin
self._cubin_cache = {}
# Maps CC -> linker info output for cubin
self._linkerinfo_cache = {}
# Maps Device numeric ID -> cufunc
self._cufunc_cache = {}
self._max_registers = max_registers
if nvvm_options is None:
nvvm_options = {}
self._nvvm_options = nvvm_options
self._entry_name = entry_name
def get_llvm_str(self):
return str(self._module)
def get_asm_str(self, cc=None):
return self._join_ptxes(self._get_ptxes(cc=cc))
def _get_ptxes(self, cc=None):
if not cc:
ctx = devices.get_context()
device = ctx.device
cc = device.compute_capability
ptxes = self._ptx_cache.get(cc, None)
if ptxes:
return ptxes
arch = nvvm.get_arch_option(*cc)
options = self._nvvm_options.copy()
options['arch'] = arch
if not nvvm.NVVM().is_nvvm70:
# Avoid enabling debug for NVVM 3.4 as it has various issues. We
# need to warn the user that we're doing this if any of the
# functions that they're compiling have `debug=True` set, which we
# can determine by checking the NVVM options.
for lib in self.linking_libraries:
if lib._nvvm_options.get('debug'):
msg = ("debuginfo is not generated for CUDA versions "
f"< 11.2 (debug=True on function: {lib.name})")
warn(NumbaInvalidConfigWarning(msg))
options['debug'] = False
irs = [str(mod) for mod in self.modules]
if options.get('debug', False):
# If we're compiling with debug, we need to compile modules with
# NVVM one at a time, because it does not support multiple modules
# with debug enabled:
# https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#source-level-debugging-support
ptxes = [nvvm.llvm_to_ptx(ir, **options) for ir in irs]
else:
# Otherwise, we compile all modules with NVVM at once because this
# results in better optimization than separate compilation.
ptxes = [nvvm.llvm_to_ptx(irs, **options)]
# Sometimes the result from NVVM contains trailing whitespace and
# nulls, which we strip so that the assembly dump looks a little
# tidier.
ptxes = [x.decode().strip('\x00').strip() for x in ptxes]
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self._name).center(80, '-'))
print(self._join_ptxes(ptxes))
print('=' * 80)
self._ptx_cache[cc] = ptxes
return ptxes
def _join_ptxes(self, ptxes):
return "\n\n".join(ptxes)
def get_cubin(self, cc=None):
if cc is None:
ctx = devices.get_context()
device = ctx.device
cc = device.compute_capability
cubin = self._cubin_cache.get(cc, None)
if cubin:
return cubin
linker = driver.Linker(max_registers=self._max_registers, cc=cc)
ptxes = self._get_ptxes(cc=cc)
for ptx in ptxes:
linker.add_ptx(ptx.encode())
for path in self._linking_files:
linker.add_file_guess_ext(path)
cubin_buf, size = linker.complete()
# We take a copy of the cubin because it's owned by the linker
cubin_ptr = ctypes.cast(cubin_buf, ctypes.POINTER(ctypes.c_char))
cubin = bytes(np.ctypeslib.as_array(cubin_ptr, shape=(size,)))
self._cubin_cache[cc] = cubin
self._linkerinfo_cache[cc] = linker.info_log
return cubin
def get_cufunc(self):
if self._entry_name is None:
msg = "Missing entry_name - are you trying to get the cufunc " \
"for a device function?"
raise RuntimeError(msg)
ctx = devices.get_context()
device = ctx.device
cufunc = self._cufunc_cache.get(device.id, None)
if cufunc:
return cufunc
cubin = self.get_cubin(cc=device.compute_capability)
module = ctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self._entry_name)
# Populate caches
self._cufunc_cache[device.id] = cufunc
return cufunc
def get_linkerinfo(self, cc):
try:
return self._linkerinfo_cache[cc]
except KeyError:
raise KeyError(f'No linkerinfo for CC {cc}')
def get_sass(self, cc=None):
return disassemble_cubin(self.get_cubin(cc=cc))
def add_ir_module(self, mod):
self._raise_if_finalized()
if self._module is not None:
raise RuntimeError('CUDACodeLibrary only supports one module')
self._module = mod
def add_linking_library(self, library):
library._ensure_finalized()
# We don't want to allow linking more libraries in after finalization
# because our linked libraries are modified by the finalization, and we
# won't be able to finalize again after adding new ones
self._raise_if_finalized()
self._linking_libraries.add(library)
def add_linking_file(self, filepath)
|
dgjnpr/py-junos-eznc | lib/jnpr/junos/cfg/srx/shared_ab.py | Python | apache-2.0 | 2,925 | 0 |
# debugging
from lxml import etree
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.shared_ab_addr import SharedAddrBookAddr
from jnpr.junos.cfg.srx.shared_ab_set import SharedAddrBookSet
class SharedAddrBook(Resource):
"""
[edit security address-book <name>]
Resource <name>
The address book name, string
Manages:
addr - SharedAddrBookAddr resources
set - SharedAddrBookAddrSet resources
"""
PROPERTIES = [
'description',
'$addrs', # read-only addresses
'$sets', # read-only address-sets
'zone_list' # attached zone
]
def __init__(self, junos, name=None, **kvargs):
if name is None:
# resource-manager
Resource.__init__(self, junos, name, **kvargs)
return
self.addr = SharedAddrBookAddr(junos, parent=self)
self.set = SharedAddrBookSet(junos, parent=self)
self._manages = ['addr', 'set']
Resource.__init__(self, junos, name, **kvargs)
def _xml_at_top(self):
return E.security(
E('address-book', E.name(self._name))
)
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_hook_read_begin(self, xml):
ab = xml.find('.//address-book')
ab.append(E('description'))
ab.append(E('address', JXML.NAMES_ONLY))
ab.append(E('address-set', JXML.NAMES_ONLY))
ab.append(E('attach'))
return True
def _xml_at_res(self, xml):
return xml.find('.//address-book')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['$addrs'] = [name.text for name in as_xml.xpath('address/name')]
to_py['$sets'] = [
name.text for name in as_xml.xpath('address-set/name')]
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_zone_list(self, xml):
x_attach = E('attach')
self._xml_list_property_add_del_names(x_attach,
prop_name='zone_list',
element_name='zone')
xml.append(x_attach)
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
raise RuntimeError("Need to implement!")
def _r_catalog(self):
raise RuntimeError("Need to implement!")
|
rackerlabs/django-DefectDojo | dojo/components/sql_group_concat.py | Python | bsd-3-clause | 1,326 | 0.004525 |
from django.db.models import Aggregate, CharField
class Sql_GroupConcat(Aggregate):
function = 'GROUP_CONCAT'
allow_distinct = True
def __init__(self, expression, separator, distinct=False, ordering=None, **extra):
self.separator = separator
super(Sql_GroupConcat, self).__init__(expression,
distinct='DISTINCT ' if distinct else '',
ordering=' ORDER BY %s' % ordering if ordering is not None else '',
separator=' SEPARATOR "%s"' % separator,
output_field=CharField(),
**extra)
def as_mysql(self, compiler, connection):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s%(separator)s)',
separator=' SEPARATOR \'%s\'' % self.separator)
def as_sql(self, compiler, connection, **extra):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s)',
**extra)
|
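For context, a sketch of how an aggregate like this is typically used: annotating a grouped queryset. The `Finding` model and its fields are invented here for illustration, not taken from this file.

```python
# Hypothetical usage; Finding, test and title are illustrative names.
grouped = (Finding.objects
           .values('test')
           .annotate(titles=Sql_GroupConcat('title', separator=', ')))
# On MySQL this renders GROUP_CONCAT(title SEPARATOR ', ') per group;
# other backends fall back to the plain as_sql() template above.
```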
catkin/catkin_tools | tests/utils.py | Python | apache-2.0 | 4,974 | 0.000402 |
import functools
import os
import re
import shutil
import subprocess
import sys
import tempfile
from io import StringIO
from subprocess import TimeoutExpired
from catkin_tools.commands.catkin import main as catkin_main
TESTS_DIR = os.path.dirname(__file__)
MOCK_DIR = os.path.join(TESTS_DIR, 'mock_resources')
def catkin_success(args, env={}):
orig_environ = dict(os.environ)
try:
os.environ.update(env)
catkin_main(args)
except SystemExit as exc:
ret = exc.code
if ret != 0:
import traceback
traceback.print_exc()
finally:
os.environ = orig_environ
return ret == 0
def catkin_failure(args, env={}):
orig_environ = dict(os.environ)
try:
os.environ.update(env)
catkin_main(args)
except SystemExit as exc:
ret = exc.code
finally:
os.environ = orig_environ
return ret != 0
class AssertRaisesContext(object):
def __init__(self, expected, expected_regex=None):
self.expected = expected
self.expected_regex = expected_regex
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if self.expected is None:
if exc_type is None:
return True
else:
raise
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise AssertionError("{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
raise
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
expected_regex = re.compile(expected_regex)
if not expected_regex.search(str(exc_value)):
raise AssertionError("'{0}' does
|
not match '{1}'".format(expected_regex.pattern, str(exc_value)))
return True
class redirected_stdio(object):
def __enter__(self):
self.original_stdout = sys.stdout
self.original_stderr = sys.stderr
self.out = StringIO()
self.err = StringIO()
sys.stdout = self.out
sys.stderr = self.err
return self.out, self.err
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.original_stdout
sys.stderr = self.original_stderr
print(self.out.getvalue())
class temporary_directory(object):
def __init__(self, prefix=''):
self.prefix = prefix
self.delete = False
def __enter__(self):
self.original_cwd = os.getcwd()
self.temp_path = tempfile.mkdtemp(prefix=self.prefix)
os.chdir(self.temp_path)
return self.temp_path
def __exit__(self, exc_type, exc_value, traceback):
if self.delete and self.temp_path and os.path.exists(self.temp_path):
print('Deleting temporary testing directory: %s' % self.temp_path)
shutil.rmtree(self.temp_path)
if self.original_cwd and os.path.exists(self.original_cwd):
os.chdir(self.original_cwd)
def in_temporary_directory(f):
@functools.wraps(f)
def decorated(*args, **kwds):
with temporary_directory() as directory:
from inspect import getargspec
# If f takes a 'directory' kwarg and kwds doesn't already have
# one, inject it
if 'directory' not in kwds and 'directory' in getargspec(f)[0]:
kwds['directory'] = directory
return f(*args, **kwds)
decorated.__name__ = f.__name__
return decorated
def run(args, **kwargs):
"""
Call to Popen, returns (errcode, stdout, stderr)
"""
print("run:", args)
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
cwd=kwargs.get('cwd', os.getcwd()))
print("P==", p.__dict__)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout, stderr)
def assert_cmd_success(cmd, **kwargs):
"""
Asserts that running a command returns zero.
returns: stdout
"""
print(">>>", cmd, kwargs)
(r, out, err) = run(cmd, **kwargs)
print("<<<", str(out))
assert r == 0, "cmd failed with result %s:\n %s " % (r, str(cmd))
return out
def assert_cmd_failure(cmd, **kwargs):
"""
Asserts that running a command returns non-zero.
returns: stdout
"""
print(">>>", cmd, kwargs)
(r, out, err) = run(cmd, withexitstatus=True, **kwargs)
print("<<<", str(out))
assert 0 != r, "cmd succeeded, but it should fail: %s result=%u\noutput=\n%s" % (cmd, r, out)
return out
def assert_files_exist(prefix, files):
"""
Assert that all files exist in the prefix.
"""
for f in files:
p = os.path.join(prefix, f)
print("Checking for", p)
assert os.path.exists(p), "%s doesn't exist" % p
|
Xavierwei/porsche_lemans | web/api/performance/util/pylot_win_recorder.py | Python | mit | 1,833 | 0.00491 |
#!/usr/bin/env python
#
# Copyright (c) 2007-2009 Corey Goldberg (corey@goldb.org)
# License: GNU GPLv3
#
#
# This file is part of Pylot.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See the GNU General Public License
# for more details.
#
#
# Original code by David Solomon (dave.c.solomon@gmail.com)
#
#
# Only works on Windows.
# Browser capture tool. Builds Pylot test cases from an IE browsing session.
# You must have the Win32 Extensions for Python installed
# http://sourceforge.net/projects/pywin32/
import sys
import threading
import pythoncom
from win32com.client import Dispatch, WithEvents
stop_event = threading.Event()
finished = False
class EventSink(object):
def OnBeforeNavigate2(self, *args):
print ' <case>'
url = args[1]
post_data = args[4]
headers = args[5]
print ' <url>%s</url>' % url
if post_data:
print ' <method>POST</method>'
print ' <body><![CDATA[%s]]></body>' % post_data
if headers:
print ' <add_header>%s</add_header>' % headers
print " </case>"
stop_event.set()
def OnQuit(self):
global finished
finished = True
ie.Visible = 0
stop_event.set()
ie = Dispatch('InternetExplorer.Application', EventSink)
ev = WithEvents(ie, EventSink)
ie.Visible = 1
print '<testcases>'
while not finished:
pythoncom.PumpWaitingMessages()
stop_event.wait(.05)
stop_event.clear()
print '</testcases>'
|
jinankjain/zamboni | apps/zadmin/urls.py | Python | bsd-3-clause | 2,133 | 0.001406 |
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from addons.urls import ADDON_ID
from amo.urlresolvers import reverse
from . import views
urlpatterns = patterns('',
# AMO stuff.
url('^$', views.index, name='zadmin.index'),
url('^models$', lambda r: redirect('admin:index'), name='zadmin.home'),
url('^addon/manage/%s/$' % ADDON_ID,
views.addon_manage, name='zadmin.addon_manage'),
url('^addon/recalc-hash/(?P<file_id>\d+)/', views.recalc_hash,
name='zadmin.recalc_hash'),
url('^env$', views.env, name='amo.env'),
url('^hera', views.hera, name='zadmin.hera'),
url('^memcache$', views.memcache, name='zadmin.memcache'),
url('^settings', views.show_settings, name='zadmin.settings'),
url('^fix-disabled', views.fix_disabled_file, name='zadmin.fix-disabled'),
url(r'^email_preview/(?P<topic>.*)\.csv$',
views.email_preview_csv, name='zadmin.email_preview_csv'),
url('^mail$', views.mail, name='zadmin.mail'),
url('^email-devs$', views.email_devs, name='zadmin.email_devs'),
url('^generate-error$', views.generate_error,
name='zadmin.generate-error'),
url('^export_email_addresses$', views.export_email_addresses,
name='zadmin.export_email_addresses'),
url('^email_addresses_file$', views.email_addresses_file,
name='zadmin.email_addresses_file'),
url('^price-tiers$', views.price_tiers, name='zadmin.price_tiers'),
# The Django admin.
url('^models/', include(admin.site.urls)),
url('^models/(?P<app_id>.+)/(?P<model_id>.+)/search.json$',
views.general_search, name='zadmin.search'),
)
# Hijack the admin's login to use our pages.
def login(request):
# If someone is already auth'd then they're getting directed to login()
# because they don't have sufficient permissions.
if request.user.is_authenticated():
raise PermissionDenied
else:
return redirect('%s?to=%s' % (reverse('users.login'), request.path))
admin.site.login = login
|
ecreall/lagendacommun | lac/views/lac_view_manager/questionnaire/improve.py | Python | agpl-3.0 | 3,097 | 0.000969 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
import colander
from pyramid.view import view_config
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.form import FormView
from pontus.schema import Schema, select
from pontus.widget import RadioChoiceWidget
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from lac.views.widget import EmailInputWidget
from lac.content.processes.lac_view_manager.behaviors import (
Improve)
from lac.content.lac_application import CreationCulturelleApplication
from lac import _
class ImproveStudyReport(BasicView):
title = 'Alert improve'
name = 'alertimprove'
template = 'lac:views/lac_view_manager/questionnaire/templates/improve_info.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class Improve1Schema(Schema):
id = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="ID",
missing="improve"
)
url = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="URL",
missing="None"
)
improvement = colander.SchemaNode(
colander.String(),
widget=deform.widget.TextAreaWidget(rows=3, cols=30),
title=_('Vos suggestions')
)
email = colander.SchemaNode(
colander.String(),
widget=EmailInputWidget(),
validator=colander.All(
colander.Email(),
colander.Length(max=100)
),
title=_('Email')
)
class ImproveFormView(FormView):
title = _('Votre avis')
schema = select(Improve1Schema(),
['id', 'url', 'improvement', 'email'])
behaviors = [Improve]
formid = 'formimprove'
name = 'improveform'
def before_update(self):
user = get_current()
if getattr(user, 'email', ''):
self.schema.get('email').widget = deform.widget.HiddenWidget()
def default_data(self):
user = get_current()
return {'email': getattr(user, 'email', '')}
@view_config(
name='improve',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class ImproveView(MultipleView):
title = _('Votre avis')
name = 'improve'
viewid = 'improve'
template = 'daceui:templates/simple_mergedmultipleview.pt'
views = (ImproveStudyReport, ImproveFormView)
validators = [Improve.get_validator()]
requirements = {'css_links': [],
'js_links': ['lac:static/js/questionnaire.js']}
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{Improve: ImproveView})
|
MakarenaLabs/Orator-Google-App-Engine | orator/orm/relations/has_many_through.py | Python | mit | 5,076 | 0.000985 |
# -*- coding: utf-8 -*-
from ...query.expression import QueryExpression
from .relation import Relation
class HasManyThrough(Relation):
_first_key = None
_second_key = None
_far_parent = None
def __init__(self, query, far_parent, parent, first_key, second_key):
"""
:param query: A Builder instance
:type query: Builder
:param far_parent: The far parent model
:type far_parent: Model
:param parent: The parent model
:type parent: Model
:type first_key: str
:type second_key: str
"""
self._first_key = first_key
self._second_key = second_key
self._far_parent = far_parent
super(HasManyThrough, self).__init__(query, parent)
def add_constraints(self):
"""
Set the base constraints on the relation query.
:rtype: None
"""
parent_table = self._parent.get_table()
self._set_join()
if self._constraints:
self._query.where('%s.%s' % (parent_table, self._first_key), '=', self._far_parent.get_key())
def get_relation_count_query(self, query, parent):
"""
Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder
"""
parent_table = self._parent.get_table()
self._set_join(query)
query.select(QueryExpression('COUNT(*)'))
key = self.wrap('%s.%s' % (parent_table, self._first_key))
return query.where(self.get_has_compare_key(), '=', QueryExpression(key))
def _set_join(self, query=None):
"""
Set the join clause for the query.
"""
if not query:
query = self._query
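        # Join the intermediate (parent) table: its qualified key must match the related table's second key.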
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key)
def add_eager_constraints(self, models):
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
        table = self._parent.get_table()
        self._query.where_in('%s.%s' % (table, self._first_key), self.get_keys(models))
def init_relation(self, models, relation):
"""
Initialize the relation on a set of models.
:type models: list
:type relation: str
"""
for model in models:
model.set_relation(relation, self._related.new_collection())
return models
def match(self, models, results, relation):
"""
Match the eagerly loaded results to their parents.
:type models: list
:type results: Collection
:type relation: str
"""
dictionary = self._build_dictionary(results)
for model in models:
key = model.get_key()
relationship = self.new_instance(model)
if key in dictionary:
value = self._related.new_collection(dictionary[key])
else:
value = self._related.new_collection()
relationship.set_results(value)
model.set_relation(relation, relationship)
return models
def _build_dictionary(self, results):
"""
Build model dictionary keyed by the relation's foreign key.
:param results: The results
:type results: Collection
:rtype: dict
"""
foreign = self._first_key
dictionary = {}
for result in results:
key = getattr(result, foreign)
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(result)
return dictionary
def get_results(self):
"""
Get the results of the relationship.
"""
return self.get()
def get(self, columns=None):
"""
Execute the query as a "select" statement.
:type columns: list
:rtype: orator.Collection
"""
if columns is None:
columns = ['*']
select = self._get_select_columns(columns)
models = self._query.add_select(*select).get_models()
if len(models) > 0:
models = self._query.eager_load_relations(models)
return self._related.new_collection(models)
def _get_select_columns(self, columns=None):
"""
Set the select clause for the relation query.
:param columns: The columns
:type columns: list
:rtype: list
"""
if columns == ['*'] or columns is None:
columns = ['%s.*' % self._related.get_table()]
return columns + ['%s.%s' % (self._parent.get_table(), self._first_key)]
def get_has_compare_key(self):
return self._far_parent.get_qualified_key_name()
def new_instance(self, model):
return HasManyThrough(
self._related.new_query(),
model,
self._parent,
self._first_key,
self._second_key
)
|
IlyaSergeev/taxi_service
|
first_app/wsgi.py
|
Python
|
mit
| 393
| 0.002545
|
"""
WSGI config for first_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJ
|
ANGO_SETTINGS_MODULE", "first_app.settings")
fr
|
om django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
chen0031/rekall
|
rekall-core/rekall/plugins/collectors/darwin/sessions.py
|
Python
|
gpl-2.0
| 7,561
| 0.000132
|
# Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Darwin Session collectors.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
from rekall.entities import definitions
from rekall.plugins.collectors.darwin import common
from rekall.plugins.collectors.darwin import zones
class DarwinTerminalUserInferor3000(common.DarwinEntityCollector):
"""Infers the relationship between usernames and UIDs using tty sessions."""
outputs = ["User"]
collect_args = dict(
terminals=("Terminal/file matches (has component Permissions) and "
"Terminal/session"))
complete_input = True
def collect(self, hint, terminals):
for terminal in terminals:
owner = terminal["Terminal/file"]["Permissions/owner"]
user = terminal["Terminal/session"]["Session/user"]
# Now tell the manager that these two users are the same user.
if owner and user:
yield user.identity | owner.identity
class DarwinTTYZoneCollector(zones.DarwinZoneElementCollector):
outputs = ["Struct/type=tty"]
zone_name = "ttys"
type_name = "tty"
def validate_element(self, tty):
return tty.t_lock == tty
class DarwinClistParser(common.DarwinEntityCollector):
outputs = ["Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(clists="Struct/type is 'clist'")
def collect(self, hint, clists):
for entity in clists:
clist = entity["Struct/base"]
yield [entity.identity,
definitions.Buffer(kind="ring",
state="freed",
contents=clist.recovered_contents,
start=clist.c_cs,
end=clist.c_ce,
size=clist.c_cn)]
class DarwinTTYParser(common.DarwinEntityCollector):
outputs = ["Terminal", "Struct/type=vnode", "Struct/type=clist",
"Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(ttys="Struct/type is 'tty'")
def collect(self, hint, ttys):
for entity in ttys:
file_identity = None
session_identity = None
tty = entity["Struct/base"]
session = tty.t_session.deref()
vnode = session.s_ttyvp
if session:
session_identity = self.manager.identify({
"Struct/base": session})
if vnode:
# Look, it has a vnode!
yield definitions.Struct(base=vnode,
type="vnode")
file_identity = self.manager.identify({
"Struct/base": vnode})
# Yield just the stubs of the input and output ring buffers.
# DarwinClistParser will grab these if it cares.
yield [definitions.Struct(base=tty.t_rawq,
type="clist"),
definitions.Buffer(purpose="terminal_input",
context=entity.identity)]
yield [definitions.Struct(base=tty.t_outq,
type="clist"),
definitions.Buffer(purpose="terminal_output",
context=entity.identity)]
# Last, but not least, the Terminal itself.
yield [entity.identity,
definitions.Terminal(
session=session_identity,
file=file_identity)]
class DarwinSessionParser(common.DarwinEntityCollector):
"""Collects session entities
|
from the memory objects."""
_name = "sessions"
outputs = ["Session",
"User",
|
"Struct/type=tty",
"Struct/type=proc"]
collect_args = dict(sessions="Struct/type is 'session'")
def collect(self, hint, sessions):
for entity in sessions:
session = entity["Struct/base"]
# Have to sanitize the usernames to prevent issues when comparing
# them later.
username = str(session.s_login).replace("\x00", "")
if username:
user_identity = self.manager.identify({
"User/username": username})
yield [user_identity,
definitions.User(
username=username)]
else:
user_identity = None
sid = session.s_sid
# Turns out, SID is not always unique. This is disabled as it is
# not being currently used, and I need to investigate the causes
# of duplicate sessions occurring on 10.10.
# session_identity = self.manager.identify({
# "Session/sid": sid}) | entity.identity
session_identity = entity.identity
if session.s_ttyp:
yield definitions.Struct(
base=session.s_ttyp,
type="tty")
if session.s_leader and session.s_leader.validate():
yield definitions.Struct(
base=session.s_leader.deref(),
type="proc")
yield [session_identity,
definitions.Session(
user=user_identity,
sid=sid),
definitions.Named(
name="SID %d" % int(sid),
kind="Session")]
class DarwinSessionZoneCollector(zones.DarwinZoneElementCollector):
"""Collects sessions from the sessions allocation zone."""
outputs = ["Struct/type=session"]
zone_name = "session"
type_name = "session"
def validate_element(self, session):
return session.s_count > 0 and session.s_leader.p_argc > 0
class DarwinSessionCollector(common.DarwinEntityCollector):
"""Collects sessions."""
outputs = ["Struct/type=session"]
def collect(self, hint):
session_hash_table_size = self.profile.get_constant_object(
"_sesshash", "unsigned long")
# The hashtable is an array to session list heads.
session_hash_table = self.profile.get_constant_object(
"_sesshashtbl",
target="Pointer",
target_args=dict(
target="Array",
target_args=dict(
target="sesshashhead",
count=session_hash_table_size.v())))
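        # Walk every hash bucket and follow each session's s_hash linked list.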
for sesshashhead in session_hash_table:
for session in sesshashhead.lh_first.walk_list("s_hash.le_next"):
yield definitions.Struct(
base=session,
type="session")
|
NeCTAR-RC/designate
|
designate/api/v2/controllers/limits.py
|
Python
|
apache-2.0
| 1,347
| 0
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from designate.central import rpcapi as central_rpcapi
from designate.openstack.common import log as logging
from designate.api.v2.controllers import rest
from designate.api.v2.views import limits as limits_view
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
class LimitsController(rest.RestController):
    _view = limits_view.LimitsView()
@pecan.expose(template='json:', content_type='application/json')
def get_all(self):
request = pecan.request
context = pecan.request.environ['context']
absolute_limits = central_api.get_absolute_limits(context)
return self._view.show(context, request, absolute_limits)
|
akuks/pretix
|
src/pretix/plugins/banktransfer/payment.py
|
Python
|
apache-2.0
| 2,341
| 0.002136
|
import json
from collections import OrderedDict
from django import forms
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from pretix.base.payment import BasePaymentProvider
class BankTransfer(BasePaymentProvider):
identifier = 'banktransfer'
verbose_name = _('Bank transfer')
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('bank_details',
                 forms.CharField(
widget=forms.Textarea,
label=_('Bank account details'),
))
]
)
def payment_form_render(self, request) -> str:
template = get_template('pretixplugins/banktransfer/checkout_payment_form.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings}
return template.render(ctx)
def checkout_prepare(self, request, total):
return True
def payment_is_valid_session(self, request):
return True
def checkout_confirm_render(self, request):
form = self.payment_form(request)
template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html')
ctx = {'request': request, 'form': form, 'settings': self.settings}
return template.render(ctx)
def order_pending_mail_render(self, order) -> str:
template = get_template('pretixplugins/banktransfer/email/order_pending.txt')
ctx = {'event': self.event, 'order': order, 'settings': self.settings}
return template.render(ctx)
def order_pending_render(self, request, order) -> str:
template = get_template('pretixplugins/banktransfer/pending.html')
ctx = {'request': request, 'order': order, 'settings': self.settings}
return template.render(ctx)
def order_control_render(self, request, order) -> str:
if order.payment_info:
payment_info = json.loads(order.payment_info)
else:
payment_info = None
template = get_template('pretixplugins/banktransfer/control.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings,
'payment_info': payment_info, 'order': order}
return template.render(ctx)
|
timbotron/ICLS
|
framework.py
|
Python
|
gpl-3.0
| 2,817
| 0.048988
|
# This file is part of ICLS.
#
# ICLS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ICLS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ICLS. If not, see <http://www.gnu.org/licenses/>.
import xml.dom.minidom
from time import strftime, strptime
from sys import exit
from textwrap import wrap
from os import path
def colorize(the_color='blue',entry='',new_line=0):
color={'gray':30,'green':32,'red':31,'blue':34,'magenta':35,'cyan':36,'white':37,'highgreen':42,'highblue':44,'highred':41,'highgray':47}
if new_line==1:
        new_line='\n'
else:
new_line=''
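    # Wrap the entry in ANSI escape sequences for the requested color.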
return_me='\033[1;'+str(color[the_color])+'m'+entry+'\033[1;m'+new_line
return return_me
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# Only if error is one that halts things, stop script
def aws_print_error(error_obj):
error_code=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Code')[0].childNodes)
error_message=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Message')[0].childNodes)
error_message=colorize('red',"ERROR",1)+colorize('red',"AWS Error Code: ")+error_code+colorize('red',"\nError Message: ")+error_message
print error_message
exit()
return True
def print_error(error_text):
error_message=colorize('red',"ERROR",1)+colorize('red',"\nError Message: ")+error_text
print error_message
exit()
return True
#takes an entry, and makes it pretty!
def makeover(entry,ismonochrome=False):
if ismonochrome==False:
output=colorize('gray','========================================',1)
output+=colorize('cyan',entry['entry'],1)
output+=colorize('cyan',strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000")),1)
output+=colorize('gray','ID: '+entry.name,0)
else:
output="========================================\n"
output+=entry['entry']+"\n"
output+=strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000"))+"\n"
output+='ID: '+entry.name
return output
#If, during parsing, help was flagged print out help text and then exit TODO read it from a md file
def print_help():
filepath = path.join(path.dirname(path.abspath(__file__)), 'DOCUMENTATION.mkd')
f = open(filepath,'r')
print f.read()
f.close()
exit()
|
lukasklein/pruefungsplan
|
pruefungsplan/notifier/views.py
|
Python
|
bsd-3-clause
| 3,148
| 0
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.edit import FormView
from .forms import SignUpForm, ExamSignUpForm
from .models import Notification
from .utils import send_email, send_sms
class ExamSignUpView(FormView):
kind = 'exam'
form_class = ExamSignUpForm
template_name = 'home.html'
def form_valid(self, form):
notification, sms = form.draft_notification()
if sms:
send_sms(
sms,
'Dein Code fuer die Pruefungsplanbenachrichtigung \
lautet: %s' % (notification.sms_code)
)
send_email(
notification.email,
'Bitte bestaetige deine Pruefungsplanbenachrichtigung',
'Wenn du per Email benachrichtigt werden moechtest, \
klicke bitte auf den folgenden Link: %s' % (
self.request.build_absolute_uri('/confirm/%s/?mail_code=%s' % (
notification.password,
notification.email_token
))
)
)
self.success_url = '/confirm/%s' % notification.password
return super(ExamSignUpView, self).form_valid(form)
class SignUpView(FormView):
kind = 'pruefungsplan'
form_class = SignUpForm
template_name = 'home.html'
def form_valid(self, form):
notification, sms = form.draft_notification()
if sms:
send_sms(
sms,
'Dein Code fuer die Pruefungsplanbenachrichtigung \
lautet: %s' % (notification.sms_code)
)
send_email(
notification.email,
'Bitte bestaetige deine Pruefungsplanbenachrichtigung',
'Wenn du per Email benachrichtigt werden moechtest wenn der \
Pruefungsplan %s online ist, klicke bitte auf den folgenden \
Link: %s' % (
notification.pruefungsplan.name,
self.request.build_absolute_uri('/confirm/%s/?mail_code=%s' % (
notification.password,
notification.email_token
))
)
)
self.success_url = '/confirm/%s' % notification.password
return super(SignUpView, self).form_valid(form)
def confirm(request, password):
notification = get_object_or_404(Notification, password=password)
sms_error = False
    sms_code = request.GET.get('sms_code')
if sms_code:
if sms_code == notification.sms_code:
            notification.sms_verified = True
notification.save()
else:
sms_error = True
mail_error = False
mail_code = request.GET.get('mail_code')
if mail_code:
if mail_code == notification.email_token:
notification.email_verified = True
notification.save()
else:
mail_error = True
return render_to_response('confirm.html', {
'notification': notification,
'sms_error': sms_error,
'mail_error': mail_error,
}, context_instance=RequestContext(request))
|
djedproject/djed.static
|
djed/static/__init__.py
|
Python
|
isc
| 5,214
| 0
|
import logging
import os
from collections import namedtuple
from zope.interface import Interface
from bowerstatic import (
Bower,
InjectorTween,
PublisherTween,
)
from pyramid.interfaces import IApplicationCreated
from pyramid.path import AssetResolver
from pyramid.exceptions import ConfigurationError
log = logging.getLogger('djed.static')
BowerComponentsInfo = namedtuple('BowerComponentsInfo', 'name path')
BowerComponentInfo = namedtuple('BowerComponentInfo', 'path components_name')
class IBower(Interface):
""" Bower interface
"""
class IBowerComponents(Interface):
""" Bower components interface
"""
class IBowerComponent(Interface):
""" Bower component interface for local components
"""
def bower_factory_from_settings(settings):
prefix = settings.get('djed.static.prefix', 'djed.static.')
bower = Bower()
bower.initialized = False
bower.publisher_signature = settings.get(
prefix + 'publisher_signature', 'bowerstatic')
bower.components_path = settings.get(
prefix + 'components_path', None)
bower.components_name = settings.get(
prefix + 'components_name', 'components')
return bower
def get_bower(request):
registry = getattr(request, 'registry', None)
if registry is None:
registry = request
return registry.getUtility(IBower)
def bowerstatic_tween_factory(handler, registry):
bower = get_bower(registry)
def bowerstatic_tween(request):
injector_handler = InjectorTween(bower, handler)
publisher_handler = PublisherTween(bower, injector_handler)
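        # The injector rewrites HTML responses to include assets; the publisher serves the static files.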
return publisher_handler(request)
return bowerstatic_tween
def add_bower_components(config, path, name=None):
"""
"""
registry = config.registry
resolver = AssetResolver()
directory = resolver.resolve(path).abspath()
if not os.path.isdir(directory):
raise ConfigurationError(
"Directory '{0}' does not exist".format(directory)
)
bower = get_bower(registry)
if name is None:
name = bower.components_name
discr = ('djed:static', name)
def register():
        info = BowerComponentsInfo(name, directory)
registry.registerUtility(info, IBowerComponents, name=name)
config.action(discr, register)
def add_bower_component(config, path, components_name=None):
"""
"""
registry = config.registry
resolver = AssetResolver()
directory = resolver.resolve(path).abspath()
if not os.path.isfile(os.path.join(directory, 'bower.json')):
raise ConfigurationError(
"Directory '{0}' does not contain 'bower.json' file"
            .format(directory)
        )
bower = get_bower(registry)
if components_name is None:
components_name = bower.components_name
discr = ('djed:static', directory, components_name)
def register():
info = BowerComponentInfo(directory, components_name)
registry.registerUtility(info, IBowerComponent, name='-'.join(discr))
config.action(discr, register)
def include(request, path_or_resource, components_name=None):
"""
"""
registry = request.registry
bower = get_bower(registry)
if components_name is None:
components_name = bower.components_name
collection = bower._component_collections.get(components_name)
if collection is None:
raise ConfigurationError("Bower components '{0}' not found."
.format(components_name))
include = collection.includer(request.environ)
include(path_or_resource)
def init_static(event):
registry = event.app.registry
bower = get_bower(registry)
if not bower.initialized:
log.info("Initialize static resources...")
for name, info in registry.getUtilitiesFor(IBowerComponents):
bower.components(info.name, info.path)
log.info("Add static bower components '{0}': {1}"
.format(info.name, info.path))
for name, info in registry.getUtilitiesFor(IBowerComponent):
collection = bower._component_collections.get(info.components_name)
if collection is None:
raise ConfigurationError(
"Bower components '{0}' not found.".format(
info.components_name))
component = collection.load_component(
info.path, 'bower.json')
collection.add(component)
log.info("Add local bower component: {0}".format(info.path))
bower.initialized = True
def includeme(config):
bower = bower_factory_from_settings(config.registry.settings)
config.registry.registerUtility(bower, IBower)
config.add_tween('djed.static.bowerstatic_tween_factory')
config.add_subscriber(init_static, IApplicationCreated)
config.add_directive('add_bower_components', add_bower_components)
config.add_directive('add_bower_component', add_bower_component)
config.add_request_method(include, 'include')
config.add_request_method(get_bower, 'get_bower')
if bower.components_path is not None:
config.add_bower_components(bower.components_path)
|
zaqwes8811/matlab_ext
|
measurement/mc-assistant/projects/py_hw_models/trash/testModelADDAC.py
|
Python
|
apache-2.0
| 5,245
| 0.061971
|
#-*- coding: utf-8 -*-
import unittest
import ModelADDAC as adda
import api_convertors.type_conv as tc
''' Just a stub '''
def printRpt( value, valueDisplacemented, valueScaled, valueCode, Kda ):
#print '\nvalueDisplacemented : '+str(valueDisplacemented)
pass
''' Test class '''
class TestCaseModelADDAC(unittest.TestCase):
_valueDict = { 'value' : 0, 'zeroDisplacement' : 0, 'converter' : 0,
'scale' : 0, 'capacity' : 0, 'Vmax' : 0 }
    def setUp( self ):
        pass
    ''' ADC '''
def testADC( self ):
self._valueDict[ 'value' ] = 2.5
self._valueDict['displacement'] = 0
self._valueDict['converter' ] = 1
self._valueDict['scale'] = 1
self._valueDict['capacity'] = 8
self._valueDict['Vmax'] = 5.0 # V
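        # 2.5 V is mid-scale of the 5 V range, so an ideal 8-bit ADC should return code 128 (0x80).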
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the digital code, 8 bits!! (though the resolution can also be higher)
self.assertEqual( tc.byte2strhex( code ), '0x80' )
        # check the transfer coefficient
        #self.assertEqual( Kda, 51.2 ) #? how to compare doubles! just evaluate the expression
    ''' Check the current-based calculation, with offset '''
def testCurrADCZeroX( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
I = 10 # A
        dI = -1.0 # A, zero position on X
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dI
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentX )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00E5' )
    ''' Check the current-based calculation, without offset '''
def testCurrADCZeroXZ0( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
dI = 0.0 # A
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dI
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentX )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00FE' )
    ''' Check the current-based calculation, with voltage offset '''
def testCurrADCZeroY( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
dU = 500.0 # mV
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dU
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x0142' )
    ''' Check the current-based calculation, with offset '''
def testCurrADCZeroYZ0( self ):
# Constants and coeff.
R1 = 5.11 # Om
R2 = 10.0 # Om
Splitter = R2/(R1+R2)
Vmax = 5000.0 # mV
capacity = 10 # bits
Kiu = 188.0 # mV/A
dU = 0.0 # mV
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dU
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
        self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00FE' )
        # Check the multiplier
    ''' DAC check '''
def testDAC( self ):
# Constants and coeff.
R1 = 5.11 # Om
R2 = 10.0 # Om
Splitter = R2/(R1+R2)
Vmax = 5000.0 # mV
capacity = 10 # bits
        Kiu = 188.0 # mV/A
Udig = 322 # V ue
        # run the check
self._valueDict[ 'value' ] = 0
self._valueDict['displacement'] = 500
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
        # first obtain the correction code
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Run it
self._valueDict[ 'value' ] = Udig-code
self._valueDict['displacement'] = None
analog = adda.modelDAC( self._valueDict, printRpt )
        # check the value! floats are hard to compare, round for now
self.assertEqual( int( analog ), 10 )
# Run tests
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase( TestCaseModelADDAC )
unittest.TextTestRunner(verbosity=2).run(suite)
|
shinpeimuraoka/ryu
|
ryu/__init__.py
|
Python
|
apache-2.0
| 680
| 0
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
|
BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version_info = (4, 12)
version = '.'.join(map(str, version_info))
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim/gridcell/is_near_arterial.py
|
Python
|
gpl-2.0
| 2,343
| 0.012804
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from urbansim.length_constants import UrbanSimLength, UrbanSimLengthConstants
from numpy import array
class is_near_arterial(Variable):
"""Boolean indicating if this gridcell is near an arterial, as specified by the arterial
threshold (a constant). Distance is assumed to be measured from the "border" of the gridcell."""
distance_to_arterial = "distance_to_arterial"
def dependencies(self):
return [my_attribute_label(self.distance_to_arterial)]
def compute(self, dataset_pool):
return get_is_near_arterial(self.get_dataset().get_attribute(self.distance_to_arterial),
dataset_pool.get_dataset('urbansim_constant'))
def post_check(self, values, dataset_pool):
self.do_check("x == False or x == True", values)
def get_is_near_arterial(distance, urbansim_constant):
length = UrbanSimLength(distance, urbansim_constant["gridcell_width"].units)
return length.less_than(urbansim_constant["near_arterial_threshold_unit"])
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs( self ):
# Assumes distance is measured from the gridcell border to the arterial.
tester = VariableTester(
__file__,
package_order=['urbansim'],
test_data={
'gridcell':{
                    'grid_id': array([1,2,3,4,5,6]),
                    'distance_to_arterial': array([0.0, 50.0, 99.0, 100.0, 101.0, 200.0]),
},
'urbansim_constant':{
                    'cell_size': array([150]),
'near_arterial_threshold': array([100]),
'units': array(['meters']),
}
}
)
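        # With a 100 (meters) threshold, only distances strictly below it count as "near".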
should_be = array( [True, True, True, False, False, False] )
tester.test_is_equal_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main()
|
stefanv/selective-inference
|
selection/algorithms/tests/test_forward_step.py
|
Python
|
bsd-3-clause
| 8,482
| 0.00896
|
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from selection.algorithms.lasso import instance
from selection.algorithms.forward_step import forward_stepwise, info_crit_stop, sequential, data_carving_IC
def test_FS(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
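    # Scale each column by std * sqrt(n) so the predictors have roughly unit Euclidean norm.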
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3)
    print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
print FS.model_quadratic(3)
def test_FS_unknown(k=10):
    n, p = 100, 200
    X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y)
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_subset(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
subset = np.ones(n, np.bool)
subset[-10:] = 0
FS = forward_stepwise(X, Y, subset=subset,
covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=True)
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
FS = forward_stepwise(X, Y, subset=subset)
for i in range(k):
FS.next()
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_BIC(k=10, do_sample=True):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = info_crit_stop(Y, X, 0.5, cost=np.log(n))
final_model = len(FS.variables) - 1
if do_sample:
return [p[-1] for p in FS.model_pivots(final_model, saturated=False, burnin=5000, ndraw=5000)]
else:
saturated_pivots = FS.model_pivots(final_model)
return [p[-1] for p in saturated_pivots]
def test_sequential(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
print sequential(X, Y, sigma=0.5, saturated=True)[1]
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000)[1]
# now use a subset of cases
subset = np.ones(n, np.bool)
subset[-10:] = 0
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000,
subset=subset)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000, subset=subset)[1]
def simulate_null(saturated=True):
n, p = 100, 40
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(5):
FS.next()
return [p[-1] for p in FS.model_pivots(3, saturated=saturated,
use_new=False)]
def test_ecdf(nsim=1000, BIC=False,
saturated=True):
P = []
for _ in range(nsim):
if not BIC:
P.extend(simulate_null(saturated=saturated))
else:
P.extend(test_BIC(do_sample=True))
P = np.array(P)
ecdf = sm.distributions.ECDF(P)
plt.clf()
plt.plot(ecdf.x, ecdf.y, linewidth=4, color='black')
plt.show()
def test_data_carving_IC(n=100,
p=200,
s=7,
sigma=5,
rho=0.3,
snr=7.,
split_frac=0.9,
ndraw=5000,
burnin=1000,
df=np.inf,
coverage=0.90,
compute_intervals=False):
counter = 0
while True:
counter += 1
X, y, beta, active, sigma = instance(n=n,
p=p,
s=s,
sigma=sigma,
rho=rho,
snr=snr,
df=df)
mu = np.dot(X, beta)
splitn = int(n*split_frac)
indices = np.arange(n)
np.random.shuffle(indices)
stage_one = indices[:splitn]
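        # Run forward stepwise with a BIC-style penalty (cost=log(n)) on the stage-one split only.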
FS = info_crit_stop(y, X, sigma, cost=np.log(n), subset=stage_one)
if set(range(s)).issubset(FS.active):
results, FS = data_carving_IC(y, X, sigma,
stage_one=stage_one,
splitting=True,
ndraw=ndraw,
burnin=burnin,
coverage=coverage,
compute_intervals=compute_intervals,
cost=np.log(n))
carve = [r[1] for r in results]
split = [r[3] for r in results]
Xa = X[:,FS.variables[:-1]]
truth = np.dot(np.linalg.pinv(Xa), mu)
split_coverage = []
carve_coverage = []
for result, t in zip(results, truth):
_, _, ci, _, si = result
carve_coverage.append((ci[0] < t) * (t < ci[1]))
split_coverage.append((si[0] < t) * (t < si[1]))
return ([carve[j] for j, i in enumerate(FS.active) if i >= s],
[split[j] for j, i in enumerate(FS.active) if i >= s],
[carve[j] for j, i in enumerate(FS.active) if i < s],
[split[j] for j, i in enumerate(FS.active) if i < s],
counter, carve_coverage, split_coverage)
def test_full_pvals(n=100, p=40, rho=0.3, snr=4):
X, y, beta, active, sigma = instance(n=n, p=p, snr=snr, rho=rho)
FS = forward_stepwise(X, y, covariance=sigma**2 * np.identity(n))
from scipy.stats import norm as ndist
pval = []
completed_yet = False
for i in range(min(n, p)):
FS.next()
var_select, pval_select = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=False,
burnin=2000,
ndraw=8000)[0]
pval_saturated = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=True)[0][1]
# now, nominal ones
LSfunc = np.linalg.pinv(FS.X[:,FS.variables])
Z = np.dot(LSfunc[-1], FS.Y) / (np.linalg.norm(LSfunc[-1]) * sigma)
pval_nominal = 2 * ndist.sf(np.fabs(Z))
|
danielvdao/facebookMacBot
|
venv/lib/python2.7/site-packages/sleekxmpp/plugins/xep_0196/stanza.py
|
Python
|
mit
| 536
| 0.003731
|
"""
SleekXMPP: The Sleek XMPP Library
    Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase, ET
class UserGaming(ElementBase):
name = 'gaming'
namespace = 'urn:xmpp:gaming:0'
plugin_attrib = 'gaming'
interfaces = set(['character_name', 'character_profile', 'name',
'level', 'server_address', 'server_name', 'uri'])
sub_interfaces = interfaces
|
LauritzThaulow/fakelargefile
|
fakelargefile/segment/literal.py
|
Python
|
agpl-3.0
| 2,718
| 0.001104
|
'''
A segment which is a literal string
A FakeLargeFile composed entirely of LiteralSegments is not fake, but may
still be more useful than a plain old file.
'''
COPYING = """\
Copyright 2014 Lauritz Vesteraas Thaulow
This file is part of the FakeLargeFile python package.
FakeLargeFile is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License version 3,
as published by the Free Software Foundation.
FakeLargeFile is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU General Affero Public License
along with FakeLargeFile. If not, see <http://www.gnu.org/licenses/>.
"""
import pkg_resources
from fakelargefile.segment.abc import AbstractSegment, register_segment
from fakelargefile.tools import parse_unit, Slice
@register_segment
class LiteralSegment(AbstractSegment):
"""
A segment containing exactly a given string.
"""
    def __init__(self, start, string):
"""
Initialize a LiteralSegment instance.
:param int start: The start pos of the segment.
:param str string: The string this segment should contain.
"""
start = parse_unit(start)
super(LiteralSegment, self).__init__(start, start + len(string))
self.string = string
def subsegment(self, start, stop):
sl = Slice(start, stop, self.start, self.stop)
if sl.size:
return type(self)(sl.start, self.string[sl.local_slice])
else:
return None
@classmethod
def example(cls, start, stop):
basis = pkg_resources.resource_stream(
"fakelargefile", "GPLv3.txt").read()
start = parse_unit(start)
stop = parse_unit(stop)
size = stop - start
basis = basis * (size // len(basis) + 1)
return cls(start, basis[:size])
def copy(self, start=None):
if start is None:
start = self.start
return type(self)(start, self.string)
def index(self, string, start=None, stop=None, end_pos=False):
sl = Slice(start, stop, self.start, self.stop)
index = self.string.index(string, sl.local_start, sl.local_stop)
if end_pos:
index += len(string)
return self.start + index
def substring(self, start, stop):
sl = Slice(start, stop, self.start, self.stop, clamp=False)
return self.string[sl.local_slice]
def __str__(self):
return self.string
|
aroraenterprise/projecteos
|
backend/api/v1/fundamentals/sage_methods.py
|
Python
|
mit
| 212
| 0.009434
|
"""
Project: flask-rest
Author: Saj Arora
Description: All of the rest methods...
"""
class SageMethod:
GET = 'get'
POST = 'post'
DELETE = 'delete'
PUT = 'put'
ALL = [GET, POST, DELETE, PUT]
|
cflq3/getcms
|
plugins/mongodb_dbs.py
|
Python
|
mit
| 138
| 0.007246
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
whatweb.recog_from_content(pluginname, "Replica set
|
status")
|
zapcoop/vertex
|
vertex_api/service/filters/note.py
|
Python
|
agpl-3.0
| 147
| 0
|
from vertex.filters import IdListFilterSet
from ..models import Note
class NoteFilterSet(IdListFilterSet):
class Meta:
model = Note
|
canvasnetworks/canvas
|
website/canvas/search.py
|
Python
|
bsd-3-clause
| 1,105
| 0.01086
|
import threading
import solr
from django.conf import settings
class SolrConnection(threading.local):
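    # threading.local gives each thread its own instance state, so connections are per-thread.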
_connection = None
def __init__(self, core):
        threading.local.__init__(self)
self.core = core
@property
def connection(self):
if self._connection:
return self._connection
else:
return solr.Solr(settings.SOLR_HOST + '/' + self.core)
valid_core_names = ['comment', 'group']
local = {}
def get_local(core):
assert core in valid_core_names
if not core in local:
local[core] = SolrConnection(core)
return local[core]
def escape(input):
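    # Backslash-escape characters that are special in Solr/Lucene query syntax (plus spaces).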
escapes = '\\+-&|!(){}[]^~*?:"; '
return "".join(
|
(char if char not in escapes else '\\' + char)
for char
in input
)
def query(core, *args, **kwargs):
return get_local(core).connection.select(*args, **kwargs)
def add(core, *args, **kwargs):
return get_local(core).connection.add(*args, **kwargs)
def update(core, doc, *args, **kwargs):
get_local(core).connection.delete(doc['id'])
return add(core, doc)
|
open-power/op-test-framework
|
testcases/OpTestPCI.py
|
Python
|
apache-2.0
| 36,744
| 0.001116
|
#!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/testcases/OpTestPCI.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
OpTestPCI: PCI checks
-------------------------------
Perform various PCI validations and checks
--run-suite BasicPCI (includes skiroot_suite and host_suite)
--run-suite pci-regression
Sample naming conventions below, see each test method for
the applicable options per method.
--run testcases.OpTestPCI.PCISkiroot.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
--run testcases.OpTestPCI.PCIHost.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
'''
import unittest
import logging
import pexpect
import time
import re
import difflib
from distutils.version import LooseVersion
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed, UnexpectedCase
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
skiroot_done = 0
host_done = 0
skiroot_lspci = None
host_lspci = None
reset_console = 0
class OpClassPCI(unittest.TestCase):
'''
Main Parent class
We cannot guarantee a soft boot entry, so need to force to PS or OS
'''
@classmethod
def setUpClass(cls, desired=None, power_cycle=0):
'''
Main setUpClass, this is shared across all subclasses.
This is called once when the subclass is instantiated.
'''
if desired is None:
cls.desired = OpSystemState.PETITBOOT_SHELL
else:
cls.desired = desired
cls.power_cycle = power_cycle
cls.conf = OpTestConfiguration.conf
cls.cv_SYSTEM = cls.conf.system()
cls.cv_HOST = cls.conf.host()
cls.my_connect = None
if cls.power_cycle == 1:
cls.cv_SYSTEM.goto_state(OpSystemState.OFF)
cls.power_cycle = 0
try:
if cls.desired == OpSystemState.OS:
# set bootdev for reboot cases
cls.cv_SYSTEM.sys_set_bootdev_no_override()
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
else:
cls.cv_SYSTEM.sys_set_bootdev_setup()
cls.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
cls.c = cls.cv_SYSTEM.console
cls.pty = cls.cv_SYSTEM.console.get_console()
except Exception as e:
log.debug("Unable to find cls.desired, probably a test code problem")
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
@classmethod
def tearDownClass(cls):
'''
Main tearDownClass, this is shared across all subclasses.
This is called once when the subclass is taken down.
'''
global skiroot_done
global host_done
global skiroot_lspci
global host_lspci
global reset_console
if reset_console == 1:
cls.refresh_console()
@classmethod
def set_console(cls):
'''
This method allows setting the shared class console to the real
console when needed, i.e. driver_bind tests which unbind the
ethernet drivers.
'''
cls.c = cls.cv_SYSTEM.console
@classmethod
def refresh_console(cls):
'''
This method is used to set the shared class console back to the proper
object (this gets set to the real console when we unbind the ethernet)
in the driver_bind test as an example.
'''
# this done after a reboot
global reset_console
if cls.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
cls.c = cls.cv_SYSTEM.console
else:
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
reset_console = 0
def setUp(self):
'''
All variables common to a subclass need to be defined here since
this method gets called before each subclass test
'''
pass
def tearDown(self):
'''
This is done at the end of each subclass test.
'''
global reset_console
if reset_console == 1:
self.refresh_console()
def get_lspci(self):
'''
Usually used internally, can be run for query of system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci
        Case D --run testcases.OpTestPCI.PCIHost.get_lspci
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci
'''
lspci_data = self.c.run_command("lspci -mm -n")
return lspci_data
def check_commands(self):
'''
Checks for general capability to run commands
Case A --run testcases.OpTestPCI.PCISkiroot.check_commands
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.check_commands
        Case C --run testcases.OpTestPCI.PCISkirootHardboot.check_commands
Case D --run testcases.OpTestPCI.PCIHost.check_commands
Case E --run testcases.OpTestPCI.PCIHostSoftboot.check_commands
Case F --run testcases.OpTestPCI.PCIHostHardboot.check_commands
'''
list_pci_devices_commands = ["lspci -mm -n",
"lspci -m",
"lspci -t",
"lspci -n",
"lspci -nn",
"cat /proc/bus/pci/devices",
"ls --color=never /sys/bus/pci/devices/ -l",
"lspci -vvxxx",
]
for cmd in list_pci_devices_commands:
self.c.run_command(cmd, timeout=300)
list_usb_devices_commands = ["lsusb",
"lsusb -t",
"lsusb -v",
]
for cmd in list_usb_devices_commands:
self.c.run_command(cmd)
# Test that we do not EEH on reading all config space
self.c.run_command(
"hexdump -C /sys/bus/pci/devices/*/config", timeout=600)
def get_lspci_file(self):
'''
Usually used internally, can be run for query of system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci_file
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci_file
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci_file
Case D --run testcases.OpTestPCI.PCIHost.get_lspci_file
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci_file
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci_file
'''
if self.conf.lspci_file():
with open(self.conf.lspci_file(), 'r') as f:
file_content = f.read().splitlines()
log.debug("file_content={}".format(file_content))
return file_content
def _diff_my_devices(self,
listA=None,
listA_name=None,
listB=None,
listB_name=None):
'''
Performs unified dif
|
jordanemedlock/psychtruths
|
temboo/core/Library/KhanAcademy/Users/GetExerciseFollowUp.py
|
Python
|
apache-2.0
| 4,590
| 0.0061
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetExerciseFollowUp
# Retrieves user data about all exercises which have the specified exercise as a prerequisite.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetExerciseFollowUp(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetExerciseFollowUp Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetExerciseFollowUp, self).__init__(temboo_session, '/Library/KhanAcademy/Users/GetExerciseFollowUp')
def new_input_set(self):
return GetExerciseFollowUpInputSet()
def _make_result_set(self, result, path):
return GetExerciseFollowUpResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetExerciseFollowUpChoreographyExecution(session, exec_id, path)
class GetExerciseFollowUpInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetExerciseFollowUp
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Khan Academy.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The OAuth Consumer Secret provided by Khan Academy.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ConsumerSecret', value)
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((optional, string) The email address (coach or student ID) of user. If not provided, defaults to currently logged in user.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('Email', value)
def set_ExerciseName(self, value):
"""
        Set the value of the ExerciseName input for this Choreo. ((required, string) The exercise for which you want to retrieve follow-up exercises (e.g. "simplifying_fractions").)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ExerciseName', value)
def set_OAuthTokenSecret(self, value):
"""
Set the value of the OAuthTokenSecret input for this Choreo. ((required, string) The OAuth Token Secret retrieved during the OAuth process.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('OAuthTokenSecret', value)
def set_OAuthToken(self, value):
"""
Set the value of the OAuthToken input for this Choreo. ((required, string) The OAuth Token retrieved during the OAuth process.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('OAuthToken', value)
class GetExerciseFollowUpResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetExerciseFollowUp Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Khan Academy.)
"""
return self._output.get('Response', None)
class GetExerciseFollowUpChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetExerciseFollowUpResultSet(response, path)
|
danbarrese/pwdgen
|
pwdgen.py
|
Python
|
gpl-3.0
| 4,586
| 0.002617
|
# RANDOM PASSWORD GENERATOR
# Author: Dan Barrese (danbarrese.com)
# Version: 1.01
# Description: Yep. This is my first Python script.
# Skills exemplified in this script:
# * Random number generation
# * Command line interface
# * List comprehension
# * Loops
#
# Update History:
# 2013.12.25 [DRB][1.0] Initial implementation.
# 2013.12.29 [DRB][1.01] Defaulted to 10 passwords and length of 5.
import random
import string
import sys
import argparse
# Parse arguments.
parser = argparse.ArgumentParser(description='Random password generator.')
parser.add_argument('--length', '-l', metavar='L', type=int, nargs=1,
dest='length', default=[5],
help='the number (L) of characters in the generated password')
parser.add_argument('--count', '-c', metavar='C', type=int, nargs=1,
dest='count', default=[10],
help='the number (C) of passwords to generate')
parser.add_argument('--first-char', '-a', metavar='X', type=str, nargs=1,
dest='first_char', default=[None],
help='the first character (X) in the generated password')
parser.add_argument('--numbers', dest='do_numbers', action='store_true',
help='include numbers [0-9]')
parser.add_argument('--no-numbers', dest='do_numbers', action='store_false',
help='do NOT include numbers [0-9]')
parser.set_defaults(do_numbers=True)
parser.add_argument('--alpha-lower', dest='do_alpha_lower', action='store_true',
                    help='include alphas [a-z]')
parser.add_argument('--no-alpha-lower', dest='do_alpha_lower', action='store_false',
help='do NOT include alphas [a-z]')
parser.set_defaults(do_alpha_lower=True)
parser.add_argument('--alpha-upper', dest='do_alpha_upper', action='store_true',
                    help='include alphas [A-Z]')
parser.add_argument('--no-alpha-upper', dest='do_alpha_upper', action='store_false',
help='do NOT include alphas [A-Z]')
parser.set_defaults(do_alpha_upper=True)
parser.add_argument('--symbols-common', dest='do_symbols_common', action='store_true',
help='include common symbols')
parser.add_argument('--no-symbols-common', dest='do_symbols_common', action='store_false',
help='do NOT include common symbols')
parser.set_defaults(do_symbols_common=True)
parser.add_argument('--symbols-uncommon', dest='do_symbols_uncommon', action='store_true',
help='include uncommon symbols')
parser.add_argument('--no-symbols-uncommon', dest='do_symbols_uncommon', action='store_false',
help='do NOT include uncommon symbols')
parser.set_defaults(do_symbols_uncommon=False)
args = parser.parse_args()
# Set arguments to variables.
pwd_len = args.length[0]
pwd_count = args.count[0]
do_numbers = args.do_numbers
do_alpha_lower = args.do_alpha_lower
do_alpha_upper = args.do_alpha_upper
do_symbols_common = args.do_symbols_common
do_symbols_uncommon = args.do_symbols_uncommon
# Define possible sets of characters.
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
alphas_lowercase = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm'
, 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
]
alphas_uppercase = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
symbols_common = [
'!', '$', '&', '(', ')', '-', '_', '@', '#'
]
symbols_uncommon = [
'%', '\'', '/', ':', ';', '<', '=', '>', '?', '[',
'\\', ']', '^', '{', '|', '}', '~', '"', '*', '+', ',', '.', '`'
]
# Define keyset.
keyset = []
if do_numbers:
keyset = keyset + numbers
if do_alpha_lower:
keyset = keyset + alphas_lowercase
if do_alpha_upper:
keyset = keyset + alphas_uppercase
if do_symbols_common:
keyset = keyset + symbols_common
if do_symbols_uncommon:
keyset = keyset + symbols_uncommon
num_pwds_generated = 0
while num_pwds_generated < pwd_count:
# Define first character in the password.
first_char = args.first_char[0]
if first_char is None:
first_char = random.sample(keyset, 1)[0]
# Make password.
pwd_len_counter = 2
pwd_sequence = [first_char]
while pwd_len_counter <= pwd_len:
pwd_sequence.append(random.sample(keyset, 1)[0])
pwd_len_counter += 1
pwd_str = ''.join(pwd_sequence)
# Print password.
print(pwd_str)
num_pwds_generated += 1
|
eiginn/passpie
|
setup.py
|
Python
|
mit
| 2,730
| 0
|
#!/usr/bin/env python
import io
import os
import sys
try:
from setuptools import setup, Command, find_packages
except ImportError:
from distutils.core import setup, Command, find_packages
__version__ = "1.4.3"
with io.open('README.rst', encoding='utf-8') as readme_file:
long_description = readme_file.read() + "\n\n"
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
os.system("git tag -a v%s -m 'version v%s'" % (__version__, __version__))
os.system("git push --ta
|
gs")
os.system("git push")
sys.exit()
requirements = [
'click==6.2',
'PyYAML==3.11',
'tabulate==0.7.5',
'tinydb==3.1.2',
'rstr==2.2.3',
]
class PyTest(Command):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
self.pytest_args = ["-v", "tests/"]
def finalize_options(self):
        pass
def run(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
class PyTestCoverage(PyTest):
def initialize_options(self):
self.pytest_args = [
"-v", "tests",
"--cov", 'passpie',
"--cov-config", ".coveragerc",
"--cov-report", "term-missing",
]
setup(
name='passpie',
version=__version__,
license='License :: OSI Approved :: MIT License',
description="Manage your login credentials from the terminal painlessly.",
long_description=long_description,
author='Marcwebbie',
author_email='marcwebbie@gmail.com',
url='https://github.com/marcwebbie/passpie',
download_url='https://github.com/marcwebbie/passpie',
packages=find_packages(),
entry_points={
'console_scripts': [
'passpie=passpie.cli:cli',
]
},
install_requires=requirements,
cmdclass={'test': PyTest, 'coverage': PyTestCoverage},
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python',
'Topic :: Security :: Cryptography',
],
)
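# Usage sketch: the cmdclass mapping above wires pytest into setuptools, e.g.
#     python setup.py test        # PyTest.run() -> pytest.main(["-v", "tests/"])
#     python setup.py coverage    # same, with the coverage arguments added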
matk86/pymatgen | pymatgen/io/abinit/launcher.py | Python | mit | 47,862 | 0.003134
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools for the submission of Tasks."""
from __future__ import unicode_literals, division, print_function
import os
import time
import ruamel.yaml as yaml
import pickle
from collections import deque
from datetime import timedelta
from six.moves import cStringIO
from monty.io import get_open_fds
from monty.string import boxed, is_string
from monty.os.path import which
from monty.collections import AttrDict, dict2namedtuple
from monty.termcolor import cprint
from .utils import as_bool, File, Directory
from . import qutils as qu
from pymatgen.util.io_utils import ask_yesno
try:
import apscheduler
has_apscheduler = True
has_sched_v3 = apscheduler.version >= "3.0.0"
except ImportError:
has_apscheduler = False
import logging
logger = logging.getLogger(__name__)
__all__ = [
"ScriptEditor",
"PyLauncher",
"PyFlowScheduler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class ScriptEditor(object):
"""Simple editor that simplifies the writing of shell scripts"""
_shell = '/bin/bash'
def __init__(self):
self._lines = []
@property
def shell(self):
return self._shell
def _add(self, text, pre=""):
if is_string(text):
self._lines.append(pre + text)
else:
self._lines.extend([pre + t for t in text])
def reset(self):
"""Reset the editor."""
try:
del self._lines
except AttributeError:
pass
def shebang(self):
"""Adds the shebang line."""
self._lines.append('#!' + self.shell)
def declare_var(self, key, val):
"""Declare a env variable. If val is None the variable is unset."""
if val is not None:
line = "export " + key + '=' + str(val)
else:
line = "unset " + key
self._add(line)
def declare_vars(self, d):
"""Declare the variables defined in the dictionary d."""
for k, v in d.items():
self.declare_var(k, v)
def export_envar(self, key, val):
"""Export an environment variable."""
line = "export " + key + "=" + str(val)
self._add(line)
def export_envars(self, env):
"""Export the environment variables contained in the dict env."""
for k, v in env.items():
self.export_envar(k, v)
def add_emptyline(self):
"""Add an empty line."""
self._add("", pre="")
def add_comment(self, comment):
"""Add a comment"""
self._add(comment, pre="# ")
def load_modules(self, modules):
"""Load the list of specified modules."""
for module in modules:
self.load_module(module)
def load_module(self, module):
self._add('module load ' + module + " 2>> mods.err")
def add_line(self, line):
self._add(line)
def add_lines(self, lines):
self._add(lines)
def get_script_str(self, reset=True):
"""Returns a string with the script and reset the editor if reset is True"""
s = "\n".join(l for l in self._lines)
if reset:
self.reset()
return s
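# Minimal usage sketch for ScriptEditor; the module name and command below are
# illustrative, not part of the original file:
#     se = ScriptEditor()
#     se.shebang()
#     se.add_comment("Environment setup")
#     se.declare_var("OMP_NUM_THREADS", 1)
#     se.load_modules(["abinit"])
#     se.add_line("abinit < run.files > log 2> err")
#     script_text = se.get_script_str()  # also resets the editor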
class PyLauncherError(Exception):
"""Error class for PyLauncher."""
class PyLauncher(object):
"""This object handle the submission of the tasks contained in a :class:`Flow`"""
Error = PyLauncherError
def __init__(self, flow, **kwargs):
"""
Initialize the object
Args:
flow: :class:`Flow` object
max_njobs_inqueue: The launcher will stop submitting jobs when the
number of jobs in the queue is >= Max number of jobs
"""
self.flow = flow
self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)
#self.flow.check_pid_file()
def single_shot(self):
"""
        Run the first :class:`Task` that is ready for execution.
Returns:
Number of jobs launched.
"""
num_launched = 0
# Get the tasks that can be executed in each workflow.
tasks = []
for work in self.flow:
try:
task = work.fetch_task_to_run()
if task is not None:
tasks.append(task)
else:
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.debug("No task to run! Possible deadlock")
except StopIteration:
logger.info("All tasks completed.")
# Submit the tasks and update the database.
if tasks:
tasks[0].start()
num_launched += 1
self.flow.pickle_dump()
return num_launched
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
"""
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.
Args:
max_nlaunch: Maximum number of launches. default: no limit.
max_loops: Maximum number of loops
sleep_time: seconds to sleep between rapidfire loop iterations
Returns:
The number of tasks launched.
"""
num_launched, do_exit, launched = 0, False, []
for count in range(max_loops):
if do_exit:
break
if count > 0:
time.sleep(sleep_time)
tasks = self.fetch_tasks_to_run()
# I don't know why but we receive duplicated tasks.
if any(task in launched for task in tasks):
logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))
# Preventive test.
tasks = [t for t in tasks if t not in launched]
if not tasks:
                continue
for task in tasks:
fired = task.start()
if fired:
launched.append(task)
num_launched += 1
if num_launched >= max_nlaunch > 0:
                    logger.info('num_launched >= max_nlaunch, going back to sleep')
do_exit = True
break
# Update the database.
self.flow.pickle_dump()
return num_launched
def fetch_tasks_to_run(self):
"""
Return the list of tasks that can be submitted.
Empty list if no task has been found.
"""
tasks_to_run = []
for work in self.flow:
tasks_to_run.extend(work.fetch_alltasks_to_run())
return tasks_to_run
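# Hedged usage sketch of the launcher API above, assuming `flow` is an
# existing :class:`Flow` instance:
#     launcher = PyLauncher(flow, max_njobs_inqueue=100)
#     nlaunched = launcher.rapidfire(max_nlaunch=10, max_loops=3, sleep_time=5)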
class PyFlowSchedulerError(Exception):
"""Exceptions raised by `PyFlowScheduler`."""
class PyFlowScheduler(object):
"""
This object schedules the submission of the tasks in a :class:`Flow`.
There are two types of errors that might occur during the execution of the jobs:
#. Python exceptions
#. Errors in the ab-initio code
Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
    The set of errors in the ab-initio code is much broader. It includes wrong input data, segmentation
faults, problems with the resource manager, etc. The flow tries to handle the most common cases
but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically in the following cases:
#. The number of python exceptions is > max_num_pyexcs
#. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs
#. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).
#. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
    If the mail cannot be sent, the scheduler will shut down automatically.
This check prevents the scheduler from being trapped in an infinite loop.
"""
youtube/cobalt | third_party/llvm-project/clang-tools-extra/test/clang-tidy/check_clang_tidy.py | Python | bsd-3-clause | 5,589 | 0.013777
#!/usr/bin/env python
#
#===- check_clang_tidy.py - ClangTidy Test Helper ------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Test Helper
=====================
This script runs clang-tidy in fix mode and verify fixes, messages or both.
Usage:
check_clang_tidy.py [-resource-dir=<resource-dir>] \
[-assume-filename=<file-with-source-extension>] \
[-check-suffix=<file-check-suffix>] \
<source-file> <check-name> <temp-file> \
-- [optional clang-tidy arguments]
Example:
// RUN: %check_clang_tidy %s llvm-include-order %t -- -- -isystem %S/Inputs
"""
import argparse
import os
import re
import subprocess
import sys
def write_file(file_name, text):
with open(file_name, 'w') as f:
f.write(text)
f.truncate()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-expect-clang-tidy-error', action='store_true')
parser.add_argument('-resource-dir')
parser.add_argument('-assume-filename')
parser.add_argument('-check-suffix', default='')
parser.add_argument('input_file_name')
parser.add_argument('check_name')
  parser.add_argument('temp_file_name')
args, extra_args = parser.parse_known_args()
resource_dir = args.resource_dir
assume_file_name = args.assume_filename
input_file_name = args.input_file_name
check_name = args.check_name
temp_file_name = args.temp_file_name
expect_clang_tidy_error = args.expect_clang_tidy_error
file_name_with_extension = assume_file_name or input_file_name
_, extension = os.path.splitext(file_name_with_extension)
if extension not in ['.c', '.hpp', '.m', '.mm']:
extension = '.cpp'
temp_file_name = temp_file_name + extension
clang_tidy_extra_args = extra_args
if len(clang_tidy_extra_args) == 0:
clang_tidy_extra_args = ['--']
if extension in ['.cpp', '.hpp', '.mm']:
clang_tidy_extra_args.append('--std=c++11')
if extension in ['.m', '.mm']:
clang_tidy_extra_args.extend(
['-fobjc-abi-version=2', '-fobjc-arc'])
  if args.check_suffix and not re.match(r'^[A-Z0-9\-]+$', args.check_suffix):
sys.exit('Only A..Z, 0..9 and "-" are allowed in check suffix, but "%s" was given' % (args.check_suffix))
file_check_suffix = ('-' + args.check_suffix) if args.check_suffix else ''
check_fixes_prefix = 'CHECK-FIXES' + file_check_suffix
check_messages_prefix = 'CHECK-MESSAGES' + file_check_suffix
# Tests should not rely on STL being available, and instead provide mock
# implementations of relevant APIs.
clang_tidy_extra_args.append('-nostdinc++')
if resource_dir is not None:
clang_tidy_extra_args.append('-resource-dir=%s' % resource_dir)
with open(input_file_name, 'r') as input_file:
input_text = input_file.read()
has_check_fixes = check_fixes_prefix in input_text
has_check_messages = check_messages_prefix in input_text
if not has_check_fixes and not has_check_messages:
sys.exit('Neither %s nor %s found in the input' % (check_fixes_prefix, check_messages_prefix) )
# Remove the contents of the CHECK lines to avoid CHECKs matching on
# themselves. We need to keep the comments to preserve line numbers while
# avoiding empty lines which could potentially trigger formatting-related
# checks.
  cleaned_test = re.sub(r'// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text)
write_file(temp_file_name, cleaned_test)
original_file_name = temp_file_name + ".orig"
write_file(original_file_name, cleaned_test)
args = ['clang-tidy', temp_file_name, '-fix', '--checks=-*,' + check_name] + \
clang_tidy_extra_args
if expect_clang_tidy_error:
args.insert(0, 'not')
print('Running ' + repr(args) + '...')
try:
clang_tidy_output = \
subprocess.check_output(args, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
print('clang-tidy failed:\n' + e.output.decode())
raise
print('------------------------ clang-tidy output -----------------------\n' +
clang_tidy_output +
'\n------------------------------------------------------------------')
try:
diff_output = subprocess.check_output(
['diff', '-u', original_file_name, temp_file_name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
diff_output = e.output
print('------------------------------ Fixes -----------------------------\n' +
diff_output.decode() +
'\n------------------------------------------------------------------')
if has_check_fixes:
try:
subprocess.check_output(
['FileCheck', '-input-file=' + temp_file_name, input_file_name,
'-check-prefix=' + check_fixes_prefix, '-strict-whitespace'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if has_check_messages:
messages_file = temp_file_name + '.msg'
write_file(messages_file, clang_tidy_output)
try:
subprocess.check_output(
['FileCheck', '-input-file=' + messages_file, input_file_name,
'-check-prefix=' + check_messages_prefix,
'-implicit-check-not={{warning|error}}:'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if __name__ == '__main__':
main()
Kankroc/pdf2image | pdf2image/pdf2image.py | Python | mit | 8,412 | 0.004042
"""
pdf2image is a light wrapper for the poppler-utils tools that can convert your
PDFs into Pillow images.
"""
import os
import platform
import re
import uuid
import tempfile
import shutil
from subprocess import Popen, PIPE
from PIL import Image
from .parsers import (
parse_buffer_to_ppm,
parse_buffer_to_jpeg,
parse_buffer_to_png
)
from .exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
TRANSPARENT_FILE_TYPES = ['png', 'tiff']
def convert_from_path(pdf_path, dpi=200, output_folder=None, first_page=None, last_page=None,
fmt='ppm', thread_count=1, userpw=None, use_cropbox=False, strict=False, transparent=False,
output_file=str(uuid.uuid4()), poppler_path=None):
"""
    Description: Convert PDF to Image will throw whenever one of the conditions is reached
Parameters:
pdf_path -> Path to the PDF that you want to convert
dpi -> Image quality in DPI (default 200)
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
output_file -> What is the output filename
poppler_path -> Path to look for poppler binaries
"""
    page_count = _page_count(pdf_path, userpw, poppler_path=poppler_path)
# We start by getting the output format, the buffer processing function and if we need pdftocairo
    parsed_fmt, parse_buffer_func, use_pdfcairo_format = _parse_format(fmt)
    # We use pdftocairo if the format requires it OR we need a transparent output
use_pdfcairo = use_pdfcairo_format or (transparent and parsed_fmt in TRANSPARENT_FILE_TYPES)
if thread_count < 1:
thread_count = 1
if first_page is None:
first_page = 1
if last_page is None or last_page > page_count:
last_page = page_count
auto_temp_dir = False
if output_folder is None and use_pdfcairo:
auto_temp_dir = True
output_folder = tempfile.mkdtemp()
# Recalculate page count based on first and last page
page_count = last_page - first_page + 1
if thread_count > page_count:
thread_count = page_count
    remainder = page_count % thread_count
current_page = first_page
processes = []
for i in range(thread_count):
thread_output_file = output_file + '_' + str(i) if thread_count > 1 else output_file
# Get the number of pages the thread will be processing
        thread_page_count = page_count // thread_count + int(remainder > 0)
# Build the command accordingly
args = _build_command(['-r', str(dpi), pdf_path], output_folder, current_page, current_page + thread_page_count - 1, parsed_fmt, thread_output_file, userpw, use_cropbox, transparent)
if use_pdfcairo:
args = [_get_command_path('pdftocairo', poppler_path)] + args
else:
args = [_get_command_path('pdftoppm', poppler_path)] + args
# Update page values
current_page = current_page + thread_page_count
        remainder -= int(remainder > 0)
# Spawn the process and save its uuid
processes.append((thread_output_file, Popen(args, stdout=PIPE, stderr=PIPE)))
images = []
for uid, proc in processes:
data, err = proc.communicate()
        if b'Syntax Error' in err and strict:
raise PDFSyntaxError(err.decode("utf8", "ignore"))
if output_folder is not None:
images += _load_from_output_folder(output_folder, uid, in_memory=auto_temp_dir)
else:
images += parse_buffer_func(data)
if auto_temp_dir:
shutil.rmtree(output_folder)
return images
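# Hedged usage sketch; 'example.pdf' and the output names are hypothetical:
#     pages = convert_from_path('example.pdf', dpi=150, fmt='png')
#     for i, page in enumerate(pages):
#         page.save('page_%d.png' % i, 'PNG')   # pages are PIL Images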
def convert_from_bytes(pdf_file, dpi=200, output_folder=None, first_page=None, last_page=None,
fmt='ppm', thread_count=1, userpw=None, use_cropbox=False, strict=False, transparent=False,
output_file=str(uuid.uuid4()), poppler_path=None):
"""
    Description: Convert PDF to Image will throw whenever one of the conditions is reached
Parameters:
pdf_file -> Bytes representing the PDF file
dpi -> Image quality in DPI
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
output_file -> What is the output filename
poppler_path -> Path to look for poppler binaries
"""
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, 'wb') as f:
f.write(pdf_file)
f.flush()
return convert_from_path(f.name, dpi=dpi, output_folder=output_folder,
first_page=first_page, last_page=last_page, fmt=fmt, thread_count=thread_count,
userpw=userpw, use_cropbox=use_cropbox, strict=strict, transparent=transparent,
output_file=output_file, poppler_path=poppler_path)
finally:
os.close(fh)
os.remove(temp_filename)
def _build_command(args, output_folder, first_page, last_page, fmt, output_file, userpw, use_cropbox, transparent):
if use_cropbox:
args.append('-cropbox')
if transparent and fmt in TRANSPARENT_FILE_TYPES:
args.append('-transp')
if first_page is not None:
args.extend(['-f', str(first_page)])
if last_page is not None:
args.extend(['-l', str(last_page)])
if fmt != 'ppm':
args.append('-' + fmt)
if output_folder is not None:
args.append(os.path.join(output_folder, output_file))
if userpw is not None:
args.extend(['-upw', userpw])
return args
def _parse_format(fmt):
fmt = fmt.lower()
if fmt[0] == '.':
fmt = fmt[1:]
if fmt in ('jpeg', 'jpg'):
return 'jpeg', parse_buffer_to_jpeg, False
if fmt == 'png':
return 'png', parse_buffer_to_png, False
if fmt in ('tif', 'tiff'):
return 'tiff', None, True
# Unable to parse the format so we'll use the default
return 'ppm', parse_buffer_to_ppm, False
def _get_command_path(command, poppler_path=None):
if platform.system() == 'Windows':
command = command + '.exe'
if poppler_path is not None:
command = os.path.join(poppler_path, command)
return command
def _page_count(pdf_path, userpw=None, poppler_path=None):
try:
command = [_get_command_path("pdfinfo", poppler_path), pdf_path]
if userpw is not None:
command.extend(['-upw', userpw])
proc = Popen(command, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
except:
raise PDFInfoNotInstalledError('Unable to get page count. Is poppler installed and in PATH?')
try:
# This will throw if we are unable to get page count
return int(re.search(r'Pages:\s+(\d+)', out.decode("utf8", "ignore")).group(1))
except:
raise PDFPageCountError('Unable to get page count. %s' % err.decode("utf8", "ignore"))
def _load_from_output_folder(output_folder, output_file, in_memory=False):
image
samdsmx/omegaup | bin/karel_mdo_convert.py | Python | bsd-3-clause | 4,150 | 0.024096
#!/usr/bin/python3
import struct
import sys
if len(sys.argv) == 1:
print("python karel_mdo_convert.py mundo.mdo")
sys.exit(1)
f = open(sys.argv[1], "rb")
data = f.read()
f.close()
worldname = sys.argv[1]
if '/' in worldname:
worldname = worldname[worldname.rfind('/')+1:]
if '.' in worldname:
worldname = worldname[:worldname.rfind('.')]
kec = False
for extension in ("kec", "KEC"):
try:
f = open(sys.argv[1][:sys.argv[1].rfind(".")] + "." + extension, "rb")
kec = f.read()
f.close()
break
except Exception:
pass
if not kec:
print("%s.kec not found" % worldname)
sys.exit(1)
(x1, width, height, buzzers, karelx, karely, karelorient, wallcount, heapcount, x10) = struct.unpack("HHHHHHHHHH", data[10:30])
tuples = [struct.unpack("HHH", data[i:i+6]) for i in range(30, len(data), 6)]
kec = [struct.unpack("HHH", kec[i:i+6]) for i in range(0, len(kec), 6)]
maxlines = kec[0][1] if kec[0][0] else 10000000
maxmove = kec[1][1] if kec[1][0] else False
maxturnleft = kec[2][1] if kec[2][0] else False
maxpickbeeper = kec[3][1] if kec[3][0] else False
maxputbeeper = kec[4][1] if kec[4][0] else False
maxkarelbeepers = kec[5][1] if kec[5][0] else False
maxbeepers = kec[6][1] if kec[6][0] else False
endposition = kec[7][1:] if kec[7][0] else False
endorientation = ["NORTE", "ESTE", "SUR", "OESTE"][kec[8][1]] if kec[8][0] else False
dumpcount = kec[9][1] if kec[9][0] else 0
def formatbuzzers(b):
if b == 65535:
return "INFINITO"
else:
return "%d" % b
def isborder(wall, w, h):
if wall[0] == wall[2]:
return wall[0] in (0, w)
if wall[1] == wall[3]:
return wall[1] in (0, h)
def decodewalls(t, w, h):
dx = ((-1, 0, -1, -1), (0, 0, 0, -1))
dy = ((0, -1, -1, -1), (0, 0, -1, 0))
for i in range(4):
if (t[2] & (1 << i)):
wall = (t[0] + dx[0][i], t[1] + dy[0][i], t[0] + dx[1][i], t[1] + dy[1][i])
if not isborder(wall, w, h):
yield wall
def encodewall(w):
if w[0] == w[2]:
return 'x1="%d" y1="%d" y2="%d"' % (w[0], min(w[1], w[3]), max(w[1], w[3]))
elif w[1] == w[3]:
return 'x1="%d" x2="%d" y1="%d"' % (min(w[0], w[2]), max(w[0], w[2]), w[1])
else:
sys.exit(1)
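# Illustrative values: a vertical wall segment at x=3 spanning y=1..2 yields
# encodewall((3, 1, 3, 2)) == 'x1="3" y1="1" y2="2"', while a horizontal one
# at y=5 spanning x=2..3 yields 'x1="2" x2="3" y1="5"'.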
def generateIn():
print("<ejecucion>")
if maxmove != False or maxturnleft != False or maxpickbeeper != False or maxputbeeper != False:
print(" <condiciones instruccionesMaximasAEjecutar=\"%d\" longitudStack=\"65000\">" % maxlines)
if maxmove != False:
print(' <comando nombre="AVANZA" maximoNumeroDeEjecuciones="%d" />' % maxmove)
if maxturnleft != False:
print(' <comando nombre="GIRA_IZQUIERDA" maximoNumeroDeEjecuciones="%d" />' % maxturnleft)
if maxpickbeeper != False:
print(' <comando nombre="COGE_ZUMBADOR" maximoNumeroDeEjecuciones="%d" />' % maxpickbeeper)
if maxputbeeper != False:
print(' <comando nombre="DEJA_ZUMBADOR" maximoNumeroDeEjecuciones="%d" />' % maxputbeeper)
print(" </condiciones>")
else:
print(" <condiciones instruccionesMaximasAEjecutar=\"%d\" longitudStack=\"65000\" />" % maxlines)
print(" <mundos>")
print(" <mundo nombre=\"mundo_0\" ancho=\"%d\" alto=\"%d\">" % (width, height))
for i in range(wallcount):
for wall in decodewalls(tuples[i], width, height):
print(" <pared %s/>" % encodewall(wall))
for i in range(wallcount, wallcount + heapcount):
print(" <monton x=\"%d\" y=\"%d\" zumbadores=\"%d\"/>" % tuples[i])
for i in range(10, 10 + dumpcount):
print(" <posicionDump x=\"%d\" y=\"%d\" />" % kec[i][:2])
print(" </mundo>")
print(" </mundos>")
print(" <programas tipoEjecucion=\"CONTINUA\" intruccionesCambioContexto=\"1\" milisegundosParaPasoAutomatico=\"0\">")
print(" <programa nombre=\"p1\" ruta=\"{$2$}\" mundoDeEjecucion=\"mundo_0\" xKarel=\"%d\" yKarel=\"%s\" direccionKarel=\"%s\" mochilaKarel=\"%s\" >" \
% (karelx, karely, ["", "NORTE", "ESTE", "SUR", "OESTE"][karelorient], formatbuzzers(buzzers)))
if dumpcount:
print(" <despliega tipo=\"MUNDO\" />")
if endorientation:
print(" <despliega tipo=\"ORIENTACION\" />")
if endposition:
print(" <despliega tipo=\"POSICION\" />")
print(" </programa>")
print(" </programas>")
print("</ejecucion>")
generateIn()
Kjir/papyon | papyon/gnet/io/tcp.py | Python | gpl-2.0 | 1,708 | 0.001172
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
# Copyright (C) 2006-2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.gnet.constants import *
from papyon.gnet.proxy.proxyfiable import ProxyfiableClient
from sock import SocketClient
import gobject
__all__ = ['TCPClient']
class TCPClient(SocketClient, ProxyfiableClient):
"""Asynchronous TCP client class.
@sort: __init__, open, send, close
@undocumented: do_*, _watch_*, __io_*, _connect_done_handler
@since: 0.1"""
def __init__(self, host, port):
"""initializer
@param host: the hostname to connect to.
@type host: string
@param port: the port number to connect to.
@type port: integer > 0 and < 65536"""
SocketClient.__init__(self, host, port, AF_INET, SOCK_STREAM)
ProxyfiableClient.__init__(self)
gobject.type_register(TCPClient)
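# Minimal usage sketch; host and port are illustrative. Per the docstring the
# public API is open/send/close, and a running gobject main loop is assumed
# to drive the asynchronous I/O:
#     client = TCPClient("example.com", 80)
#     client.open()
#     client.send("GET / HTTP/1.0\r\n\r\n")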
pkerpedjiev/forna | test/restserver_test.py | Python | apache-2.0 | 1,136 | 0.014085
import sys
import unittest
import restserver
import json
from flask import jsonify
class RestServerTest(unittest.TestCase):
def setUp(self):
self.app = restserver.create_app(static=True).test_client()
def tearDown(self):
pass
def test_struct_graph(self):
rv = self.app.post('/struct_graph')
# not posting any data should be a 'Bad Request'
# ideally, with an error message
        self.assertEqual(rv.data, "Missing a json in the request")
self.assertEqual(rv.status_code, 400)
data_in = json.dumps({'seq':'ACCCGG', 'struct':'((..))'})
rv = self.app.post('/struct_graph',
data=data_in,
content_type='application/json')
self.assertEqual(rv.status_code, 201)
data_in = json.dumps({'seq':'ACxCGG', 'struct':'((..))'})
        rv = self.app.post('/struct_graph',
data=data_in,
content_type='application/json')
self.assertEqual(rv.status_code, 400)
bbirand/python-driver | benchmarks/base.py | Python | apache-2.0 | 8,795 | 0.001478
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cProfile import Profile
import logging
import os.path
import sys
from threading import Thread
import time
from optparse import OptionParser
from greplin import scales
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
sys.path.append(os.path.join(dirname, '..'))
import cassandra
from cassandra.cluster import Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.policies import HostDistance
log = logging.getLogger()
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
logging.getLogger('cassandra').setLevel(logging.WARN)
have_libev = False
supported_reactors = [AsyncoreConnection]
try:
from cassandra.io.libevreactor import LibevConnection
have_libev = True
supported_reactors.append(LibevConnection)
except ImportError as exc:
pass
have_twisted = False
try:
from cassandra.io.twistedreactor import TwistedConnection
have_twisted = True
supported_reactors.append(TwistedConnection)
except ImportError as exc:
log.exception("Error importing twisted")
pass
KEYSPACE = "testkeyspace" + str(int(time.time()))
TABLE = "testtable"
def setup(hosts):
log.info("Using 'cassandra' package from %s", cassandra.__path__)
cluster = Cluster(hosts, protocol_version=1)
cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
try:
session = cluster.connect()
log.debug("Creating keyspace...")
session.execute("""
CREATE KEYSPACE %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""" % KEYSPACE)
log.debug("Setting keyspace...")
session.set_keyspace(KEYSPACE)
log.debug("Creating table...")
session.execute("""
CREATE TABLE %s (
thekey text,
col1 text,
col2 text,
PRIMARY KEY (thekey, col1)
)
""" % TABLE)
finally:
cluster.shutdown()
def teardown(hosts):
cluster = Cluster(hosts, protocol_version=1)
cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
session = cluster.connect()
session.execute("DROP KEYSPACE " + KEYSPACE)
cluster.shutdown()
def benchmark(thread_class):
options, args = parse_options()
for conn_class in options.supported_reactors:
setup(options.hosts)
log.info("==== %s ====" % (conn_class.__name__,))
kwargs = {'metrics_enabled': options.enable_metrics,
'connection_class': conn_class}
if options.protocol_version:
kwargs['protocol_version'] = options.protocol_version
cluster = Cluster(options.hosts, **kwargs)
session = cluster.connect(KEYSPACE)
log.debug("Sleeping for two seconds...")
time.sleep(2.0)
query = session.prepare("""
INSERT INTO {table} (thekey, col1, col2) VALUES (?, ?, ?)
""".format(tabl
|
e=TABLE))
values = ('key', 'a', 'b')
per_thread = options.num_ops // options.threads
threads = []
log.debug("Beginning inserts...")
start = time.time()
try:
for i in range(options.threads):
                thread = thread_class(
                    i, session, query, values, per_thread,
cluster.protocol_version, options.profile)
thread.daemon = True
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
while thread.is_alive():
thread.join(timeout=0.5)
end = time.time()
finally:
cluster.shutdown()
teardown(options.hosts)
total = end - start
log.info("Total time: %0.2fs" % total)
log.info("Average throughput: %0.2f/sec" % (options.num_ops / total))
if options.enable_metrics:
stats = scales.getStats()['cassandra']
log.info("Connection errors: %d", stats['connection_errors'])
log.info("Write timeouts: %d", stats['write_timeouts'])
log.info("Read timeouts: %d", stats['read_timeouts'])
log.info("Unavailables: %d", stats['unavailables'])
log.info("Other errors: %d", stats['other_errors'])
log.info("Retries: %d", stats['retries'])
request_timer = stats['request_timer']
log.info("Request latencies:")
log.info(" min: %0.4fs", request_timer['min'])
log.info(" max: %0.4fs", request_timer['max'])
log.info(" mean: %0.4fs", request_timer['mean'])
log.info(" stddev: %0.4fs", request_timer['stddev'])
log.info(" median: %0.4fs", request_timer['median'])
log.info(" 75th: %0.4fs", request_timer['75percentile'])
log.info(" 95th: %0.4fs", request_timer['95percentile'])
log.info(" 98th: %0.4fs", request_timer['98percentile'])
log.info(" 99th: %0.4fs", request_timer['99percentile'])
log.info(" 99.9th: %0.4fs", request_timer['999percentile'])
def parse_options():
parser = OptionParser()
parser.add_option('-H', '--hosts', default='127.0.0.1',
help='cassandra hosts to connect to (comma-separated list) [default: %default]')
parser.add_option('-t', '--threads', type='int', default=1,
help='number of threads [default: %default]')
parser.add_option('-n', '--num-ops', type='int', default=10000,
help='number of operations [default: %default]')
parser.add_option('--asyncore-only', action='store_true', dest='asyncore_only',
help='only benchmark with asyncore connections')
parser.add_option('--libev-only', action='store_true', dest='libev_only',
help='only benchmark with libev connections')
parser.add_option('--twisted-only', action='store_true', dest='twisted_only',
help='only benchmark with Twisted connections')
parser.add_option('-m', '--metrics', action='store_true', dest='enable_metrics',
help='enable and print metrics for operations')
parser.add_option('-l', '--log-level', default='info',
help='logging level: debug, info, warning, or error')
parser.add_option('-p', '--profile', action='store_true', dest='profile',
help='Profile the run')
parser.add_option('--protocol-version', type='int', dest='protocol_version',
help='Native protocol version to use')
options, args = parser.parse_args()
options.hosts = options.hosts.split(',')
log.setLevel(options.log_level.upper())
if options.asyncore_only:
options.supported_reactors = [AsyncoreConnection]
elif options.libev_only:
if not have_libev:
log.error("libev is not available")
sys.exit(1)
options.supported_reactors = [LibevConnection]
elif options.twisted_only:
if not have_twisted:
log.error("Twisted is not available")
sys.exit(1)
options.supported_reactors = [TwistedConnection]
else:
options.supported_reactors = supported_reactors
if not have_libev:
log.warning("Not benchmarking libev reactor because libev is not available")
return options, args
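# Usage sketch: a concrete benchmark script would subclass BenchmarkThread
# below and hand it to benchmark(), e.g. benchmark(MySyncThread), where
# MySyncThread is a hypothetical subclass implementing run().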
class BenchmarkThread(Thread):
def __init__(self, thread_num,
eLRuLL/scrapy | scrapy/extensions/closespider.py | Python | bsd-3-clause | 2,631 | 0.0019
"""CloseSpider is an extension that forces spiders to be closed after certain
conditions are met.
See documentation in docs/topics/extensions.rst
"""
from collections import defaultdict
from twisted.internet import reactor
from scrapy import signals
from scrapy.exceptions import NotConfigured
class CloseSpider(object):
def __init__(self, crawler):
self.crawler = crawler
self.close_on = {
'timeout': crawler.settings.getfloat('CLOSESPIDER_TIMEOUT'),
'itemcount': crawler.settings.getint('CLOSESPIDER_ITEMCOUNT'),
'pagecount': crawler.settings.getint('CLOSESPIDER_PAGECOUNT'),
'errorcount': crawler.settings.getint('CLOSESPIDER_ERRORCOUNT'),
}
if not any(self.close_on.values()):
raise NotConfigured
self.counter = defaultdict(int)
if self.close_on.get('errorcount'):
crawler.signals.connect(self.error_count, signal=signals.spider_error)
if self.close_on.get('pagecount'):
crawler.signals.connect(self.page_count, signal=signals.response_received)
if self.close_on.get('timeout'):
crawler.signals.connect(self.spider_opened, signal=signals.spider_opened)
if self.close_on.get('itemcount'):
crawler.signals.connect(self.item_scraped, signal=signals.item_scraped)
crawler.signals.connect(self.spider_closed, signal=signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def error_count(self, failure, response, spider):
self.counter['errorcount'] += 1
if self.counter['errorcount'] == self.close_on['errorcount']:
self.crawler.engine.close_spider(spider, 'closespider_errorcount')
def page_count(self, response, request, spider):
self.counter['pagecount'] += 1
        if self.counter['pagecount'] == self.close_on['pagecount']:
            self.crawler.engine.close_spider(spider, 'closespider_pagecount')
def spider_opened(self, spider):
self.task = reactor.callLater(self.close_on['timeout'],
self.crawler.engine.close_spider, spider,
reason='closespider_timeout')
    def item_scraped(self, item, spider):
self.counter['itemcount'] += 1
if self.counter['itemcount'] == self.close_on['itemcount']:
self.crawler.engine.close_spider(spider, 'closespider_itemcount')
def spider_closed(self, spider):
task = getattr(self, 'task', False)
if task and task.active():
task.cancel()
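# Hedged sketch of the project settings this extension reads; the values are
# illustrative, and any non-zero value enables the corresponding condition:
#     CLOSESPIDER_TIMEOUT = 3600       # seconds after spider_opened
#     CLOSESPIDER_ITEMCOUNT = 1000
#     CLOSESPIDER_PAGECOUNT = 0        # 0 leaves the condition disabled
#     CLOSESPIDER_ERRORCOUNT = 0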
FlorianLudwig/rueckenwind | test/test_scope.py | Python | apache-2.0 | 7,840 | 0.000128
from __future__ import absolute_import, division, print_function, with_statement
import pytest
import sys
from tornado import concurrent
import tornado.gen
import tornado.testing
import rw.scope
# def test_low_level_api():
# scope = rw.scope.Scope()
# current_user = object()
#
# def get_current_user():
# get_current_user.runs += 1
# return current_user
# scope.provider('user', get_current_user)
# get_current_user.runs = 0
#
# assert get_current_user.runs == 0
# assert rw.scope.get('user') is current_user
# assert get_current_user.runs == 1
# # make sure provider is not run twice
# rw.scope.get('user')
# assert get_current_user.runs == 1
#
# assert scope.get('unknown value', 'default') == 'default'
# with pytest.raises(IndexError):
# scope.get('unknown value')
def test_basic():
scope = rw.scope.Scope()
scope['some_static_value'] = 42
current_user = object()
def get_current_user():
return current_user
scope.provider('user', get_current_user)
@rw.scope.inject
def foo(user):
return user
@rw.scope.inject
def bar(some_static_value):
return some_static_value
@rw.scope.inject
def some_function_with_a_default_value(my_paramenter='my_default_value'):
return my_paramenter
with scope():
assert foo() is current_user
assert foo(1) == 1
assert foo() is current_user
assert bar() == 42
assert bar(10) == 10
assert bar(some_static_value=11) == 11
# normal calling behaviour must be preserved
assert some_function_with_a_default_value('value') == 'value'
assert some_function_with_a_default_value() == 'my_default_value'
# check nested scope
nested_scope = rw.scope.Scope()
nested_scope['user'] = 2
with nested_scope():
assert foo() == 2
assert bar() == 42
assert foo() is current_user
assert bar() == 42
def test_recursion():
"""Entering the same scope twice should not produce unexpected behaviour"""
scope = rw.scope.Scope()
scope2 = rw.scope.Scope()
with scope():
assert rw.scope.get_current_scope() is scope
with scope2():
assert rw.scope.get_current_scope() is scope2
with scope2():
assert rw.scope.get_current_scope() is scope2
with scope():
assert rw.scope.get_current_scope() is scope
assert rw.scope.get_current_scope() is scope2
assert rw.scope.get_current_scope() is scope2
assert rw.scope.get_current_scope() is scope
assert rw.scope.get_current_scope() is None
def test_sub_scope():
scope1 = rw.scope.Scope()
scope2 = rw.scope.Scope()
scope3 = rw.scope.Scope()
    sub1 = scope1.subscope('my_sub_scope')
sub2 = scope2.subscope('my_sub_scope')
sub1['value_1'] = 1
sub1['shared'] = 1
sub2['value_2'] = 2
sub2['shared'] = 2
@rw.scope.inject
    def get_sub_scope(my_sub_scope):
return my_sub_scope
@rw.scope.inject
def get_sub_scope_var(var, my_sub_scope):
return my_sub_scope[var]
def checks_inside_scope1():
assert rw.scope.get('my_sub_scope') == get_sub_scope()
assert rw.scope.get('my_sub_scope')['value_1'] == 1
assert get_sub_scope_var('value_1') == 1
assert rw.scope.get('my_sub_scope')['shared'] == 1
assert get_sub_scope_var('shared') == 1
assert 'value_2' not in rw.scope.get('my_sub_scope')
def checks_inside_scope2():
assert rw.scope.get('my_sub_scope') == get_sub_scope()
assert rw.scope.get('my_sub_scope')['value_1'] == 1
assert get_sub_scope_var('value_1') == 1
assert rw.scope.get('my_sub_scope')['value_2'] == 2
assert get_sub_scope_var('value_2') == 2
assert rw.scope.get('my_sub_scope')['shared'] == 2
assert get_sub_scope_var('shared') == 2
with scope1():
checks_inside_scope1()
with scope2():
checks_inside_scope2()
with scope3():
# scope 3 does not have a 'my_sub_scope' subscope
# so we expect the same results as for scope 2
checks_inside_scope2()
checks_inside_scope1()
def test_fail():
@rw.scope.inject
def foo(something_to_inject):
pass
with pytest.raises(rw.scope.OutsideScopeError):
foo()
# if all arguments are provided we are ok to run outside of a scope
foo(something_to_inject=1)
class ScopeLeakingTest(tornado.testing.AsyncTestCase):
def test_scope_leaking(self):
        # if an exception occurs inside a scope, the scope might not
        # get cleaned up correctly.
scope = rw.scope.Scope()
with pytest.raises(NotImplementedError):
with scope():
raise NotImplementedError('Just some random error')
        # now we are outside of the scope
assert rw.scope.get_current_scope() is None
@tornado.gen.coroutine
def check_a():
assert rw.scope.get('name') == 'a'
class ConcurrencyTestWithoutWithStatement(tornado.testing.AsyncTestCase):
@tornado.testing.gen_test
def test_stuff(self):
"""Setup two scopes and two "locks"."""
self.scope_a = rw.scope.Scope()
self.scope_a['name'] = 'a'
yield self.scope_a.run(check_a)
class ConcurrencyTest(tornado.testing.AsyncTestCase):
"""test concurrent ioloop futures inside different scopes
Three tests with different resolution order
"""
def setup(self):
"""Setup two scopes and two "locks"."""
self.scope_a = rw.scope.Scope()
self.scope_a['name'] = 'a'
self.lock_a = concurrent.Future()
self.scope_b = rw.scope.Scope()
self.scope_b['name'] = 'b'
self.lock_b = concurrent.Future()
@rw.scope.inject
def get_name(name):
return name
@tornado.gen.coroutine
def thread_a():
yield self.lock_a
raise tornado.gen.Return(get_name())
@tornado.gen.coroutine
def thread_b():
yield self.lock_b
raise tornado.gen.Return(get_name())
with self.scope_a():
future_a = thread_a()
with self.scope_b():
future_b = thread_b()
return future_a, future_b
@tornado.testing.gen_test
def test_concurrent_scopes_both(self):
"""set both results before yield-ing"""
future_a, future_b = self.setup()
self.lock_a.set_result(None)
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
assert (yield future_a) == 'a'
@tornado.testing.gen_test
def test_concurrent_scopes_ba(self):
"""b then a"""
future_a, future_b = self.setup()
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
self.lock_a.set_result(None)
assert (yield future_a) == 'a'
@tornado.testing.gen_test
def test_concurrent_scopes_ab(self):
"""a then b"""
future_a, future_b = self.setup()
self.lock_a.set_result(None)
assert (yield future_a) == 'a'
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
if sys.version_info >= (3, 0):
def test_scope_with_hint():
from test.scope_py3 import test_python3_typehinted_injection
test_python3_typehinted_injection()
soerendip42/rdkit | Contrib/M_Kossner/Frames.py | Python | bsd-3-clause | 6,124 | 0.033801
#!/usr/bin/python
# encoding: utf-8
# Jan 2011 (markus kossner) Cleaned up the code, added some documentation
# somwhere around Aug 2008 (markus kossner) created
#
# This script extracts the molecular framework for a database of molecules.
# You can use two modes (hard coded):
# - Scaff: The molecular frame is extracted
# - RedScaff: All linking chains between rings are deleted. The rings are directly connected.
#
# You can comment in/out the code snippets indicated by the comments
# to force each atom of the frame to be a Carbon.
#
# Usage: Frames.py <database.sdf>
# Output:
# - sd files containing all molecules belonging to one frame (1.sdf, 2.sdf etc)
# - frames.smi containing the (canonical) smiles and count of occurrence
#
from __future__ import print_function
import os,sys
from Chem import AllChem as Chem
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all nested sub-sequences (iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def GetFrame(mol, mode='Scaff'):
    '''return a generic molecule defining the reduced scaffold of the input mol.
mode can be 'Scaff' or 'RedScaff':
Scaff -> chop off the side chains and return the scaffold
RedScaff -> remove all linking chains and connect the rings
directly at the atoms where the linker was
'''
ring = mol.GetRingInfo()
RingAtoms = flatten(ring.AtomRings())
NonRingAtoms = [ atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in RingAtoms ]
RingNeighbors = []
Paths = []
for NonRingAtom in NonRingAtoms:
for neighbor in mol.GetAtomWithIdx(NonRingAtom).GetNeighbors():
if neighbor.GetIdx() in RingAtoms:
RingNeighbors.append(NonRingAtom)
                Paths.append([neighbor.GetIdx(),NonRingAtom]) # The ring atoms having a non-ring neighbor will be the start of a walk
break
PosConnectors = [x for x in NonRingAtoms if x not in RingNeighbors] #Only these Atoms are potential starting points of a Linker chain
#print 'PosConnectors:'
#print PosConnectors
Framework = [ x for x in RingAtoms ]
#Start a list of pathways which we will have to walk
#print 'Path atoms:'
#print Paths
Linkers = []
while len(Paths)>0:
NewPaths = []
for P in Paths:
if P == None:
print('ooh')
else:
for neighbor in mol.GetAtomWithIdx(P[-1]).GetNeighbors():
if neighbor.GetIdx() not in P:
if neighbor.GetIdx() in NonRingAtoms:
n = P[:]
n.append(neighbor.GetIdx())
NewPaths.append(n[:])
elif neighbor.GetIdx() in RingAtoms:
#print 'adding the following path to Framework:'
#print P
n = P[:]
n.append(neighbor.GetIdx())
Linkers.append(n)
Framework=Framework+P[:]
Paths = NewPaths[:]
#print 'Linkers:',Linkers
#print 'RingAtoms:',RingAtoms
#em.AddBond(3,4,Chem.BondType.SINGLE)
if mode == 'RedScaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
em = Chem.EditableMol(mol)
BondsToAdd = [ sorted([i[0],i[-1]]) for i in Linkers ]
mem = []
for i in BondsToAdd:
if i not in mem:
em.AddBond(i[0],i[1],Chem.BondType.SINGLE)
mem.append(i)
for i in NonRingAtoms:
todel.append(i)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
if mode == 'Scaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
for i in NonRingAtoms:
if i != None:
if i not in Framework:
todel.append(i)
em = Chem.EditableMol(mol)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond!! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
if __name__=='__main__':
if len(sys.argv) < 2:
print("No input file provided: Frames.py filetosprocess.ext")
sys.exit(1)
suppl = Chem.SDMolSupplier(sys.argv[1])
FrameDict = {}
    for mol in suppl:
        m = GetFrame(mol)
cansmiles = Chem.MolToSmiles(m, isomericSmiles=True)
        if cansmiles in FrameDict:
FrameDict[cansmiles].append(mol)
else:
FrameDict[cansmiles]=[mol,]
counter=0
w=open('frames.smi','w')
for key,item in FrameDict.items():
counter+=1
        d=Chem.SDWriter(str(counter)+'.sdf')
for i in item:
i.SetProp('Scaffold',key)
i.SetProp('Cluster',str(counter))
d.write(i)
print(key,len(item))
w.write(key+'\t'+str(len(item))+'\n')
    w.close()
print('number of Clusters: %d' %(counter))
haggi/OpenMaya | src/common/python/Renderer/textureCreator.py | Python | mit | 17,166 | 0.005942
import path
import logging
import shutil
import os
log = logging
START_ID = "automatically created attributes start"
END_ID = "automatically created attributes end"
START_STATIC_ID = "automatically created static attributes start"
END_STATIC_ID = "automatically created static attributes end"
START_NODE_ID = 0x0011EF5E
COMPILER_VERSION = "vs2010"
baseDestPath = None
baseSourcePath = None
CODINGROOT = "H:/userDatenHaggi/documents/coding/"
# an automatic attribute is defined as follows:
# attributeName, type, displayName, defaultValue, options
# e.g.
# samples, int, Shading Samples, 2
# filters, enum, Pixel Filter, 0, Mitchell:Gauss:Triangle
# bgColor, color, Background Color, 0.4:0.5:0.7
def aeTemplateCreator(attDict, renderer, shortCut):
sourceAEFile = baseSourcePath + "/mt@_devmodule/scripts/@/AETemplate/AE@shaderTemplate.py"
destAEPath = path.path(baseDestPath + "/mt@_devmodule/scripts/@/AETemplate/".replace("mt@_", shortCut + "_").replace("@", renderer.capitalize()))
print "Sourcefile", sourceAEFile
print "Destpath", destAEPath
allContent = []
allContent.append(' self.addSeparator()\n')
for key in attDict.keys():
if key.lower() == "all":
for attKey in attDict[key].keys():
if attKey == "out":
continue
attName = attKey
attDisplayName = attDict[key][attKey][1]
allContent.append(' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName))
for key in attDict.keys():
newContent = []
aeFileName = "AE" + renderer.lower() + key.capitalize() + "Template.py"
destAEFile = path.path(destAEPath + aeFileName)
#print "create AE for", key, destAEFile
if destAEFile.exists():
continue
if key.lower() == "all":
continue
print "Creating AE file", destAEFile
sourceHandle = open(sourceAEFile, "r")
content = sourceHandle.readlines()
sourceHandle.close()
startIndex = 0
endIndex = 0
noColorOut = False
for attKey in attDict[key].keys():
if attKey == "out":
if not "color" in attDict[key][attKey][0]:
noColorOut = True
for index, line in enumerate(content):
if "AE@shaderTemplate" in line:
content[index] = line.replace("AE@shaderTemplate", "AE" + renderer.lower() + key.capitalize() + "Template")
#if noColorOut:
# if "pm.mel.AEswatchDisplay(nodeName)" in line:
# content[index] = "#"+line
if "#autoAddBegin" in line:
print "Start new content"
startIndex = index
if "#autoAddEnd" in line:
print "End new content"
endIndex = index
#print "Creating data for", key
#print attDict[key]
for attKey in attDict[key].keys():
if attKey == "out":
continue
attName = attKey
attDisplayName = attDict[key][attKey][1]
#print ' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName)
newContent.append(' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName))
finalContent = []
finalContent.extend(content[:startIndex+1])
finalContent.extend(newContent)
finalContent.extend(allContent)
finalContent.extend(content[endIndex:])
#print finalContent
destHandle = open(destAEFile, "w")
destHandle.writelines(finalContent)
destHandle.close()
class Attribute(object):
def __init__(self, aName, aType, aDisplayName, default, data=None):
self.name = aName
self.type = aType
self.displayName = aDisplayName
        self.default = default
self.data = data
print "Attribute", self.name, self.type, self.displayName, self.default, self.data
print "make func", "make{0}".format(self.type.capitalize())
def getDefForDefFile(self):
return "inAtt:{0}:{1}".format(self.name, self.type)
def getDefinition(self):
return "\tstatic MObject {0};".format(self.name)
def getAEDefinition(self):
        return '        self.addControl("{0}", label="{1}")'.format(self.name, self.displayName)
def getStaticDefinition(self):
return "MObject\tTextureBase::{0};".format(self.name)
def getImplementation(self):
methodName = "make" + self.type.capitalize()
print "Calling", methodName, "for", self.name
return getattr(Attribute, methodName)(self)
def makeInt(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kInt, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeBool(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kBoolean, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeFloat(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kFloat, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeColor(self):
string = "\t{0} = nAttr.createColor(\"{0}\", \"{0}\");\n".format(self.name)
if len(self.default.split(":")) == 1:
a = self.default.split(":")[0]
self.default = "{0}:{0}:{0}".format(a)
string += "\tnAttr.setDefault({0});\n".format(",".join(self.default.split(":")))
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeVector(self):
string = "\tMObject {0}X = nAttr.create(\"{0}X\", \"{0}x\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\tMObject {0}Y = nAttr.create(\"{0}Y\", \"{0}y\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\tMObject {0}Z = nAttr.create(\"{0}Z\", \"{0}z\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\t{0} = nAttr.create(\"{0}\", \"{0}\", {0}X, {0}Y, {0}Z);\n".format(self.name)
if len(self.default.split(":")) == 1:
a = self.default.split(":")[0]
self.default = "{0}:{0}:{0}".format(a)
string += "\tMAKE_INPUT(nAttr);\n"
if self.type.endswith("Array"):
string += "\tnAttr.setArray(true);\n"
string += "\tnAttr.setDefault({0});\n".format(",".join(self.default.split(":")))
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
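# Usage sketch for Attribute; the values are hypothetical:
#     att = Attribute("samples", "int", "Shading Samples", "2")
#     att.getDefinition()       # '\tstatic MObject samples;'
#     att.getImplementation()   # dispatches to makeInt() above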
class ShaderNode(object):
def __init__(self, name):
self.name = name
self.attributeList = []
self.pluginId = 0
self.path = None
class TextureCreator(object):
def __init__(self, startId, name, shortcut):
self.pluginStartId = startId
self.rendererName = name
self.codingRoot = path.path(__file__).parent.parent.parent.parent
self.rendererCodingRoot = self.codingRoot + "/mayaTo" + self.rendererName.capitalize() + "/src"
self.capitalName = self.rendererName.capitalize()
self.shortCut = shortcut
self.baseDestination = path.path("{0}/mayaTo{1}".format(self.codingRoot , self.capitalName))
self.mayaToBaseTexH = "{0}/mayaToBase/src/shaders/textureBase.h".format(self.codingRoot )
self.mayaToBaseTexCPP = "{0}/mayaToBase/src/shaders/textureBase.cpp".format(self.codingRoot )
self.rendererMatDefs = path.path("{0}/mayaTo{1}/vs2010/sourceCodeDocs/textures.txt".format(self.codingRoot , self.capitalName))
self.destinationDir = path.path("{0}/mayaTo{1}/src/textures".format(self.codingRoot , self.capitalName))
self.nodesToCreate = []
self.texFileContent = None
self.textureFileHandle = None
RDCEP/psims | pysims/models/apsim75.py | Python | agpl-3.0 | 2,004 | 0.003992
import glob
import os
import shutil
import sys
import tarfile
import traceback
from model import Model
from subprocess import Popen, PIPE
class Apsim75(Model):
def run(self, latidx, lonidx):
        try:
            apsim_bin = self.config.get('executable')
            # The apsim 'executable' is a gzipped tarball that needs to be extracted into the current working directory
tar = tarfile.open(apsim_bin)
tar.extractall()
tar.close()
model_dir = 'Model'
for xml_file in glob.glob('*.xml'):
if os.path.basename(xml_file) == 'Apsim.xml':
continue
old_xml = '%s/%s' % (model_dir, os.path.basename(xml_file))
if os.path.isfile(old_xml):
os.remove(old_xml)
if os.path.islink(xml_file):
link = os.readlink(xml_file)
shutil.copy(link, model_dir)
else:
shutil.copy(xml_file, model_dir)
# Create sim files
p = Popen('source paths.sh ; mono Model/ApsimToSim.exe Generic.apsim', shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file = open('RESULT.OUT', 'w')
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
# Run apsim for each sim file
for sim in glob.glob('*.sim'):
p = Popen('source paths.sh ; Model/ApsimModel.exe %s' % sim, shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
stdout_file.close()
return True
except:
print "[%s]: %s" % (os.path.basename(__file__), traceback.format_exc())
return False
mishbahr/djangocms-fbcomments | aldryn_config.py | Python | bsd-3-clause | 819 | 0.003663
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_client import forms
class Form(forms.BaseForm):
plugin_module = forms.CharField('Plugin module name', initial='Generic')
plugin_name = forms.CharField('Plugin name', initial='Facebook Comments')
plugin_template = forms.CharField('Plugin Template', initial='djangocms_fbcomments/default.html')
app_id = forms.CharField('Facebook App ID', required=False)
    def to_settings(self, data, settings):
        settings['DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE'] = data['plugin_module']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_NAME'] = data['plugin_name']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE'] = data['plugin_template']
settings['DJANGOCMS_FBCOMMENTS_APP_ID'] = data['app_id']
return settings
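# Illustrative call with hypothetical data values:
#     to_settings({'plugin_module': 'Generic', 'plugin_name': 'Facebook Comments',
#                  'plugin_template': 'djangocms_fbcomments/default.html',
#                  'app_id': ''}, {})
# returns the settings dict with the four DJANGOCMS_FBCOMMENTS_* keys filled
# from the form data.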
jamespcole/home-assistant | homeassistant/components/knx/climate.py | Python | apache-2.0 | 11,048 | 0
"""Support for KNX/IP climate devices."""
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
STATE_DRY, STATE_ECO, STATE_FAN_ONLY, STATE_HEAT, STATE_IDLE, STATE_MANUAL,
SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = 'target_temperature_state_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This list may not be complete.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
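# Illustrative sketch (values taken from the mapping above): the inverted
# dict translates an HA state back to its KNX mode name, e.g.
#   OPERATION_MODES_INV[STATE_HEAT] == "Comfort"
#   OPERATION_MODES_INV[STATE_FAN_ONLY] == "Fan only"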
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES):
vol.All(cv.ensure_list, [vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
        group_address_controller_status_state=config.get(
            CONF_CONTROLLER_STATUS_STATE_ADDRESS),
        group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config[CONF_TEMPERATURE_ADDRESS],
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_target_temperature_state=config[
CONF_TARGET_TEMPERATURE_STATE_ADDRESS],
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(
|
akshayaurora/kivy
|
kivy/uix/splitter.py
|
Python
|
mit
| 13,228
| 0.000076
|
'''Splitter
========
.. versionadded:: 1.5.0
.. image:: images/splitter.jpg
:align: right
The :class:`Splitter` is a widget that helps you re-size its child
widget/layout by letting you re-size it via dragging the boundary or
double tapping the boundary. This widget is similar to the
:class:`~kivy.uix.scrollview.ScrollView` in that it allows only one
child widget.
Usage::
splitter = Splitter(sizable_from = 'right')
splitter.add_widget(layout_or_widget_instance)
splitter.min_size = 100
splitter.max_size = 250
To change the size of the strip/border used for resizing::
splitter.strip_size = '10pt'
To change its appearance::
splitter.strip_cls = your_custom_class
You can also change the appearance of the `strip_cls`, which defaults to
:class:`SplitterStrip`, by overriding the `kv` rule in your app:
.. code-block:: kv
<SplitterStrip>:
horizontal: True if self.parent and self.parent.sizable_from[0] \
in ('t', 'b') else False
background_normal: 'path to normal horizontal image' \
if self.horizontal else 'path to vertical normal image'
background_down: 'path to pressed horizontal image' \
if self.horizontal else 'path to vertical pressed image'
'''
__all__ = ('Splitter', )
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.button import Button
from kivy.properties import (OptionProperty, NumericProperty, ObjectProperty,
ListProperty, BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
class SplitterStrip(Button):
    '''Class used for the graphical representation of a
    :class:`kivy.uix.splitter.SplitterStrip`.
'''
pass
class Splitter(BoxLayout):
'''See module documentation.
:Events:
`on_press`:
Fired when the splitter is pressed.
`on_release`:
Fired when the splitter is released.
.. versionchanged:: 1.6.0
Added `on_press` and `on_release` events.
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for the
:class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction.
This must be a list of four values: (bottom, right, top, left).
    Read the BorderImage instructions for more information about how
to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (4, 4, 4, 4).
'''
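    # Illustrative sketch (hypothetical values): border: [2, 4, 2, 4] would
    # keep 2px of the strip image fixed at the bottom/top edges and 4px at
    # the right/left edges when the BorderImage is stretched.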
strip_cls = ObjectProperty(SplitterStrip)
'''Specifies the class of the resize Strip.
:attr:`strip_cls` is an :class:`kivy.properties.ObjectProperty` and
defaults to :class:`~kivy.uix.splitter.SplitterStrip`, which is of type
:class:`~kivy.uix.button.Button`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
sizable_from = OptionProperty('left', options=(
'left', 'right', 'top', 'bottom'))
    '''Specifies the side from which the widget is resizable. Options are:
`left`, `right`, `top` or `bottom`
:attr:`sizable_from` is an :class:`~kivy.properties.OptionProperty`
and defaults to `left`.
'''
strip_size = NumericProperty('10pt')
    '''Specifies the size of the resize strip.
    :attr:`strip_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to `10pt`.
'''
min_size = NumericProperty('100pt')
'''Specifies the minimum size beyond which the widget is not resizable.
:attr:`min_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `100pt`.
'''
max_size = NumericProperty('500pt')
'''Specifies the maximum size beyond which the widget is not resizable.
:attr:`max_size` is a :class:`~kivy.properties.NumericProperty`
and defaults to `500pt`.
'''
_parent_proportion = NumericProperty(0.)
'''(internal) Specifies the distance that the slider has travelled
across its parent, used to automatically maintain a sensible
position if the parent is resized.
:attr:`_parent_proportion` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
.. versionadded:: 1.9.0
'''
_bound_parent = ObjectProperty(None, allownone=True)
'''(internal) References the widget whose size is currently being
tracked by :attr:`_parent_proportion`.
:attr:`_bound_parent` is a
:class:`~kivy.properties.ObjectProperty` and defaults to None.
.. versionadded:: 1.9.0
'''
keep_within_parent = BooleanProperty(False)
'''If True, will limit the splitter to stay within its parent widget.
:attr:`keep_within_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
rescale_with_parent = BooleanProperty(False)
'''If True, will automatically change size to take up the same
proportion of the parent widget when it is resized, while
staying within :attr:`min_size` and :attr:`max_size`. As long as
these attributes can be satisfied, this stops the
:class:`Splitter` from exceeding the parent size during rescaling.
:attr:`rescale_with_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
__events__ = ('on_press', 'on_release')
def __init__(self, **kwargs):
self._container = None
self._strip = None
super(Splitter, self).__init__(**kwargs)
do_size = self._do_size
fbind = self.fbind
fbind('max_size', do_size)
fbind('min_size', do_size)
fbind('parent', self._rebind_parent)
def on_sizable_from(self, instance, sizable_from):
if not instance._container:
return
sup = super(Splitter, instance)
_strp = instance._strip
if _strp:
# remove any previous binds
_strp.unbind(on_touch_down=instance.strip_down)
_strp.unbind(on_touch_move=instance.strip_move)
_strp.unbind(on_touch_up=instance.strip_up)
self.unbind(disabled=_strp.setter('disabled'))
sup.remove_widget(instance._strip)
else:
cls = instance.strip_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
instance._strip = _strp = cls()
sz_frm = instance.sizable_from[0]
if sz_frm in ('l', 'r'):
_strp.size_hint = None, 1
_strp.width = instance.strip_size
instance.orientation = 'horizontal'
instance.unbind(strip_size=_strp.setter('width'))
instance.bind(strip_size=_strp.setter('width'))
else:
_strp.size_hint = 1, None
_strp.height = instance.strip_size
instance.orientation = 'vertical'
instance.unbind(strip_size=_strp.setter('height'))
instance.bind(strip_size=_strp.setter('height'))
index = 1
if sz_frm in ('r', 'b'):
index = 0
sup.add_widget(_strp, index)
_strp.bind(on_touch_down=instance.strip_down)
_strp.bind(on_touch_move=instance.strip_move)
_strp.bind(on_touch_up=instance.strip_up)
_strp.disabled = self.disabled
self.bind(disabled=_strp.setter('disabled'))
def add_widget(self, widget, index=0):
if self._container or not widget:
            raise Exception('Splitter accepts only one child')
self._container = widget
sz_frm = self.sizable_from[0]
if sz_frm in ('l', 'r'):
widget.size_hint_x = 1
else:
widget.size_hint_y = 1
index = 0
if sz_frm in ('r', 'b'):
index = 1
super(Splitter, self).add_widget(widget, index)
self.on_sizable_from(self, self.sizable_from)
def remove_widget(self, widget, *largs):
super(Splitter, self).remove_widget(widget)
if widget == self._container:
self._container = None
def clear_widgets(self):
self.remove_widget(self._container)
def strip_down(self, instance, touch):
if not instance.collide_point(*touch.pos):
return False
touch.grab(self)
self.dis
|
hovo1990/deviser
|
generator/legacy/createCMakeFiles.py
|
Python
|
lgpl-2.1
| 16,329
| 0.020454
|
#!/usr/bin/env python
#
# @file createCMakeFiles.py
# @brief create the CMake files
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2014 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
import sys
import os
import fileHeaders
import strFunctions
def writeSrcListsFile(name, nameOfPackage, plugins, classes):
capName = name.upper()
uname = strFunctions.cap(name)
codeName = 'CMakeLists.txt'
fileOut = open(codeName, 'w')
fileOut.write('# This CMake file integrates the binding source with the libsbml source tree\n#\n\n\n'.format(name))
fileOut.write('# include common functions (used for copying / removing files)\n')
fileOut.write('if(NOT EXISTS ${LIBSBML_SOURCE}/common.cmake)\n')
fileOut.write(' message(FATAL_ERROR "Invalid libsbml source directory")\n')
fileOut.write('endif()\n\n')
fileOut.write('include(${LIBSBML_SOURCE}/common.cmake)\n\n')
fileOut.write('# specify the package files\n')
fileOut.write('set(PACKAGE_FILES\n\n')
    fileOut.write('  # forward declarations\n')
fileOut.write(' "common/{0}fwd.h"\n'.format(name))
fileOut.write(' "common/{0}ExtensionTypes.h"\n\n'.format(uname))
fileOut.write(' # extension points\n')
fileOut.write(' "extension/{0}Extension.h"\n'.format(uname))
for i in range (0, len(plugins)):
fileOut.write(' "extension/{0}{1}Plugin.h"\n'.format(nameOfPackage, plugins[i]['sbase']))
fileOut.write(' "extension/{0}Extension.cpp"\n'.format(uname))
for i in range (0, len(plugins)):
fileOut.write(' "extension/{0}{1}Plugin.cpp"\n'.format(nameOfPackage, plugins[i]['sbase']))
fileOut.write('\n #new SBML classes\n')
for i in range (0, len(classes)):
fileOut.write(' "sbml/{0}.h"\n'.format(classes[i]['name']))
for i in range (0, len(classes)):
fileOut.write(' "sbml/{0}.cpp"\n'.format(classes[i]['name']))
fileOut.write('\n #test cases\n')
fileOut.write('\n\n')
fileOut.write(' )\n\n')
fileOut.write('# specify the files for the language bindings\n')
fileOut.write('set(BINDING_FILES\n\n')
fileOut.write(' # C# bindings\n')
fileOut.write(' "bindings/csharp/local-downcast-extension-{0}.i"\n'.format(name))
fileOut.write(' "bindings/csharp/local-downcast-namespaces-{0}.i"\n'.format(name))
fileOut.write(' "bindings/csharp/local-packages-{0}.i"\n\n'.format(name))
fileOut.write(' # java bindings\n')
fileOut.write(' "bindings/java/local-downcast-extension-{0}.i"\n'.format(name))
fileOut.write(' "bindings/java/local-downcast-namespaces-{0}.i"\n'.format(name))
fileOut.write(' "bindings/java/local-packages-{0}.i"\n\n'.format(name))
fileOut.write(' # perl bindings\n')
fileOut.write(' "bindings/perl/local-downcast-extension-{0}.cpp"\n'.format(name))
    fileOut.write('              "bindings/perl/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/perl/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/perl/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # python bindings\n')
fileOut.write(' "bindings/python/local-downcast-extension-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # ruby bindings\n')
fileOut.write(' "bindings/ruby/local-downcast-extension-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # generic swig bindings\n')
fileOut.write(' "bindings/swig/{0}-package.h"\n'.format(name))
fileOut.write(' "bindings/swig/{0}-package.i"\n\n'.format(name))
fileOut.write(' )\n\n')
fileOut.write('if(MODE STREQUAL "integrate")\n')
fileOut.write(' # integrate the package with the specified libsbml source directory\n\n')
fileOut.write(' # copy the CMake script that integrates the source files with libsbml-5\n')
fileOut.write(' copy_file("../{0}-package.cmake" '.format(name))
fileOut.write('${LIBSBML_SOURCE})\n')
fileOut.write(' copy_file("{0}-package.cmake" '.format(name))
fileOut.write('${LIBSBML_SOURCE}/src)\n\n')
fileOut.write(' # copy language binding files\n')
fileOut.write(' foreach(bindingFile ${BINDING_FILES})\n')
fileOut.write(' copy_file_to_subdir( ${bindingFile} ${LIBSBML_SOURCE}/src)\n')
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy package files\n')
fileOut.write(' foreach(packageFile ${PACKAGE_FILES})\n')
fileOut.write(' copy_file_to_subdir( ${packageFile} ${LIBSBML_SOURCE}')
fileOut.write('/src/packages/{0})\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy header files to include directory just in case\n')
fileOut.write(' foreach(dir common extension sbml)\n\n')
fileOut.write(' # copy files\n')
fileOut.write(' copy_files( ${CMAKE_CURRENT_SOURCE_DIR}/${dir}/\n')
fileOut.write(' ${LIBSBML_SOURCE}')
fileOut.write('/include/sbml/{0} *.h )\n\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' add_custom_target(integrate ALL)\n\n')
fileOut.write(' message(STATUS "Finished integrating the SBML {0} package with the libsbml source tree in:")\n'.format(name))
fileOut.write(' message(STATUS "${LIBSBML_SOURCE}")\n\n')
fileOut.write('elseif(MODE STREQUAL "remove")\n')
fileOut.write(' # remove all package files from the specified libsbml source directory\n\n')
fileOut.write(' remove_file(${LIBSBML_SOURCE}')
fileOut.write('/{0}-package.cmake)\n'.format(name))
fileOut.write(' remove_file(${LIBSBML_SOURCE}')
fileOut.write('/src/{0}-package.cmake)\n\n'.format(name))
fileOut.write(' # copy language binding files\n')
fileOut.write(' foreach(bindingFile ${BINDING_FILES})\n')
fileOut.write(' remove_file_in_subdir( ${bindingFile} ${LIBSBML_SOURCE}/src)\n')
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy package files\n')
fileOut.write(' foreach(packageFile ${PACKAGE_FILES})\n')
fileOut.write(' remove_file_in_subdir( ${packageFile} ${LIBSBML_SOURCE}')
fileOut.write('/src/packages/{0})\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' # delete package directory\n')
fileOut.write(' fi
|
mic4ael/indico
|
indico/testing/fixtures/util.py
|
Python
|
mit
| 2,040
| 0.00049
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import inspect
from datetime import datetime
import freezegun
import pytest
from sqlalchemy import DateTime, cast
from sqlalchemy.sql.functions import _FunctionGenerator
@pytest.fixture
def monkeypatch_methods(monkeypatch):
"""Monkeypatches all methods from `cls` onto `target`
This utility lets you easily mock multiple methods in an existing class.
In case of classmethods the binding will not be changed, i.e. `cls` will
keep pointing to the source class and not the target class.
"""
def _monkeypatch_methods(target, cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
if method.__self__ is None:
# For unbound methods we need to copy the underlying function
method = method.__func__
monkeypatch.setattr('{}.{}'.format(target, name), method)
return _monkeypatch_methods
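# Illustrative usage sketch (names hypothetical): inside a test,
#   monkeypatch_methods('some.module.TargetClass', MockedClass)
# copies every method defined on MockedClass onto TargetClass for the
# duration of the test.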
@pytest.fixture
def freeze_time(monkeypatch):
"""Returns a function that freezes the current time
It affects datetime.now, date.today, etc. and also SQLAlchemy's `func.now()`
which simply returns the current time from `datetime.now()` instead of
retrieving it using the actual `now()` function of PostgreSQL.
"""
freezers = []
    orig_call = _FunctionGenerator.__call__
def FunctionGenerator_call(self, *args, **kwargs):
if self._FunctionGenerator__names == ['now']:
return cast(datetime.now().isoformat(), DateTime)
return orig_call(self, *args, **kwargs)
monkeypatch.setattr(_FunctionGenerator, '__call__', FunctionGenerator_call)
def _freeze_time(time_to_freeze):
freezer = freezegun.freeze_time(time_to_freeze)
freezer.start()
freezers.append(freezer)
yield _freeze_time
for freezer in reversed(freezers):
freezer.stop()
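# Illustrative usage sketch (date hypothetical):
#   def test_deadline(freeze_time):
#       freeze_time(datetime(2020, 1, 1))
#       assert datetime.now() == datetime(2020, 1, 1)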
|
nevtum/Echopoint
|
tests/testrunner.py
|
Python
|
apache-2.0
| 569
| 0.005272
|
from tests.subscriptions import Subscription_tests
from tests.messaging import Event_publisher_tests
from tests.callbacks import Callback_tests
from tests.shortcut_subscriptions import Decorator_tests
import unittest
import logging
def configure_logger():
handler = logging.FileHandler('log.txt')
formatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
if __name__ == '__main__':
configure_logger()
unittest.main()
|
BiRG/Omics-Dashboard
|
modules/sbin/batch_parse_txtxy.py
|
Python
|
mit
| 457
| 0
|
#!/usr/bin/env python3
import sys
import text_parsers as tp
import os
infilenames = sys.argv[1:len(sys.argv)-1]
nameprefix = sys.argv[len(sys.argv)-1]
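# Illustrative invocation sketch (file names hypothetical):
#   batch_parse_txtxy.py a.txt b.txt run1
# parses a.txt and b.txt into $HOME/a.txt.h5 and $HOME/b.txt.h5, naming
# the samples "run1: a.txt" and "run1: b.txt".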
for infilename in infilenames:
outfilename = f'{os.environ["
|
HOME"]}/{os.path.basename(infilename)}.h5'
data = tp.parse_txt_xy(infilename)
data['metadata']['name'] = f'{nameprefix}: {os.path.basename(infilename)}'
    tp.save_sample_file(outfilename, data['data'], data['metadata'])
sys.exit(0)
|
utiasASRL/batch-informed-trees
|
tests/control/test_control.py
|
Python
|
bsd-3-clause
| 11,947
| 0.002595
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll
import sys
from os.path import abspath, dirname, join
sys.path.insert(0, join(dirname(dirname(dirname(abspath(__file__)))), 'py-bindings'))
from functools import partial
from time import clock
from math import fabs
import unittest
import copy
import ompl.util as ou
import ompl.base as ob
import ompl.control as oc
from ompl.util import setLogLevel, LogLevel
SOLUTION_TIME = 10.0
MAX_VELOCITY = 3.0
class Environment(object):
def __init__(self, fname):
fp = open(fname, 'r')
lines = fp.readlines()
fp.close()
self.width, self.height = [int(i) for i in lines[0].split(' ')[1:3]]
self.grid = []
self.start = [int(i) for i in lines[1].split(' ')[1:3]]
self.goal = [int(i) for i in lines[2].split(' ')[1:3]]
for i in range(self.width):
self.grid.append(
[int(j) for j in lines[4+i].split(' ')[0:self.height]])
self.char_mapping = ['__', '##', 'oo', 'XX']
def __str__(self):
result = ''
for line in self.grid:
result = result + ''.join([self.char_mapping[c] for c in line]) + '\n'
return result
def isValid(grid, state):
# planning is done in a continuous space, but our collision space
# representation is discrete
x = int(state[0])
y = int(state[1])
if x < 0 or y < 0 or x >= len(grid) or y >= len(grid[0]):
return False
return grid[x][y] == 0 # 0 means valid state
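# Illustrative sketch (hypothetical state): a state at (2.7, 3.1) is
# truncated to cell (2, 3), so it is valid only when both indices lie
# inside the grid and grid[2][3] == 0.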
class MyStateSpace(ob.RealVectorStateSpace):
def __init__(self):
super(MyStateSpace, self).__init__(4)
def distance(self, state1, state2):
x1 = int(state1[0])
y1 = int(state1[1])
x2 = int(state2[0])
y2 = int(state2[1])
return fabs(x1-x2) + fabs(y1-y2)
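    # Illustrative sketch: the metric is a Manhattan distance over the x/y
    # cells only, ignoring the velocity dimensions: states at (1, 2, ...)
    # and (4, 6, ...) are fabs(1-4) + fabs(2-6) == 7 apart.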
class MyProjectionEvaluator(ob.ProjectionEvaluator):
def __init__(self, space, cellSizes):
super(MyProjectionEvaluator, self).__init__(space)
self.setCellSizes(cellSizes)
def getDimension(self):
        return 2
def project(self, state, projection):
projection[0] = state[0]
projection[1] = state[1]
class MyStatePropagator(oc.StatePropagator):
def propagate(self, state, control, duration, result):
result[0] = state[0] + duration*control[0]
result[1] = state[1] + duration*control[1]
result[2] = control[0]
result[3] = control[1]
class TestPlanner(object):
def execute(self, env, time, pathLength, show=False):
result = True
sSpace = MyStateSpace()
sbounds = ob.RealVectorBounds(4)
# dimension 0 (x) spans between [0, width)
# dimension 1 (y) spans between [0, height)
# since sampling is continuous and we round down, we allow values until
# just under the max limit
# the resolution is 1.0 since we check cells only
sbounds.low = ou.vectorDouble()
sbounds.low.extend([0.0, 0.0, -MAX_VELOCITY, -MAX_VELOCITY])
sbounds.high = ou.vectorDouble()
sbounds.high.extend([float(env.width) - 0.000000001, \
float(env.height) - 0.000000001, \
MAX_VELOCITY, MAX_VELOCITY])
sSpace.setBounds(sbounds)
cSpace = oc.RealVectorControlSpace(sSpace, 2)
cbounds = ob.RealVectorBounds(2)
cbounds.low[0] = -MAX_VELOCITY
cbounds.high[0] = MAX_VELOCITY
cbounds.low[1] = -MAX_VELOCITY
cbounds.high[1] = MAX_VELOCITY
cSpace.setBounds(cbounds)
ss = oc.SimpleSetup(cSpace)
isValidFn = ob.StateValidityCheckerFn(partial(isValid, env.grid))
ss.setStateValidityChecker(isValidFn)
propagator = MyStatePropagator(ss.getSpaceInformation())
ss.setStatePropagator(propagator)
planner = self.newplanner(ss.getSpaceInformation())
ss.setPlanner(planner)
# the initial state
start = ob.State(sSpace)
start()[0] = env.start[0]
start()[1] = env.start[1]
start()[2] = 0.0
start()[3] = 0.0
goal = ob.State(sSpace)
goal()[0] = env.goal[0]
goal()[1] = env.goal[1]
goal()[2] = 0.0
goal()[3] = 0.0
ss.setStartAndGoalStates(start, goal, 0.05)
startTime = clock()
if ss.solve(SOLUTION_TIME):
elapsed = clock() - startTime
time = time + elapsed
if show:
print('Found solution in %f seconds!' % elapsed)
path = ss.getSolutionPath()
path.interpolate()
if not path.check():
return (False, time, pathLength)
pathLength = pathLength + path.length()
if show:
print(env, '\n')
temp = copy.deepcopy(env)
for i in range(len(path.states)):
x = int(path.states[i][0])
y = int(path.states[i][1])
if temp.grid[x][y] in [0, 2]:
temp.grid[x][y] = 2
else:
temp.grid[x][y] = 3
print(temp, '\n')
else:
result = False
return (result, time, pathLength)
def newplanner(self, si):
raise NotImplementedError('pure virtual method')
class RRTTest(TestPlanner):
def newplanner(self, si):
planner = oc.RRT(si)
return planner
class ESTTest(TestPlanner):
def newplanner(self, si):
planner = oc.EST(si)
cdim = ou.vectorDouble()
cdim.extend([1, 1])
ope = MyProjectionEvaluator(si.getStateSpace(), cdim)
planner.setProjectionEvaluator(ope)
return planner
class SyclopDecomposition(oc.GridDecomposition):
def __init__(self, length, bounds):
super(SyclopDecomposition, self).__init__(length, 2, bounds)
def project(self, state, coord):
coord[0] = state[0]
coord[1] = state[1]
def sampleFullState(self, sampler, coord, s):
sampler.sampleUniform(s)
s[0] = coord[0]
s[1] = coord[1]
class SyclopRRTTest(TestPlanner):
def newplanner(self, si):
spacebounds = si.getStateSpace().getBounds()
bounds = ob.RealVectorBounds(2)
bounds.setLow(0, spacebounds.low[0])
bounds.setLow(1, spacebounds.low[1])
bounds.setHigh(0, spacebounds.high[0])
bounds.setHigh(1, spacebounds.high[1])
# Create a 10x
|
eayunstack/fuel-ostf
|
fuel_health/tests/ha/test_mysql_replication.py
|
Python
|
apache-2.0
| 6,751
| 0
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import traceback
from fuel_health.common.ssh import Client as SSHClient
from fuel_health.common.utils import data_utils
from fuel_health.tests.ha.test_mysql_status import BaseMysqlTest
LOG = logging.getLogger(__name__)
class TestMysqlReplication(BaseMysqlTest):
@classmethod
def setUpClass(cls):
super(TestMysqlReplication, cls).setUpClass()
cls.database = 'ost1' + str(data_utils.rand_int_id(100, 999))
cls.master_ip = None
def setUp(self):
super(TestMysqlReplication, self).setUp()
if 'ha' not in self.config.compute.deployment_mode:
self.skipTest('Cluster is not HA mode, skipping tests')
@classmethod
def tearDownClass(cls):
if cls.master_ip:
try:
cmd = "mysql -h localhost -e 'DROP DATABASE %s'" % cls.database
SSHClient(cls.master_ip, cls.node_user,
key_filename=cls.node_key).exec_command(cmd)
except Exception:
LOG.debug(traceback.format_exc())
def test_mysql_replication(self):
"""Check data replication over mysql
Target Service: HA mysql
Scenario:
            1. Check that mysql is running on all controller or database nodes.
2. Create database on one node.
3. Create table in created database
4. Insert data to the created table
5. Get replicated data from each database node.
            6. Verify that the replicated data is the same on each database node.
7. Drop created database
Duration: 10 s.
"""
LOG.info("'Test MySQL replication' started")
databases = self.verify(20, self.get_database_nodes,
1, "Can not get database hostnames. Check that"
" at least one controller is operable",
"get database nodes",
self.controller_ip,
self.node_user,
key=self.node_key)
self.verify_response_body_not_equal(0, len(databases),
self.no_db_msg, 1)
if len(databases) == 1:
self.skipTest(self.one_db_msg)
LOG.info("Database nodes are " + ", ".join(databases))
self.master_ip = databases[0]
# check that mysql is running on all hosts
cmd = 'mysql -h localhost -e "" '
for db_node in databases:
ssh_client = SSHClient(
db_node, self.node_user,
key_filename=self.node_key, timeout=100)
self.verify(
20, ssh_client.exec_command, 1,
'Can not connect to mysql. '
'Please check that mysql is running and there '
'is connectivity by management network',
'detect mysql node', cmd)
database_name = self.database
table_name = 'ost' + str(data_utils.rand_int_id(100, 999))
record_data = str(data_utils.rand_int_id(1000000000, 9999999999))
create_database = (
'mysql -h localhost -e "CREATE DATABASE IF NOT EXISTS '
'{database}" '.format(database=database_name)
)
create_table = (
'mysql -h localhost -e'
' "CREATE TABLE IF NOT EXISTS {database}.{table}'
' (data VARCHAR(100))" '.format(database=database_name,
table=table_name)
)
create_record = (
'mysql -h localhost -e "INSERT INTO {database}.{table} (data) '
'VALUES({data})" '.format(database=database_name,
table=table_name,
data=record_data)
)
get_record = (
'mysql -h localhost -e "SELECT * FROM {database}.{table} '
'WHERE data = \"{data}\"" '.format(database=database_name,
table=table_name,
data=record_data)
)
drop_db = "mysql -h localhost -e 'DROP DATABASE {database}'".format(
database=database_name
)
# create db, table, insert data on one node
LOG.info('target node ip/hostname: "{0}" '.format(self.master_ip))
master_ssh_client = SSHClient(self.master_ip, self.node_user,
key_filename=self.node_key,
timeout=100)
self.verify(20, master_ssh_client.exec_command, 2,
'Database creation failed', 'create database',
create_database)
LOG.info('create database')
self.verify(20, master_ssh_client.exec_command, 3,
'Table creation failed', 'create table', create_table)
LOG.info('create table')
self.verify(20, master_ssh_client.exec_command, 4,
'Can not insert data in created table', 'data insertion',
create_record)
LOG.info('create data')
# Verify that data is replicated on other databases
for db_node in databases:
if db_node != self.master_ip:
client = SSHClient(db_node,
self.node_user,
key_filename=self.node_key)
output = self.verify(
20, client.exec_command, 5,
'Can not get data from database node %s' % db_node,
'get_record', get_record)
self.verify_response_body(output, record_data,
msg='Expected data missing',
failed_step='6')
# Drop created db
ssh_client = SSHClient(self.master_ip, self.node_user,
key_filename=self.node_key)
self.verify(20, ssh_client.exec_command, 7,
'Can not delete created database',
'database deletion', drop_db)
self.master_ip = None
|
deonwu/robotframework-debuger
|
src/rdb/interface/base.py
|
Python
|
gpl-2.0
| 3,484
| 0.01062
|
import logging
class BaseDebugInterface(object):
def __init__(self, debuger):
self.robotDebuger = debuger
self.debugCtx = debuger.debugCtx
self.logger = logging.getLogger("rbt.int")
self.bp_id = 0
def start(self, settings):
"""start debug interface."""
pass
def close(self):
pass
def go_steps(self, count): self.debugCtx.go_steps(int(count))
def go_into(self): self.debugCtx.go_into()
def go_over(self): self.debugCtx.go_over()
def go_on(self): self.debugCtx.go_on()
def go_return(self): self.debugCtx.go_return()
def go_pause(self): return self.debugCtx.go_pause()
def add_breakpoint(self, bp): self.robotDebuger.add_breakpoint(bp)
def watch_variable(self, name): return self.robotDebuger.watch_variable(name)
def remove_variable(self, name): return self.robotDebuger.remove_variable(name)
def run_keyword(self, name, *args): return self.robotDebuger.run_keyword(name, *args)
def update_variable(self, name, value):
from robot.running import NAMESPACES
if NAMESPACES.current is not None:
NAMESPACES.current.variables[name] = value
def variable_value(self, var_list):
from robot.running import NAMESPACES
if NAMESPACES.current is None:
return [(e, None) for e in var_list]
robot_vars = NAMESPACES.current.variables
val_list = []
for e in var_list:
try:
v = robot_vars.replace_scalar(e)
except Exception, et:
if "Non-existing" in str(et):
v = None
else: raise
val_list.append((e, v))
return val_list
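    # Illustrative sketch (variable names hypothetical):
    #   variable_value(['${HOST}', '${MISSING}'])
    # returns [('${HOST}', 'localhost'), ('${MISSING}', None)] when ${HOST}
    # resolves and ${MISSING} is a non-existing variable.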
@property
def watching_variable(self):return self.robotDebuger.watching_variable
@property
def callstack(self):
"""Return a runtime list"""
return list(self.debugCtx.call_stack)
@property
def breakpoints(self):
"""Return list of breakpoint"""
return list(self.debugCtx.break_points)
@property
def active_breakpoint(self):return self.debugCtx.active_break_point
def disable_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = False
def enable_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = True
def update_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = not bp.active
def _get_breakpoint(self, name, match_kw):
for e in self.debugCtx.break_points:
if match_kw and hasattr(e, 'kw_name') and e.kw_name == name:
return e
elif not match_kw and e.name == name:
return e
return None
def add_telnet_monitor(self, monitor):
"""this is IPAMml special feature."""
self.robotDebuger.add_telnet_monitor(monitor)
def add_debug_listener(self, l):
self.debugCtx.add_listener(l)
def remove_debug_listener(self, l):
self.debugCtx.remove_listener(l)
class Listener:
def __init__(self):
pass
def pause(self, breakpoint):
pass
def go_on(self):
pass
def start_keyword(self, keyword):
pass
def end_keyword(self, keyword):
pass
|
aakashrana1995/svnit-tnp
|
tnp/consent/migrations/0014_auto_20170325_1723.py
|
Python
|
mit
| 523
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consent', '0013_auto_20170217_1606'),
]
operations = [
migrations.AlterField(
model_name='educationdetail',
name='college_passout_year',
field=models.CharField(default=2017, max_length=4),
preserve_default=False,
),
]
|
mcs07/ChemDataExtractor
|
chemdataextractor/doc/table.py
|
Python
|
mit
| 13,370
| 0.003141
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.doc.table
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Table document elements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from collections import defaultdict
from ..model import Compound, ModelList
from ..parse.table import CompoundHeadingParser, CompoundCellParser, UvvisAbsHeadingParser, UvvisAbsCellParser, \
QuantumYieldHeadingParser, QuantumYieldCellParser, UvvisEmiHeadingParser, UvvisEmiCellParser, ExtinctionCellParser, \
ExtinctionHeadingParser, FluorescenceLifetimeHeadingParser, FluorescenceLifetimeCellParser, \
ElectrochemicalPotentialHeadingParser, ElectrochemicalPotentialCellParser, IrHeadingParser, IrCellParser, \
SolventCellParser, SolventHeadingParser, SolventInHeadingParser, UvvisAbsEmiQuantumYieldHeadingParser, \
UvvisAbsEmiQuantumYieldCellParser, MeltingPointHeadingParser, MeltingPointCellParser, GlassTransitionHeadingParser, GlassTransitionCellParser, TempInHeadingParser, \
UvvisAbsDisallowedHeadingParser, UvvisEmiQuantumYieldHeadingParser, UvvisEmiQuantumYieldCellParser
# TODO: Sort out the above import... import module instead
from ..nlp.tag import NoneTagger
from ..nlp.tokenize import FineWordTokenizer
from ..utils import memoized_property
from .element import CaptionedElement
from .text import Sentence
log = logging.getLogger(__name__)
class Table(CaptionedElement):
#: Table cell parsers
parsers = [
(CompoundHeadingParser(), CompoundCellParser()),
(UvvisAbsEmiQuantumYieldHeadingParser(), UvvisAbsEmiQuantumYieldCellParser()),
(UvvisEmiQuantumYieldHeadingParser(), UvvisEmiQuantumYieldCellParser()),
(UvvisEmiHeadingParser(), UvvisEmiCellParser()),
(UvvisAbsHeadingParser(), UvvisAbsCellParser(), UvvisAbsDisallowedHeadingParser()),
(IrHeadingParser(), IrCellParser()),
(ExtinctionHeadingParser(), ExtinctionCellParser()),
(QuantumYieldHeadingParser(), QuantumYieldCellParser()),
(FluorescenceLifetimeHeadingParser(), FluorescenceLifetimeCellParser()),
(ElectrochemicalPotentialHeadingParser(), ElectrochemicalPotentialCellParser()),
(MeltingPointHeadingParser(), MeltingPointCellParser()),
(GlassTransitionHeadingParser(), GlassTransitionCellParser()),
(SolventHeadingParser(), SolventCellParser()),
(SolventInHeadingParser(),),
(TempInHeadingParser(),)
]
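    # Illustrative note (inferred from the parsing loop in `records` below):
    # each tuple is (heading_parser[, value_parser[, disallowed_parser]]);
    # single-entry tuples such as (SolventInHeadingParser(),) contribute
    # contextual header data only and assign no value parser to the column.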
def __init__(self, caption, label=None, headings=None, rows=None, footnotes=None, **kwargs):
super(Table, self).__init__(caption=caption, label=label, **kwargs)
self.headings = headings if headings is not None else [] # list(list(Cell))
self.rows = rows if rows is not None else [] # list(list(Cell))
self.footnotes = footnotes if footnotes is not None else []
@property
def document(self):
return self._document
@document.setter
def document(self, document):
self._document = document
self.caption.document = document
for row in self.headings:
for cell in row:
cell.document = document
for row in self.rows:
for cell in row:
cell.document = document
def serialize(self):
"""Convert Table element to python dictionary."""
data = {
'type': self.__class__.__name__,
'caption': self.caption.serialize(),
'headings': [[cell.serialize() for cell in hrow] for hrow in self.headings],
'rows': [[cell.serialize() for cell in row] for row in self.rows],
}
return data
def _repr_html_(self):
html_lines = ['<table class="table">']
        html_lines.append(self.caption._repr_html_())
html_lines.append('<thead>')
        for hrow in self.headings:
            html_lines.append('<tr>')
            for cell in hrow:
                html_lines.append('<th>' + cell.text + '</th>')
            html_lines.append('</tr>')
        html_lines.append('</thead>')
html_lines.append('<tbody>')
        for row in self.rows:
            html_lines.append('<tr>')
            for cell in row:
                html_lines.append('<td>' + cell.text + '</td>')
            html_lines.append('</tr>')
        html_lines.append('</tbody>')
html_lines.append('</table>')
return '\n'.join(html_lines)
@property
def records(self):
"""Chemical records that have been parsed from the table."""
caption_records = self.caption.records
# Parse headers to extract contextual data and determine value parser for the column
value_parsers = {}
header_compounds = defaultdict(list)
table_records = ModelList()
seen_compound_col = False
log.debug('Parsing table headers')
for i, col_headings in enumerate(zip(*self.headings)):
# log.info('Considering column %s' % i)
for parsers in self.parsers:
log.debug(parsers)
heading_parser = parsers[0]
value_parser = parsers[1] if len(parsers) > 1 else None
disallowed_parser = parsers[2] if len(parsers) > 2 else None
allowed = False
disallowed = False
for cell in col_headings:
log.debug(cell.tagged_tokens)
results = list(heading_parser.parse(cell.tagged_tokens))
if results:
allowed = True
log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.serialize() for c in results]))
# Results from every parser are stored as header compounds
header_compounds[i].extend(results)
# Referenced footnote records are also stored
for footnote in self.footnotes:
# print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references))
if footnote.id in cell.references:
log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.serialize() for c in footnote.records]))
# print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
header_compounds[i].extend(footnote.records)
# Check if the disallowed parser matches this cell
if disallowed_parser and list(disallowed_parser.parse(cell.tagged_tokens)):
log.debug('Column %s: Disallowed %s' % (i, heading_parser.__class__.__name__))
disallowed = True
# If heading parser matches and disallowed parser doesn't, store the value parser
if allowed and not disallowed and value_parser and i not in value_parsers:
if isinstance(value_parser, CompoundCellParser):
# Only take the first compound col
if seen_compound_col:
continue
seen_compound_col = True
log.debug('Column %s: Value parser: %s' % (i, value_parser.__class__.__name__))
value_parsers[i] = value_parser
# Stop after value parser is assigned?
# for hrow in self.headings:
# for i, cell in enumerate(hrow):
# log.debug(cell.tagged_tokens)
# for heading_parser, value_parser in self.parsers:
# results = list(heading_parser.parse(cell.tagged_tokens))
# if results:
# log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.to_primitive() for c in results]))
# # Results from every parser are stored as header compounds
# header_compounds[i].extend(results)
# if results and value_parser and i not in value_parsers:
# if isinstance(value_parser, CompoundCellParser):
# # Only take
|
BowdoinOrient/bongo
|
bongo/apps/api/serializers/event.py
|
Python
|
mit
| 200
| 0
|
from bongo.apps.bongo import models
from rest_framework import serializers
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = models.Event
        fields = ('id', )
|
nhlx5haze/Kaggle_WestNileVirus
|
src/predict.py
|
Python
|
bsd-3-clause
| 5,983
| 0.01755
|
from __future__ import print_function
import numpy as np
import datetime
import csv
import pickle
import sys
species_map = {'CULEX RESTUANS' : "100000",
'CULEX TERRITANS' : "010000",
'CULEX PIPIENS' : "001000",
'CULEX PIPIENS/RESTUANS' : "101000",
'CULEX ERRATICUS' : "000100",
'CULEX SALINARIUS': "000010",
'CULEX TARSALIS' : "000001",
'UNSPECIFIED CULEX': "001000"} # Treating unspecified as PIPIENS (http://www.ajtmh.org/content/80/2/268.full)
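# Illustrative sketch: each species string becomes a 6-element indicator
# vector, e.g. 'CULEX PIPIENS/RESTUANS' -> "101000" sets both the RESTUANS
# and PIPIENS columns consumed by assemble_X below.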
def date(text):
return datetime.datetime.strptime(text, "%Y-%m-%d").date()
def precip(text):
TRACE = 1e-3
text = text.strip()
if text == "M":
return None
if text == "-":
return None
if text == "T":
return TRACE
return float(text)
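# Illustrative sketch: precip("T") (a trace reading) returns the 1e-3
# sentinel, precip("M") and precip("-") return None for missing data, and
# precip("0.12") returns 0.12.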
def impute_missing_weather_station_values(weather):
# Stupid simple
for k, v in weather.items():
if v[0] is None:
v[0] = v[1]
elif v[1] is None:
v[1] = v[0]
for k1 in v[0]:
if v[0][k1] is None:
v[0][k1] = v[1][k1]
for k1 in v[1]:
if v[1][k1] is None:
v[1][k1] = v[0][k1]
def load_weather(weatherfile):
weather = {}
for line in csv.DictReader(open(weatherfile)):
for name, converter in {"Date" : date,
"Tmax" : float,"Tmin" : float,"Tavg" : float,
"DewPoint" : float, "WetBulb" : float,
"PrecipTotal" : precip,"Sunrise" : precip,"Sunset" : precip,
"Depart" : float, "Heat" : precip,"Cool" : precip,
"ResultSpeed" : float,"ResultDir" : float,"AvgSpeed" : float,
"StnPressure" : float, "SeaLevel" : float}.items():
x = line[name].strip()
line[name] = converter(x) if (x != "M") else None
station = int(line["Station"]) - 1
assert station in [0,1]
dt = line["Date"]
if dt not in weather:
weather[dt] = [None, None]
assert weather[dt][station] is None, "duplicate weather reading {0}:{1}".format(dt, station)
weather[dt][station] = line
impute_missing_weather_station_values(weather)
return weather
def load_testing(testfile):
training = []
for line in csv.DictReader(open(testfile)):
for name, converter in {"Date" : date,
"Latitude" : float, "Longitude" : float}.items():
line[name] = converter(line[name])
training.append(line)
return training
def closest_station(lat, longi):
# Chicago is small enough that we can treat coordinates as rectangular.
stations = np.array([[41.995, -87.933],
[41.786, -87.752]])
loc = np.array([lat, longi])
deltas = stations - loc[None, :]
dist2 = (deltas**2).sum(1)
return np.argmin(dist2)
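# Illustrative sketch (coordinates hypothetical): a trap at (41.80, -87.75)
# lies much nearer the second station row, so closest_station(41.80, -87.75)
# returns 1.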
def normalize(X, mean=None, std=None):
count = X.shape[1]
if mean is None:
        mean = np.nanmean(X, axis=0)
for i in range(count):
X[np.isnan(X[:,i]), i] = mean[i]
if std is None:
std = np.std(X, axis=0)
    for i in range(count):
X[:,i] = (X[:,i] - mean[i]) / std[i]
return mean, std
def scaled_count(record):
SCALE = 9.0
if "NumMosquitos" not in record:
# This is test data
return 1
return int(np.ceil(record["NumMosquitos"] / SCALE))
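# Illustrative sketch: with SCALE = 9.0, a training row reporting
# NumMosquitos == 50 gives ceil(50 / 9.0) == 6, so assemble_X repeats that
# case six times; test rows (no NumMosquitos column) count once.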
def assemble_X(base, weather):
X = []
for b in base:
date = b["Date"]
lat, longi = b["Latitude"], b["Longitude"]
case = [date.year, date.month, date.day, date.weekday(), lat, longi]
# Look at a selection of past weather values
for days_ago in [0,1,3,5,8,12]:
day = date - datetime.timedelta(days=days_ago)
for obs in ["Tmax","Tmin","Tavg","DewPoint","WetBulb","PrecipTotal","Depart","Sunrise","Sunset","Cool","ResultSpeed","ResultDir"]:
station = closest_station(lat, longi)
case.append(weather[day][station][obs])
# Specify which mosquitos are present
species_vector = [float(x) for x in species_map[b["Species"]]]
case.extend(species_vector)
        # Weight each observation by the number of mosquitos seen. Test data
        # doesn't have this column, so in that case use 1. This accidentally
        # takes into account multiple entries that result from >50 mosquitos
        # on one day.
for repeat in range(scaled_count(b)):
X.append(case)
X = np.asarray(X, dtype=np.float32)
return X
class AdjustVariable(object):
def __init__(self, variable, target, half_life=20):
self.variable = variable
self.target = target
self.half_life = half_life
def __call__(self, nn, train_history):
delta = self.variable.get_value() - self.target
delta /= 2**(1.0/self.half_life)
self.variable.set_value(np.float32(self.target + delta))
def submit(net, mean, std, testfile, weatherfile):
weather = load_weather(weatherfile)
testing = load_testing(testfile)
X = assemble_X(testing, weather)
normalize(X, mean, std)
predictions = net.predict_proba(X)[:,0]
out = csv.writer(open("submissionlasagna.tmp", "w"))
out.writerow(["Id","WnvPresent"])
for row, p in zip(testing, predictions):
out.writerow([row["Id"], p])
if __name__ == "__main__":
if len(sys.argv) == 3:
fileObject = open("modellasagne.dat",'r')
dict = pickle.load(fileObject)
fileObject.close()
submit(dict['net'], dict['mean'], dict['std'], sys.argv[1], sys.argv[2])
else:
print("The script needs 2 arguments : \n1: Test file \n2: Weather csv file \n"
"Example: python predict.py ./input/test.csv ./input/weather.csv")
|
stxnext-kindergarten/presence-analyzer-kjagodzinski
|
src/presence_analyzer/script.py
|
Python
|
mit
| 3,487
| 0
|
# -*- coding: utf-8 -*-
"""Startup utilities"""
# pylint:skip-file
import os
import sys
from functools import partial
import paste.script.command
import werkzeug.script
etc = partial(os.path.join, 'parts', 'etc')
DEPLOY_INI = etc('deploy.ini')
DEPLOY_CFG = etc('deploy.cfg')
DEBUG_INI = etc('debug.ini')
DEBUG_CFG = etc('debug.cfg')
_buildout_path = __file__
for i in range(2 + __name__.count('.')):
_buildout_path = os.path.dirname(_buildout_path)
abspath = partial(os.path.join, _buildout_path)
del _buildout_path
# bin/paster serve parts/etc/deploy.ini
def make_app(global_conf={}, config=DEPLOY_CFG, debug=False):
from presence_analyzer import app
app.config.from_pyfile(abspath(config))
app.debug = debug
return app
# bin/paster serve parts/etc/debug.ini
def make_debug(global_conf={}, **conf):
from werkzeug.debug import DebuggedApplication
app = make_app(global_conf, config=DEBUG_CFG, debug=True)
    return DebuggedApplication(app, evalex=True)
# bin/flask-ctl shell
def make_shell():
"""
Interactive Flask Shell.
"""
from flask import request
app = make_app()
http = app.test_client()
reqctx = app.test_request_context
return locals()
def _serve(action, debug=False, dry_run=False):
"""
Build paster command from 'action' and 'debug' flag.
"""
if debug:
config = DEBUG_INI
else:
config = DEPLOY_INI
argv = ['bin/paster', 'serve', config]
if action in ('start', 'restart'):
argv += [action, '--daemon']
elif action in ('', 'fg', 'foreground'):
argv += ['--reload']
else:
argv += [action]
# Print the 'paster' command
print ' '.join(argv)
if dry_run:
return
# Configure logging and lock file
if action in ('start', 'stop', 'restart', 'status'):
argv += [
'--log-file', abspath('var', 'log', 'paster.log'),
'--pid-file', abspath('var', 'log', '.paster.pid'),
]
sys.argv = argv[:2] + [abspath(config)] + argv[3:]
# Run the 'paster' command
paste.script.command.run()
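# Illustrative sketch: _serve('restart', debug=True) prints and runs
#   bin/paster serve parts/etc/debug.ini restart --daemon
# plus the --log-file/--pid-file options, while _serve('fg') serves in the
# foreground with --reload.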
# bin/flask-ctl ...
def run():
action_shell = werkzeug.script.make_shell(make_shell, make_shell.__doc__)
# bin/flask-ctl serve [fg|start|stop|restart|status]
def action_serve(action=('a', 'start'), dry_run=False):
"""Serve the application.
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
Options:
- 'action' is one of [fg|start|stop|restart|status]
- '--dry-run' print the paster command and exit
"""
_serve(action, debug=False, dry_run=dry_run)
# bin/flask-ctl debug [fg|start|stop|restart|status]
def action_debug(action=('a', 'start'), dry_run=False):
"""
Serve the debugging application.
"""
_serve(action, debug=True, dry_run=dry_run)
# bin/flask-ctl status
def action_status(dry_run=False):
"""
Status of the application.
"""
_serve('status', dry_run=dry_run)
# bin/flask-ctl stop
def action_stop(dry_run=False):
"""
Stop the application.
"""
_serve('stop', dry_run=dry_run)
werkzeug.script.run()
def download_xml():
"""
Download xml files from stx website.
"""
import urllib
url = 'http://sargo.bolt.stxnext.pl/users.xml'
urllib.urlretrieve(url, 'runtime/data/users.xml')
|
CMSS-BCRDB/RDS
|
trove/guestagent/strategies/restore/mysql_impl.py
|
Python
|
apache-2.0
| 13,702
| 0.000146
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
import os
import pexpect
import re
import tempfile
from trove.guestagent.strategies.restore import base
from trove.openstack.common import log as logging
from trove.common import exception
from trove.common import utils
import trove.guestagent.datastore.mysql.service as dbaas
from trove.common.i18n import _ # noqa
LOG = logging.getLogger(__name__)
class MySQLRestoreMixin(object):
"""Common utils for restoring MySQL databases."""
RESET_ROOT_RETRY_TIMEOUT = 100
RESET_ROOT_SLEEP_INTERVAL = 10
# Reset the root password in a single transaction with 'FLUSH PRIVILEGES'
    # to ensure we never leave the database wide open without 'grant tables'.
RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;",
"UPDATE `mysql`.`user` SET"
" `password`=PASSWORD('')"
" WHERE `user`='root';",
"FLUSH PRIVILEGES;",
"COMMIT;")
# This is a suffix MySQL appends to the file name given in
# the '--log-error' startup parameter.
_ERROR_LOG_SUFFIX = '.err'
_ERROR_MESSAGE_PATTERN = re.compile("^ERROR:\s+.+$")
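    # _ERROR_MESSAGE_PATTERN is intended to catch init-file failure lines of
    # the form "ERROR: <code> <message>" in the mysqld error log.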
def mysql_is_running(self):
try:
utils.execute_with_timeout("/usr/bin/mysqladmin", "ping")
LOG.debug("MySQL is up and running.")
return True
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return False
def mysql_is_not_running(self):
try:
utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
LOG.info("MySQL is still running.")
return False
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return True
def poll_until_then_raise(self, event, exc):
try:
utils.poll_until(event,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
except exception.PollTimeOut:
raise exc
def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
child = pexpect.spawn("sudo mysqld_safe"
" -
|
-skip-grant-tables"
" --skip-networking"
" --init-file='%s'"
" --log-error='%s'" %
(init_file.name, err_log_file.name)
)
try:
i = child.expect(['Starting mysqld daemon'])
if i == 0:
LOG.info(_("Starting MySQL"))
except pexpect.TIMEOUT:
            LOG.exception(_("Got a timeout launching mysqld_safe"))
finally:
# There is a race condition here where we kill mysqld before
            # the init file has been executed. We need to ensure mysqld is up.
#
# mysqld_safe will start even if init-file statement(s) fail.
# We therefore also check for errors in the log file.
self.poll_until_then_raise(
self.mysql_is_running,
base.RestoreError("Reset root password failed:"
" mysqld did not start!"))
first_err_message = self._find_first_error_message(err_log_file)
if first_err_message:
raise base.RestoreError("Reset root password failed: %s"
% first_err_message)
LOG.info(_("Root password reset successfully."))
LOG.debug("Cleaning up the temp mysqld process.")
utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown")
LOG.debug("Polling for shutdown to complete.")
try:
utils.poll_until(self.mysql_is_not_running,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
LOG.debug("Database successfully shutdown")
except exception.PollTimeOut:
LOG.debug("Timeout shutting down database "
"- performing killall on mysqld_safe.")
utils.execute_with_timeout("killall", "mysqld_safe",
root_helper="sudo",
run_as_root=True)
self.poll_until_then_raise(
self.mysql_is_not_running,
base.RestoreError("Reset root password failed: "
"mysqld did not stop!"))
def reset_root_password(self):
with tempfile.NamedTemporaryFile() as init_file:
utils.execute_with_timeout("sudo", "chmod", "a+r", init_file.name)
self._writelines_one_per_line(init_file,
self.RESET_ROOT_MYSQL_COMMANDS)
# Do not attempt to delete the file as the 'trove' user.
# The process writing into it may have assumed its ownership.
# Only owners can delete temporary
# files (restricted deletion).
err_log_file = tempfile.NamedTemporaryFile(
suffix=self._ERROR_LOG_SUFFIX,
delete=False)
try:
self._start_mysqld_safe_with_init_file(init_file, err_log_file)
finally:
err_log_file.close()
MySQLRestoreMixin._delete_file(err_log_file.name)
def _writelines_one_per_line(self, fp, lines):
fp.write(os.linesep.join(lines))
fp.flush()
def _find_first_error_message(self, fp):
if MySQLRestoreMixin._is_non_zero_file(fp):
return MySQLRestoreMixin._find_first_pattern_match(
fp,
self._ERROR_MESSAGE_PATTERN
)
return None
@classmethod
def _delete_file(self, file_path):
"""Force-remove a given file as root.
Do not raise an exception on failure.
"""
if os.path.isfile(file_path):
try:
utils.execute_with_timeout("rm", "-f", file_path,
run_as_root=True,
root_helper="sudo")
except Exception:
LOG.exception("Could not remove file: '%s'" % file_path)
@classmethod
def _is_non_zero_file(self, fp):
file_path = fp.name
return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0)
@classmethod
def _find_first_pattern_match(self, fp, pattern):
for line in fp:
if pattern.match(line):
return line
return None
class MySQLDump(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for MySQLDump."""
__strategy_name__ = 'mysqldump'
base_restore_cmd = 'sudo mysql'
class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for InnoBackupEx."""
__strategy_name__ = 'innobackupex'
base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
base_prepare_cmd = ('sudo innobackupex --apply-log %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBack
ecorreig/automatic_pattern | tests/main_tests.py | Python | gpl-2.0 | 32,788 | 0.001921
__author__ = 'dracks'
import unittest
import models
from Api.Manager import DataManager
import main
import dateutil.parser
import datetime
import models_tests
import mocks
class DayWeekTests(unittest.TestCase):
def test_sunday(self):
day = dateutil.parser.parse("2015-09-27")
self.assertEqual(main.day_week(day), 'sunday')
def test_monday(self):
day = dateutil.parser.parse("2015-09-21")
self.assertEqual(main.day_week(day), 'monday')
class WeekStartDateTests(unittest.TestCase):
def test_monday(self):
day = "2015-09-28"
monday = dateutil.parser.parse(day)
self.assertEqual(monday, main.week_start_date(monday))
def test_sunday(self):
monday = dateutil.parser.parse("2015-09-21")
day = dateutil.parser.parse("2015-09-27")
self.assertEqual(monday, main.week_start_date(day))
class GetFilteredTimesTests(unittest.TestCase):
def test_server_response_copy(self):
activity = models.Activity()
activity.sort = "100"
activity.times = "125,167,140,140,128,128,141,128,128,143,127,129,129,129,142,116,127,269"
session = models_tests.generate_session(activities=[activity])
deleted, words_minute = main.get_filtered_times(session)
self.assertEqual(deleted, 1)
self.assertEqual(int(words_minute*1000), 449933)
def test_activities_sort(self):
a = models.Activity()
a.sort = "20"
a.times = "1,2,3,2,1,2,3"
a2 = models.Activity()
a2.sort = "100"
a2.times = "1,1,1,1,2,2,2,2,8,10"
deleted, words_minute = main.get_filtered_times(models_tests.generate_session(activities=[a, a2]))
self.assertEqual(deleted, 2)
self.assertEqual(words_minute, 60*1000/1.5)
class GetPercentileTest(unittest.TestCase):
def setUp(self):
self.percentiles = []
this = self
self.get_filtered_times_value = (0,0)
self.get_filtered_times_last_session = None
self.mock_get_filtered_times = main.get_filtered_times
def mock_retrieve_all(model):
return this.percentiles
def mock_get_filtered_times(session):
self.get_filtered_times_last_session = session
return self.get_filtered_times_value
main.get_filtered_times = mock_get_filtered_times
DataManager.sharedManager().retrieve_all = mock_retrieve_all
self.mock_older = models.Older()
self.mock_older.group = models.Group()
mock_course = models.Course()
mock_course.id = 2
self.mock_older.group.course = mock_course
self.model_session1 = models.ModelSession()
self.model_session1.type_percentile = 1
self.model_session2 = models.ModelSession()
self.model_session2.type_percentile = 4
def tearDown(self):
main.get_filtered_times = self.mock_get_filtered_times
def test_get_not_valid(self):
session1 = models.Session()
session1.completed_time = dateutil.parser.parse('2015-08-01')
session1.model_based = self.model_session1
self.percentiles = []
self.assertIsNone(main.get_percentile(self.mock_older, session1))
def test_get_multiple_percentiles(self):
date = dateutil.parser.parse('2015-08-01')
trimester = main.get_trimester(date)
session = models.Session()
session.completed_time = date
session.model_based = self.model_session1
        list_percentiles = []
        for i in range(0, 10):
            list_percentiles.append(models_tests.generate_percentile(
                pk=i, seed=5, type=i * 2,
                course=i * 3, trimester=trimester
))
self.percentiles = list_percentiles
activity1 = models.Activity()
activity1.sort = 10
activity1.words_minute = 10
activity2 = models.Activity()
activity2.sort = 20
activity2.words_minute = 19
session.list_activities = [
activity2, activity1
]
self.assertIsNone(main.get_percentile(self.mock_older, session))
session.model_based = self.model_session2
self.assertIsNone(main.get_percentile(self.mock_older, session))
self.get_filtered_times_value = (0,19)
self.mock_older.group.course.id = 6
r = main.get_percentile(self.mock_older, session)
self.assertEqual(self.get_filtered_times_last_session, session)
self.assertIsNotNone(r)
self.assertEqual(r, 20)
class GetAverageDataTest(unittest.TestCase):
def setUp(self):
self.get_percentile = main.get_percentile
def tearDown(self):
main.get_percentile = self.get_percentile
def test_no_data(self):
p, m = main.get_average_data(None, [], [])
self.assertIsNone(p)
self.assertIsNone(m)
def test_with_data(self):
self.tmp = 0
def mock_get_percentile(older, session):
self.tmp += 1
return self.tmp
main.get_percentile = mock_get_percentile
ms1 = models_tests.generate_model_session(1)
ms2 = models_tests.generate_model_session(2)
ms3 = models_tests.generate_model_session(3)
sessions = [
models_tests.generate_session(model=ms1),
models_tests.generate_session(model=ms1),
models_tests.generate_session(model=ms1)
]
p, m = main.get_average_data(None, sessions, [ms2, ms3])
self.assertIsNone(p)
self.assertIsNone(m)
sessions = [
models_tests.generate_session(model=ms1, motivation=1),
models_tests.generate_session(model=ms2),
models_tests.generate_session(model=ms3)
]
p, m = main.get_average_data(None, sessions, [ms1])
self.assertEqual(p, 1)
self.assertEqual(m, 1)
sessions = [
models_tests.generate_session(model=ms1, motivation=1),
models_tests.generate_session(model=ms2, motivation=5),
models_tests.generate_session(model=ms3)
]
p, m = main.get_average_data(None, sessions, [ms1, ms2])
self.assertEqual(p, 2.5)
self.assertEqual(m, 3)
class JumpTests(unittest.TestCase):
def setUp(self):
def generate_sessions():
list_mock_sessions = []
for level in range(1, 5):
list_mock_sessions.extend([
models_tests.generate_block_session(level * 5, level=level, session=models.ModelSession(),
order=10),
models_tests.generate_block_session(level * 5 + 1, level=level, session=models.ModelSession(),
order=20),
models_tests.generate_block_session(level * 5 + 2, level=level, session=models.ModelSession(),
order=30),
])
return list_mock_sessions
self.configuration = models.OlderConfig()
self.b1 = models_tests.generate_block(order=10, sessions=generate_sessions())
self.b2 = models_tests.generate_block(order=20, sessions=generate_sessions())
self.b3 = models_tests.generate_block(order=30, sessions=generate_sessions())
self.configuration.pattern = models_tests.generate_pattern(blocks=[self.b1, self.b3, self.b2])
def test_not_repeat(self):
self.configuration.block = self.b1
bj = models.BlockJumpDefault()
bj.repeatBlock = False
bj.nextLevel = 2
main.jump(self.configuration, bj)
self.assertEqual(self.configuration.block, self.b2)
self.assertEqual(self.configuration.level, 2)
sessions = filter(lambda e: e.level == 2, self.b2.sessions)
self.assertEqual(self.configuration.session, sessions[0])
def test_repeat(self):
bj = models.BlockJumpDefault()
bj.repeatBlock = True
bj.nextLevel = 3
self.configuration.block = self.b1
main.jump(self.configuration, bj)
self.assertEqual(self.configuration.block, self.b1)
self.assertEqual(self
elego/tkobr-addons | unported/tko_purchase_show_only_supplier_products/__init__.py | Python | agpl-3.0 | 1,094 | 0
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
fergalmoran/dss | spa/migrations/0021_auto__add_activitylike.py | Python | bsd-2-clause | 16,207 | 0.007836
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ActivityLike'
#db.delete_table(u'spa_activitylike')
db.create_table(u'spa_activitylike', (
(u'activity_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['spa.Activity'], unique=True, primary_key=True)),
('mix', self.gf('django.db.models.fields.related.ForeignKey')(related_name='likes', to=orm['spa.Mix'])),
))
db.send_create_signal('spa', ['ActivityLike'])
def backwards(self, orm):
# Deleting model 'ActivityLike'
db.delete_table(u'spa_activitylike')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.activity': {
'Meta': {'object_name': 'Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']", 'null': 'True', 'blank': 'True'})
},
'spa.activityfavourite': {
'Meta': {'object_name': 'ActivityFavourite', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"})
},
'spa.activitylike': {
'Meta': {'object_name': 'ActivityLike', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.activitymix': {
'Meta': {'object_name': 'ActivityMix', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.activityplay': {
'Meta': {'object_name': 'ActivityPlay', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField',
scorphus/authomatic | tests/functional_tests/expected_values/yammer.py | Python | mit | 4,202 | 0.000714
from datetime import datetime
import fixtures
import constants
from authomatic.providers import oauth2
conf = fixtures.get_configuration('yammer')
LINK = 'https://www.yammer.com/peterhudec.com/users/{0}'\
.format(conf.user_username)
# Yammer only lets users set the month and day of their birthday;
# the year is always reported as 1900.
BD = datetime.strptime(conf.user_birth_date, '%x')
BIRTH_DATE = datetime(1900, BD.month, BD.day).strftime('%x')
CONFIG = {
'login_xpath': '//*[@id="login"]',
'password_xpath': '//*[@id="password"]',
'consent_xpaths': [
'//*[@id="login-form"]/fieldset[2]/p[2]/button',
'//*[@id="oauth2-authorize"]/div[3]/div[3]/form/input[1]',
],
'class_': oauth2.Yammer,
'scope': oauth2.Yammer.user_info_scope,
'user': {
'birth_date': BIRTH_DATE,
'city': conf.user_city,
'country': conf.user_country,
'email': conf.user_email,
'first_name': conf.user_first_name,
'gender': None,
'id': conf.user_id,
'last_name': conf.user_last_name,
'link': LINK,
'locale': conf.user_locale,
'name': conf.user_name,
'nickname': None,
'phone': conf.user_phone,
'picture': conf.user_picture,
'postal_code': None,
'timezone': conf.user_timezone,
'username': conf.user_username,
},
'content_should_contain': [
conf.user_city,
conf.user_country,
conf.user_email,
conf.user_first_name,
conf.user_id,
conf.user_last_name,
LINK,
conf.user_locale,
conf.user_name,
conf.user_phone,
conf.user_picture,
conf.user_timezone.replace('&', '\\u0026'),
conf.user_username,
# User info JSON keys
'type', 'id', 'network_id', 'state', 'guid', 'job_title', 'location',
'significant_other', 'kids_names', 'interests', 'summary', 'expertise',
'full_name', 'activated_at', 'show_ask_for_photo', 'first_name',
'last_name', 'network_name', 'network_domains', 'url', 'web_url',
'name', 'mugshot_url', 'mugshot_url_template', 'birth_date', 'timezone',
'external_urls', 'admin', 'verified_admin', 'can_broadcast',
'department', 'email', 'can_create_new_network',
'can_browse_external_networks', 'previous_companies', 'schools',
    'contact', 'im', 'provider', 'username', 'phone_numbers', 'number',
'email_addresses', 'address', 'has_fake_email', 'stats', 'following',
'followers', 'updates', 'settings', 'xdr_proxy', 'web_preferences',
'absolute_timestamps', 'threaded_mode', 'network_settings',
    'message_prompt', 'allow_attachments', 'show_communities_directory',
'enable_groups', 'allow_yammer_apps', 'admin_can_delete_messages',
'allow_inline_document_view', 'allow_inline_video',
'enable_private_messages', 'allow_external_sharing', 'enable_chat',
'home_tabs', 'select_name', 'feed_description', 'ordering_index',
'enter_does_not_submit_message', 'preferred_my_feed',
'prescribed_my_feed', 'sticky_my_feed', 'dismissed_feed_tooltip',
'dismissed_group_tooltip', 'dismissed_profile_prompt',
'dismissed_invite_tooltip', 'dismissed_apps_tooltip',
'dismissed_invite_tooltip_at', 'dismissed_browser_lifecycle_banner',
'make_yammer_homepage', 'locale', 'yammer_now_app_id', 'has_yammer_now',
'has_mobile_client', 'follow_general_messages'
],
# Case insensitive
'content_should_not_contain':
conf.no_gender +
conf.no_nickname +
conf.no_postal_code,
# True means that any truthy value is expected
'credentials': {
'token_type': 'Bearer',
'provider_type_id': '2-15',
'_expiration_time': None,
'consumer_key': None,
'provider_id': None,
'consumer_secret': None,
'token': True,
'token_secret': None,
'_expire_in': True,
'provider_name': 'yammer',
'refresh_token': None,
'provider_type': 'authomatic.providers.oauth2.OAuth2',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
},
}
vlukes/sfepy | tests/test_dg_input_advection.py | Python | bsd-3-clause | 187 | 0.026738
from __future__ import absolute_import
input_name = '../examples/dg/advection_2D.py'
output_name = 'advection_sol.msh'
from tests_basic import TestInput
class Test( TestInput ):
pass
mrcl/HakketyYaks | run.py | Python | mit | 94 | 0
#!/usr/bin/python2.7
from app import app
if __name__ == '__main__':
    app.run(debug=True)
pjdufour/slackbot-osm | slackbotosm/settings.py | Python | bsd-3-clause | 101 | 0
import os
DEBUG = False
try:
    from local_settings import *  # noqa
except ImportError:
pass
IntelLabs/hpat | sdc/tests/test_hiframes.py | Python | bsd-2-clause | 29,355 | 0.000477
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import numba
import numpy as np
import os
import pandas as pd
import pyarrow.parquet as pq
import random
import string
import unittest
from numba import types
import sdc
from sdc import hiframes
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
count_array_REPs,
count_parfor_OneDs,
count_parfor_REPs,
dist_IR_contains,
get_start_end,
skip_numba_jit,
skip_sdc_jit)
class TestHiFrames(TestCase):
@skip_numba_jit
def test_column_list_select2(self):
# make sure SDC copies the columns like Pandas does
def test_impl(df):
df2 = df[['A']]
df2['A'] += 10
return df2.A, df.A
hpat_func = self.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
np.testing.assert_array_equal(hpat_func(df.copy())[1], test_impl(df)[1])
@skip_numba_jit
def test_pd_DataFrame_from_series_par(self):
def test_impl(n):
S1 = pd.Series(np.ones(n))
S2 = pd.Series(np.random.ranf(n))
df = pd.DataFrame({'A': S1, 'B': S2})
return df.A.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
@skip_numba_jit
def test_getitem_bool_series(self):
def test_impl(df):
return df['A'][df['B']].values
hpat_func = self.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
np.testing.assert_array_equal(test_impl(df), hpat_func(df))
@skip_numba_jit
def test_fillna(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
B = df.A.fillna(5.0)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_fillna_inplace(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
df.A.fillna(5.0, inplace=True)
return df.A.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_mean(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
return df.A.mean()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_var(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.var()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_std(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.std()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_map(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df['B'] = df.A.map(lambda a: 2 * a)
return df.B.sum()
n = 121
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_column_map_arg(self):
def test_impl(df):
df['B'] = df.A.map(lambda a: 2 * a)
return
n = 121
df1 = pd.DataFrame({'A': np.arange(n)})
df2 = pd.DataFrame({'A': np.arange(n)})
hpat_func = self.jit(test_impl)
        hpat_func(df1)
self.assertTrue(hasattr(df1, 'B'))
test_impl(df2)
np.testing.assert_equal(df1.B.values, df2.B.values)
@skip_numba_jit
    @skip_sdc_jit('Not implemented in sequential transport layer')
def test_cumsum(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.cumsum()
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_array_OneDs(), 2)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 2)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_column_distribution(self):
# make sure all column calls are distributed
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df.A.fillna(5.0, inplace=True)
DF = df.A.fillna(5.0)
s = DF.sum()
m = df.A.mean()
v = df.A.var()
t = df.A.std()
Ac = df.A.cumsum()
return Ac.sum() + s + m + v + t
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_quantile_parallel(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_float_nan(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32)})
df.A[0:100] = np.nan
df.A[200:331] = np.nan
return d
favll/pogom | pogom/pgoapi/protos/POGOProtos/Settings/Master/EquippedBadgeSettings_pb2.py | Python | mit | 3,197 | 0.006569
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/EquippedBadgeSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/EquippedBadgeSettings.proto',
package='POGOProtos.Settings.Master',
syntax='proto3',
  serialized_pb=_b('\n6POGOProtos/Settings/Master/EquippedBadgeSettings.proto\x12\x1aPOGOProtos.Settings.Master\"y\n\x15\x45quippedBadgeSettings\x12\x1f\n\x17\x65quip_badge_cooldown_ms\x18\x01 \x01(\x03\x12\x1f\n\x17\x63\x61tch_probability_bonus\x18\x02 \x03(\x02\x12\x1e\n\x16\x66lee_probability_bonus\x18\x03 \x03(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EQUIPPEDBADGESETTINGS = _descriptor.Descriptor(
name='EquippedBadgeSettings',
full_name='POGOProtos.Settings.Master.EquippedBadgeSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='equip_badge_cooldown_ms', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.equip_badge_cooldown_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='catch_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.catch_probability_bonus', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='flee_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.flee_probability_bonus', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=207,
)
DESCRIPTOR.message_types_by_name['EquippedBadgeSettings'] = _EQUIPPEDBADGESETTINGS
EquippedBadgeSettings = _reflection.GeneratedProtocolMessageType('EquippedBadgeSettings', (_message.Message,), dict(
DESCRIPTOR = _EQUIPPEDBADGESETTINGS,
__module__ = 'POGOProtos.Settings.Master.EquippedBadgeSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.EquippedBadgeSettings)
))
_sym_db.RegisterMessage(EquippedBadgeSettings)
# @@protoc_insertion_point(module_scope)
astropy/astropy-helpers | astropy_helpers/commands/test.py | Python | bsd-3-clause | 1,466 | 0.000682
"""
Different implementations of the ``./setup.py test`` command depending on
what's locally available.
If Astropy v1.1 or later is available it should be possible to import
AstropyTest from ``astropy.tests.command``. Otherwise there is a skeleton
implementation that allows users to at least discover the ``./setup.py test``
command and learn that they need Astropy to run it.
"""
import os
from ..utils import import_file
# Previously these except statements caught only ImportErrors, but there are
# some other obscure exceptional conditions that can occur when importing
# astropy.tests (at least on older versions) that can cause these imports to
# fail
try:
# If we are testing astropy itself, we need to use import_file to avoid
# actually importing astropy (just the file we need).
command_file = os.path.join('astropy', 'tests', 'command.py')
    if os.path.exists(command_file):
AstropyTest = import_file(command_file, 'astropy_tests_command').AstropyTest
else:
import astropy # noqa
from astropy.tests.command import AstropyTest
except Exception:
# No astropy at all--provide the dummy implementation
from ._dummy import _DummyCommand
class AstropyTest(_DummyCommand):
command_name = 'test'
description = 'Run the tests for this package'
error_msg = (
"The 'test' command requires the astropy package to be "
"installed and importable.")
quantifiedcode/checkmate | checkmate/contrib/plugins/python/pep8/setup.py | Python | mit | 266 | 0.018797
from .analyzer import Pep8Analyzer
from .issues_data import issues_data
analyzers = {
'pep8' :
{
'title' : 'Pep-8',
'class' : Pep8Analyzer,
'language' : 'python',
'issues_data' : issues_data,
},
}
dianvaltodorov/happy-commas | db_migrate.py | Python | mit | 963 | 0
#!env/bin/python
"""Migrate the database"""
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
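# Diff the reconstructed old model (tmp_module.meta) against the current
# db.metadata and write the result as the next numbered migration script.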
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
                                          SQLALCHEMY_MIGRATE_REPO,
                                          tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
lowRISC/fusesoc | tests/test_edalizer.py | Python | gpl-3.0 | 1,641 | 0.001219
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
def test_generators():
import os
import tempfile
from fusesoc.config import Config
from fusesoc.coremanager import CoreManager
from fusesoc.edalizer import Edalizer
from fusesoc.librarymanager import Library
from fusesoc.vlnv import Vlnv
tests_dir = os.path.dirname(__file__)
cores_dir = os.path.join(tests_dir, "capi2_cores", "misc", "generate")
lib = Library("edalizer", cores_dir)
cm = CoreManager(Config())
cm.add_library(lib)
core = cm.get_core(Vlnv("::generate"))
build_root = tempfile.mkdtemp(prefix="export_")
cache_root = tempfile.mkdtemp(prefix="export_cache_")
export_root = os.path.join(build_root, "exported_files")
edalizer = Edalizer(
toplevel=core.name,
flags={"tool": "icarus"},
core_manager=cm,
cache_root=cache_root,
work_root=os.path.join(build_root, "work"),
export_root=export_root,
system_name=None,
)
edalizer.run()
    gendir = os.path.join(
        cache_root, "generated", "generate-testgenerate_without_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_without_params_input.yml"))
gendir = os.path.join(
cache_root, "generated", "generate-testgenerate_with_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_with_params_input.yml"))
yaohongkok/py-one-chat | onechat/client.py | Python | mit | 747 | 0.034806
from ConfigParser import SafeConfigParser
import requests
class Client(object):
def __init__(self, configFileName):
self.config = SafeConfigParser()
self.config.read(configFileName)
def send(self, recepient, message):
        if message.platform == "facebook":
return self.sendFacebookMessage(recepient, message)
else:
raise Exception('Unknown Message\' Platform')
def sendFacebookMessage(self,recepient, message):
token = self.config.get('onechat','FACEBOOK_MESSENGER_TOKEN')
sendUrl = "https://graph.facebook.com/v2.6/me/messages?access_token=" + token
try:
response = requests.post(sendUrl, json=message.toPayload(recepient))
return response
except Exception:
print "Faile
|
d"
return "Failed sending request"
dagoaty/eve-wspace | evewspace/account/views.py | Python | gpl-3.0 | 3,492 | 0.002864
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from account.models import RegistrationForm
from account.utils import *
from account.forms import EditProfileForm
from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.forms.util import ErrorList
# Create your views here.
def register(request):
if request.method == "POST":
form = RegistrationForm(request.POST)
        if form.is_valid():
# Enforce ACCOUNT_REQUIRE_REG_CODE
if settings.ACCOUNT_REQUIRE_REG_CODE:
if len(get_groups_for_code(form.cleaned_data['regcode'])) != 0:
newUser = form.save()
newUser.email = form.cleaned_data['email']
newUser.save()
register_groups(newUser, form.cleaned_data['regcode'])
return HttpResponseRedirect(reverse('login'))
else:
form._errors['regcode'] = ErrorList([u'Invalid Registration Code.'])
else:
newUser = form.save()
newUser.email = form.cleaned_data['email']
newUser.save()
register_groups(newUser, form.cleaned_data['regcode'])
return HttpResponseRedirect(reverse('login'))
else:
form = RegistrationForm()
context = {'form': form}
return TemplateResponse(request, "register.html", context)
def edit_profile(request):
if request.method == "POST":
form = EditProfileForm(request.POST)
if form.is_valid():
if not request.user.check_password(form.cleaned_data['password']):
form._errors['password'] = ErrorList([u'The password you entered is incorrect.'])
else:
request.user.email = form.cleaned_data['email']
if form.cleaned_data['password1']:
request.user.set_password(form.cleaned_data['password1'])
request.user.save()
return HttpResponseRedirect('/settings/')
else:
form = EditProfileForm()
form.fields['email'].initial = request.user.email
return TemplateResponse(request, "edit_profile_form.html",
{'form': form})
def password_reset_confirm(*args, **kwargs):
from django.contrib.auth import views
return views.password_reset_confirm(*args, post_reset_redirect=reverse('login'),
template_name='password_reset_confirm.html',
**kwargs)
tschmorleiz/amcat | amcat/scripts/actions/deduplicate.py | Python | agpl-3.0 | 6,399 | 0.003907
#!/usr/bin/python
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import logging; log = logging.getLogger(__name__)
import collections
import itertools
import pprint
from django import forms
from django.forms import widgets
from django.core.exceptions import ValidationError
from amcat.scripts.script import Script
from amcat.models import ArticleSet
try:
import Levenshtein
except ImportError:
Levenshtein = None
log.error("Levenshtein module not installed. Deduplicate cannot be used.")
class Deduplicate(Script):
"""
    Deduplicate articles across two article sets. For every duplicate found,
    the matching articles in set 2 are removed.
"""
def __init__(self, *args, **kwargs):
super(Deduplicate, self).__init__(*args, **kwargs)
self._articles_cache_contains = None
self._articles_cache = None
class options_form(forms.Form):
articleset_1 = forms.ModelChoiceField(queryset=ArticleSet.objects.all())
articleset_2 = forms.ModelChoiceField(queryset=ArticleSet.objects.all())
dry_run = forms.BooleanField(initial=False, required=False)
        text_ratio = forms.IntegerField(initial=99, help_text="Match articles whose text matches ..%%")
        headline_ratio = forms.IntegerField(initial=80, help_text="Compare articles whose headlines match ..%%")
delete_same = forms.BooleanField(initial=False, required=False, help_text="Remove articles with same id's")
        skip_simple = forms.BooleanField(initial=False, required=False, help_text="Do not use the length-based approximation of the Levenshtein ratio")
def clean_ratio(self, ratio):
if not (0 <= self.cleaned_data[ratio] <= 100):
raise ValidationError("{}: give a percentage. For example: 20.".format(ratio))
return self.cleaned_data[ratio] / 100.0
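    # clean_ratio above converts a submitted percentage into a fraction,
    # e.g. text_ratio=99 -> 0.99; the two wrappers below apply it per field.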
def clean_text_ratio(self):
return self.clean_ratio("text_ratio")
def clean_headline_ratio(self):
return self.clean_ratio("headline_ratio")
def get_matching(self, compare_with, article, ratio, prop):
return (ca for ca in compare_with if Levenshtein.ratio(
getattr(article, prop), getattr(ca, prop)) >= ratio)
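    # Approximate length pre-filter: a candidate whose text length falls
    # outside [text_ratio * L, (2 - text_ratio) * L] (L = len(article.text))
    # is assumed unable to reach text_ratio, so it is skipped before the
    # expensive Levenshtein.ratio call. E.g. ratio 0.99 and L = 1000 give a
    # window of 990..1010 characters.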
def get_simple_levenhstein(self, articles, article, text_ratio):
text_length = len(article.text)
min_length = text_ratio * text_length
max_length = ((1 - text_ratio) + 1) * text_length
for comp_article in articles:
if min_length <= len(comp_article.text) <= max_length:
yield comp_article
def get_articles(self, articleset, article, text_ratio):
medium_id, date = article.medium_id, article.date
# Same medium / date since previous call?
if not self._articles_cache_contains == (medium_id, date):
# Fill cache
self._articles_cache_contains = (medium_id, date)
self._articles_cache = articleset.articles.filter(date=date, medium__id=medium_id)
self._articles_cache = self._articles_cache.only("id", "text", "headline")
return self._articles_cache
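        # The one-entry cache above is effective because _get_deduplicates
        # iterates articles ordered by (medium, date), so consecutive calls
        # usually share the same (medium_id, date) key.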
def _get_deduplicates(self, articleset_1, articleset_2, text_ratio, headline_ratio, skip_simple, delete_same):
log.info("Start deduplicating ({articleset_1}, {articleset_2})..".format(**locals()))
all_articles = articleset_1.articles.only("id", "date", "medium", "text", "headline")
n_articles = all_articles.count()
        articles = all_articles.order_by("medium", "date")
for i, article in enumerate(articles.iterator(), start=1):
if not i % 100 or i == n_articles:
log.info("Checking article {i} of {n_articles}".format(**locals()))
compare_with = self.get_articles(articleset_2, article, text_ratio)
if not skip_simple:
                compare_with = self.get_simple_levenhstein(compare_with, article, text_ratio)
compare_with = self.get_matching(compare_with, article, headline_ratio, "headline")
compare_with = set(self.get_matching(compare_with, article, text_ratio, "text"))
if not delete_same:
discard = None
for a in compare_with:
if a.id == article.id:
discard = a
compare_with.discard(discard)
if compare_with:
yield (article, compare_with)
def _run(self, dry_run, articleset_2, **kwargs):
duplicates = collections.defaultdict(list)
for art, dupes in self._get_deduplicates(articleset_2=articleset_2, **kwargs):
for dupe in dupes:
duplicates[art].append(dupe)
if not dry_run:
articleset_2.articles.through.objects.filter(articleset=articleset_2,
article__in=itertools.chain.from_iterable(duplicates.values())
).delete()
else:
pprint.pprint(dict(duplicates))
return duplicates
if __name__ == '__main__':
from amcat.scripts.tools import cli
cli.run_cli()
zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/test_update_vm_cpu_memory.py | Python | apache-2.0 | 2,076 | 0.003372
'''
Test change cpu and memory configuration when VM is running
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
#import zstackwoodpecker.operations
|
.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Test update instance offering')
vm = test_stub.create_basic_vm()
|
instance_offering = test_lib.lib_get_instance_offering_by_uuid(vm.get_vm().instanceOfferingUuid)
test_obj_dict.add_vm(vm)
vm_ops.update_vm(vm.get_vm().uuid, instance_offering.cpuNum * 2, None)
vm_ops.update_vm(vm.get_vm().uuid, None, instance_offering.memorySize * 2)
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize is expected to change")
vm.stop()
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize is expected to change")
vm.start()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to take effect after Vm restart")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to take effect after Vm restart")
vm.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Test update instance cpu memory Pass')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
plotly/plotly.py | packages/python/plotly/plotly/validators/streamtube/starts/_x.py | Python | mit | 391 | 0.002558
import _plotly_utils.ba
|
sevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="streamtube.starts", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
)
jody-frankowski/ansible | test/units/TestModuleUtilsBasic.py | Python | gpl-3.0 | 13,044 | 0.003527
import os
import tempfile
import unittest
from nose.tools import raises
from nose.tools import timed
from ansible import errors
from ansible.module_common import ModuleReplacer
from ansible.utils import checksum as utils_checksum
TEST_MODULE_DATA = """
from ansible.module_utils.basic import *
def get_module():
return AnsibleModule(
argument_spec = dict(),
supports_check_mode = True,
no_log = True,
)
get_module()
"""
class TestModuleUtilsBasic(unittest.TestCase):
def cleanup_temp_file(self, fd, path):
try:
os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def setUp(self):
# create a temporary file for the test module
# we're about to generate
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
os.write(self.tmp_fd, TEST_MODULE_DATA)
# template the module code and eval it
module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
d = {}
exec(module_data, d, d)
self.module = d['get_module']()
# module_utils/basic.py screws with CWD, let's save it and reset
self.cwd = os.getcwd()
def tearDown(self):
self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
# Reset CWD back to what it was before basic.py changed it
os.chdir(self.cwd)
#################################################################################
# run_command() tests
# test run_command with a string command
def test_run_command_string(self):
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'")
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with an array of args (with both use_unsafe_shell=True|False)
def test_run_command_args(self):
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"])
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with leading environment variables
@raises(SystemExit)
def test_run_command_string_with_env_variables(self):
self.module.run_command('FOO=bar /bin/echo -n "foo bar"')
@raises(SystemExit)
def test_run_command_args_with_env_variables(self):
self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar'])
def test_run_command_string_unsafe_with_env_variables(self):
(rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with a command pipe (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_pipe(self):
(rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect in (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_in(self):
(rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect out (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_double_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with data
def test_run_command_string_with_data(self):
(rc, out, err) = self.module.run_command('cat', data='foo bar')
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with binary data
def test_run_command_string_with_binary_data(self):
(rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'ABCD')
# test run_command with a cwd set
def test_run_command_string_with_cwd(self):
tmp_path = tempfile.mkdtemp()
try:
(rc, out, err) = self.module.run_command('pwd', cwd=tmp_path)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
self.assertEqual(out.strip(), os.path.realpath(tmp_path))
except:
raise
finally:
self.cleanup_temp_dir(tmp_path)
class TestModuleUtilsBasicHelpers(unittest.TestCase):
''' Test some implementation details of AnsibleModule
Some pieces of AnsibleModule are implementation details but they have
    potential corner cases that we need to check. Go ahead and test at
this level that the functions are behaving even though their API may
change and we'd have to rewrite these tests so that we know that we
need to check for those problems in any rewrite.
In the future we might want to restructure higher level code to be
friendlier to unittests so that we can test at the level that the public
is interacting with the APIs.
'''
MANY_RECORDS = 7000
URL_SECRET = 'http://username:pas:word@foo.com/data'
SSH_SECRET = 'username:pas:word@foo.com/data'
def cleanup_temp_file(self, fd, path):
try:
            os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {'host%s' % i:
{'pstack':
{'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']},
}}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
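    # _gen_data builds a hostvars structure with MANY_RECORDS host entries;
    # per_rec embeds secret_text in every host record and top_level adds it
    # once at the root, so both placements can be exercised at scale.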
def setUp(self):
self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.URL_SECRET))
self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.SSH_SECRET))
self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.URL_SECRET))
self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.SSH_SECRET))
andrewgodwin/django-channels | channels/staticfiles.py | Python | bsd-3-clause | 2,778 | 0.00108
from urllib.parse import urlparse
from urllib.request import url2pathname
from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.http import Http404
from .http import AsgiHandler
class StaticFilesWrapper:
"""
ASGI application which wraps another and intercepts requests for static
files, passing them off to Django's static file serving.
"""
def __init__(self, application, staticfiles_handler=None):
self.application = application
self.staticfiles_handler_class = staticfiles_handler or StaticFilesHandler
self.base_url = urlparse(self.get_base_url())
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the static files path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
async def __call__(self, scope, receive, send):
# Only even look at HTTP requests
if scope["type"] == "http" and self._should_handle(scope["path"]):
# Serve static content
return await self.staticfiles_handler_class()(
dict(scope, static_base_url=self.base_url), receive, send
)
# Hand off to the main app
return await self.application(scope, receive, send)
class StaticFilesHandler(AsgiHandler):
"""
Subclass of AsgiHandler that serves directly from its get_response.
"""
# TODO: Review hierarchy here. Do we NEED to inherit BaseHandler, AsgiHandler?
async def __call__(self, scope, receive, send):
self.static_base_url = scope["static_base_url"][2]
return await super().__call__(scope, receive, send)
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.static_base_url) :]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
"""
Always tries to serve a static file as you don't even get into this
handler subclass without the wrapper directing you here.
"""
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
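# Hedged usage sketch (comments only, appended for illustration; not part
# of the original module): the wrapper is meant to sit around the project's
# root ASGI application, e.g. in asgi.py. ``django_asgi_app`` is a
# placeholder name invented here.
#
#     from channels.staticfiles import StaticFilesWrapper
#     from django.core.asgi import get_asgi_application
#
#     django_asgi_app = get_asgi_application()
#     application = StaticFilesWrapper(django_asgi_app)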
|
astrobin/astrobin
|
astrobin/forms/solar_system_acquisition_form.py
|
Python
|
agpl-3.0
| 1,528
| 0.003272
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from astrobin.models import SolarSystem_Acquisition
class SolarSystem_AcquisitionForm(forms.ModelForm):
error_css_class = 'error'
date = forms.DateField(
required=False,
input_formats=['%Y-%m-%d'],
widget=forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
help_text=_("Please use the following format: yyyy-mm-dd"),
label=_("Date"),
)
def clean_seeing(self):
data = self.cleaned_data['seeing']
if data and data not in list(range(1, 6)):
raise forms.ValidationError(_("Please enter a value between 1 and 5."))
return data
def clean_transparency(self):
data = self.cleaned_data['transparency']
if data and data not in list(range(1, 11)):
raise forms.ValidationError(_("Please enter a value between 1 and 10."))
return data
class Meta:
model = SolarSystem_Acquisition
fields = (
'date',
'time',
'frames',
'fps',
'exposure_per_frame',
'focal_length',
'cmi',
'cmii',
'cmiii',
'seeing',
'transparency',
)
widgets = {
'date': forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
'time': forms.TextInput(attrs={'class': 'timepickerclass', 'autocomplete': 'off'}),
}
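# Hedged usage sketch (comments only; the field values below are invented
# for illustration): the clean_seeing()/clean_transparency() methods above
# reject out-of-range input the way any Django form clean_* hook would.
#
#     form = SolarSystem_AcquisitionForm(data={'seeing': '6', 'transparency': '5'})
#     form.is_valid()        # False: seeing must be between 1 and 5
#     form.errors['seeing']  # the ValidationError message from clean_seeing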
|
asterix135/infonex_crm
|
marketing/migrations/0006_auto_20181221_1059.py
|
Python
|
mit
| 594
| 0
|
# Generated by Django 2.0.3 on 2018-12-21 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketing', '0005_auto_20180123_1157'),
]
operations = [
migrations.AlterField(
model_name='uploadedcell',
name='content',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='uploadedrow',
name='error_message',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
b-jesch/service.fritzbox.callmonitor
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/keyring/util/escape.py
|
Python
|
gpl-2.0
| 1,497
| 0.004008
|
"""
escape/unescape routines available for backends which need
alphanumeric usernames, services, or other values
"""
import re
import string
import sys
# True if we are running on Python 3.
# taken from six.py
PY3 = sys.version_info[0] == 3
# allow use of unicode literals
if PY3:
def u(s):
return s
def _unichr(c):
return chr(c)
else:
def u(s):
return unicode(s, "unicode_escape")
def _unichr(c):
return unichr(c)
LEGAL_CHARS = (
getattr(string, 'letters', None) # Python 2
or getattr(string, 'ascii_letters') # Python 3
) + string.digits
ESCAPE_FMT = "_%02X"
def _escape_char(c):
"Single char escape. Return the char, escaped if not already legal"
if isinstance(c, int):
c = _unichr(c)
return c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c)
def escape(value):
"""
Escapes given string so the result consists of alphanumeric chars and
underscore only.
"""
return "".join(_escape_char(c) for c in value.encode('utf-8'))
def _unescape_code(regex_match):
ordinal = int(regex_match.group('code'), 16)
if sys.version_info >= (3,):
return bytes([ordinal])
return chr(ordinal)
def unescape(value):
"""
Inverse of escape.
"""
re_esc = re.compile(
# the pattern must be bytes to operate on bytes
ESCAPE_FMT.replace('%02X', '(?P<code>[0-9A-F]{2})').encode('ascii')
)
return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8')
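# Hedged demo appended for illustration (not part of the original module):
# round-tripping a value through escape()/unescape(). It uses only the
# helpers defined above, so it behaves the same on Python 2 and 3.
if __name__ == '__main__':
    sample = u('user@example.com')
    escaped = escape(sample)           # -> 'user_40example_2Ecom'
    assert unescape(escaped) == sample
    print(escaped)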
|
abhishekkr/nix-bootstrapper
|
commands/command_plugins/password/__init__.py
|
Python
|
apache-2.0
| 5,851
| 0.000855
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
password commands plugin
"""
import base64
import binascii
import logging
import os
import subprocess
from Crypto.Cipher import AES
import crypt
import commands
# This is to support older python versions that don't have hashlib
try:
import hashlib
except ImportError:
import md5
class hashlib(object):
"""Fake hashlib module as a class"""
@staticmethod
def md5():
return md5.new()
class PasswordError(Exception):
"""
Class for password command exceptions
"""
def __init__(self, response):
# Should be a (ResponseCode, ResponseMessage) tuple
self.response = response
def __str__(self):
return "%s: %s" % self.response
def get_response(self):
return self.response
def _make_salt(length):
"""Create a salt of appropriate length"""
salt_chars = 'abcdefghijklmnopqrstuvwxyz'
salt_chars += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
salt_chars += '0123456789./'
rand_data = os.urandom(length)
salt = ''
for c in rand_data:
salt += salt_chars[ord(c) % len(salt_chars)]
return salt
def _create_temp_password_file(user, password, filename):
"""Read original passwd file, generating a new temporary file.
Returns: The temporary filename
"""
with open(filename) as f:
file_data = f.readlines()
stat_info = os.stat(filename)
tmpfile = '%s.tmp.%d' % (filename, os.getpid())
# We have to use os.open() so that we can create the file with
# the appropriate modes. If we create it and set modes later,
# there's a small point of time where a non-root user could
# potentially open the file and wait for data to be written.
fd = os.open(tmpfile,
os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
stat_info.st_mode)
f = None
success = False
try:
os.chown(tmpfile, stat_info.st_uid, stat_info.st_gid)
f = os.fdopen(fd, 'w')
for line in file_data:
if line.startswith('#'):
f.write(line)
continue
try:
(s_user, s_password, s_rest) = line.split(':', 2)
except ValueError:
f.write(line)
continue
if s_user != user:
f.write(line)
continue
if s_password.startswith('$'):
# Format is '$ID$SALT$HASH' where ID defines the
# ecnryption type. We'll re-use that, and make a salt
# that's the same size as the old
salt_data = s_password[1:].split('$')
salt = '$%s$%s$' % (salt_data[0],
_make_salt(len(salt_data[1])))
else:
# Default to MD5 as a minimum level of compatibility
salt = '$1$%s$' % _make_salt(8)
enc_pass = crypt.crypt(password, salt)
f.write("%s:%s:%s" % (s_user, enc_pass, s_rest))
f.close()
f = None
success = True
except Exception, e:
logging.error("Couldn't create temporary password file: %s" % str(e))
raise
finally:
if not success:
# Close the file if it's open
if f:
try:
f.close()
except Exception:
pass
# Make sure to unlink the tmpfile
try:
os.unlink(tmpfile)
except Exception:
pass
return tmpfile
def set_password(user, password):
"""Set the password for a particular user"""
INVALID = 0
PWD_MKDB = 1
RENAME = 2
files_to_try = {'/etc/shadow': RENAME,
'/etc/master.passwd': PWD_MKDB}
for filename, ftype in files_to_try.iteritems():
if not os.path.exists(filename):
continue
tmpfile = _create_temp_password_file(user, password, filename)
if ftype == RENAME:
bakfile = '/etc/shadow.bak.%d' % os.getpid()
os.rename(filename, bakfile)
os.rename(tmpfile, filename)
os.remove(bakfile)
return
if ftype == PWD_MKDB:
pipe = subprocess.PIPE
p = subprocess.Popen(['/usr/sbin/pwd_mkdb', tmpfile],
stdin=pipe, stdout=pipe, stderr=pipe)
(stdoutdata, stderrdata) = p.communicate()
if p.returncode != 0:
if stderrdata:
stderrdata = stderrdata.strip('\n')
else:
stderrdata = '<None>'
logging.error("pwd_mkdb failed: %s" % stderrdata)
try:
os.unlink(tmpfile)
except Exception:
pass
raise PasswordError(
(500, "Rebuilding the passwd database failed"))
return
raise PasswordError((500, "Unknown password file format"))
@commands.command_add('password', 'password')
def password_cmd(data_values):
""" change password """
try:
set_password('root', data_values)
except PasswordError, e:
return e.get_response()
return True
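# Hedged demo (illustration only; the plaintext below is made up): this is
# how set_password() builds an MD5 crypt(3) hash when the existing entry
# carries no '$ID$SALT$HASH' salt to reuse. Unix-only, like the module.
if __name__ == '__main__':
    demo_salt = '$1$%s$' % _make_salt(8)
    print crypt.crypt('s3cret', demo_salt)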
|
sgonzalez/wsn-parking-project
|
sensor-pi/testimages.py
|
Python
|
gpl-2.0
| 764
| 0.027487
|
#!/usr/bin/env python
from time import sleep
import os
import RPi.GPIO as GPIO
import subprocess
import datetime
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.IN)
count = 0
up = False
down = False
command = ""
filename = ""
index = 0
camera_pause = "500"
def takepic(imageName):
print("picture")
command = "sudo raspistill -o " + imageName + " -q 100 -t " + camera_pa
|
use
print(command)
os.system(command)
while(True):
if(up==True):
if(GPIO.input(24)==False):
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d_%H%M%S")
filename = "photo-"+timeString+".jpg"
takepic(filename)
subprocess.call(['./processImage.sh', filename, '&'])
up = GPIO.input(24)
count = count+1
sleep(.1)
print "done"
|
dleicht/planx
|
manage.py
|
Python
|
mit
| 559
| 0.003578
|
# This file starts the WSGI web application.
# - Heroku starts gunicorn, which loads Procfile, which starts manage.py
# - Developers can run it from the command line: python runserver.py
import logging
from logging.handlers import RotatingFileHandler
from app import create_app
app = create_app()
# Start a development web server if executed from the command line
if __name__ == "__main__":
# Manage the command line parameters such as:
# - python manage.py runserver
# - python manage.py db
from app import manager
manager.run()
|
caneruguz/osf.io
|
addons/osfstorage/decorators.py
|
Python
|
apache-2.0
| 2,839
| 0.000704
|
import httplib
import functools
from modularodm.exceptions import NoResultsFound
from modularodm.storage.base import KeyExistsException
from framework.auth.decorators import must_be_signed
from framework.exceptions import HTTPError
from addons.osfstorage.models import OsfStorageFileNode, OsfStorageFolder
from osf.models import OSFUser, AbstractNode
from website.files import exceptions
from website.project.decorators import (
must_not_be_registration, must_have_addon,
)
def handle_odm_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except NoResultsFound:
raise HTTPError(httplib.NOT_FOUND)
except KeyExistsException:
raise HTTPError(httplib.CONFLICT)
except exceptions.VersionNotFoundError:
raise HTTPError(httplib.NOT_FOUND)
return wrapped
def autoload_filenode(must_be=None, default_root=False):
"""Implies both must_have_addon osfstorage node and
handle_odm_errors
Attempts to load fid as a OsfStorageFileNode with viable constraints
"""
def _autoload_filenode(func):
@handle_odm_errors
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(*args, **kwargs):
node = kwargs['node']
if 'fid' not in kwargs and default_root:
file_node = kwargs['node_addon'].get_root()
else:
file_node = OsfStorageFileNode.get(kwargs.get('fid'), node)
if must_be and file_node.kind != must_be:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'incorrect type',
'message_long': 'FileNode must be of type {} not {}'.format(must_be, file_node.kind)
})
kwargs['file_node'] = file_node
return func(*args, **kwargs)
return wrapped
return _autoload_filenode
def waterbutler_opt_hook(func):
@must_be_signed
@handle_odm_errors
@must_not_be_registration
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(payload, *args, **kwargs):
try:
user = OSFUser.load(payload['user'])
dest_node = AbstractNode.load(payload['destination']['node'])
source = OsfStorageFileNode.get(payload['source'], kwargs['node'])
dest_parent = OsfStorageFolder.get(payload['destination']['parent'], dest_node)
kwargs.update({
'user': user,
'source': source,
'destination': dest_parent,
'name': payload['destination']['name'],
})
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
return func(*args, **kwargs)
return wrapped
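# Hedged usage sketch (comments only; the view name below is invented for
# illustration): autoload_filenode() is designed to wrap web views so they
# receive a loaded ``file_node`` kwarg instead of a raw fid.
#
#     @must_be_signed
#     @autoload_filenode(must_be='file')
#     def get_file_metadata(file_node=None, **kwargs):
#         return file_node.serialize()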
|
baylee-d/osf.io
|
website/archiver/utils.py
|
Python
|
apache-2.0
| 11,737
| 0.003238
|
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
:param src_addon: AddonNodeSettings instance of addon being examined
:param fileobj_metadata: file or folder metadata of current point of reference
in file tree
:param user: archive initatior
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
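# Hedged illustration (comments only; the tree below is a made-up minimal
# example of the waterbutler-style metadata this function walks):
#
#     tree = {'kind': 'folder', 'children': [
#         {'kind': 'file', 'extra': {'hashes': {'sha256': 'abc123'}}},
#     ]}
#     _do_get_file_map(tree)  # -> [('abc123', <the file node dict>)]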
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
note:: file_map is injected implictly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
"""
some annotations:
- `value` is the `extra` from a file upload in `registered_meta`
(see `Uploader.addFile` in website/static/js/registrationEditorExtensions.js)
- `node` is a Registration instance
- returns a `(file_info, node_id)` or `(None, None)` tuple, where `file_info` is from waterbutler's api
(see `addons.base.models.BaseStorageAddon._get_fileobj_child_metadata` and `waterbutler.core.metadata.BaseMetadata`)
"""
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
'<': '<',
'>': '>'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, file_info, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == file_info['name']:
return file_info, node_id
return None, None
def find_registration_files(values, node):
"""
some annotations:
- `values` is f
|
MikeLing/shogun
|
examples/undocumented/python/graphical/em_1d_gmm.py
|
Python
|
gpl-3.0
| 1,911
| 0.018315
|
from pylab import figure,show,connect,hist,plot,legend
from numpy import array, append, arange, empty, exp
from shogun import Gaussian, GMM
from shogun import RealFeatures
import util
util.set_title('EM for 1d GMM example')
#set the parameters
min_cov=1e-9
max_iter=1000
min_change=1e-9
#setup the real GMM
real_gmm=GMM(3)
real_gmm.set_nth_mean(array([-2.0]), 0)
real_gmm.set_nth_mean(array([0.0]), 1)
real_gmm.set_nth_mean(array([2.0]), 2)
real_gmm.set_nth_cov(array([[0.3]]), 0)
real_gmm.set_nth_cov(array([[0.1]]), 1)
real_gmm.set_nth_cov(array([[0.2]]), 2)
real_gmm.set_coef(array([0.3, 0.5, 0.2]))
#generate training set from real GMM
generated=array([real_gmm.sample()])
for i in range(199):
generated=append(generated, array([real_gmm.sample()]), axis=1)
feat_train=RealFeatures(generated)
#train GMM using EM
est_gmm=GMM(3)
est_gmm.train(feat_train)
est_gmm.train_em(min_cov, max_iter, min_change)
#get and print estimated means and covariances
est_mean1=est_gmm.get_nth_mean(0)
est_mean2=est_gmm.get_nth_mean(1)
est_mean3=est_gmm.get_nth_mean(2)
est_cov1=est_gmm.get_nth_cov(0)
est_cov2=est_gmm.get_nth_cov(1)
est_cov3=est_gmm.get_nth_cov(2)
est_coef=est_gmm.get_coef()
print est_mean1
print est_cov1
print est_mean2
print est_cov2
print est_mean3
print est_cov3
print est_coef
#plot real GMM, data and estimated GMM
min_gen=min(min(generated))
max_gen=max(max(generated))
plot_real=empty(0)
plot_est=empty(0)
for i in arange(min_gen, max_gen, 0.001):
plot_real=append(plot_real, array([exp(real_gmm.cluster(array([i]))[3])]))
plot_est=append(plot_est, array([exp(est_gmm.cluster(array([i]))[3])]))
real_plot=plot(arange(min_gen, max_gen, 0.001), plot_real, "b")
est_plot=plot(arange(min_gen, max_gen, 0.001), plot_est, "r")
real_hist=hist(generated.transpose(), bins=50, normed=True, fc="gray")
legend(("Real GMM", "Estimated GMM"))
connect('key_press_event', util.quit)
show()
|
BioMedIA/irtk-legacy
|
wrapping/cython/irtk/ext/chanvese.py
|
Python
|
bsd-3-clause
| 9,929
| 0.022258
|
# http://www.creatis.insa-lyon.fr/~bernard/creaseg/
# http://ascratchpad.blogspot.com/2011/03/image-segmentation-using-active.html
#------------------------------------------------------------------------
# Region Based Active Contour Segmentation
#
# seg = region_seg(I,init_mask,max_its,alpha,display)
#
# Inputs: I 2D image
# init_mask Initialization (1 = foreground, 0 = bg)
# max_its Number of iterations to run segmentation for
# alpha (optional) Weight of smoothing term
# higer = smoother. default = 0.2
# display (optional) displays intermediate outputs
# default = true
#
# Outputs: seg Final segmentation mask (1=fg, 0=bg)
#
# Description: This code implements the paper: "Active Contours Without
# Edges" By Chan Vese. This is a nice way to segment images whose
# foregrounds and backgrounds are statistically different and homogeneous.
#
# Example:
# img = imread('tire.tif');
# m = zeros(size(img));
# m(33:33+117,44:44+128) = 1;
# seg = region_seg(img,m,500);
#
# Coded by: Shawn Lankton (www.shawnlankton.com)
#------------------------------------------------------------------------
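# A rough Python translation of the MATLAB example above (hedged sketch;
# 'tire.png' is a placeholder image path, not a bundled asset):
#
#     import matplotlib.pyplot as plt
#     I = plt.imread('tire.png')
#     m = np.zeros(I.shape[:2])
#     m[33:33+117, 44:44+128] = 1
#     seg, phi, its = chanvese(I, m, max_its=500, display=True)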
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
def chanvese(I,init_mask,max_its=200,alpha=0.2,thresh=0,color='r',display=False):
I = I.astype('float')
#-- Create a signed distance map (SDF) from mask
phi = mask2phi(init_mask)
if display:
plt.ion()
showCurveAndPhi(I, phi, color)
plt.savefig('levelset_start.pdf',bbox_inches='tight')
#--main loop
its = 0
stop = False
prev_mask = init_mask
c = 0
while (its < max_its and not stop):
# get the curve's narrow band
idx = np.flatnonzero( np.logical_and( phi <= 1.2, phi >= -1.2) )
if len(idx) > 0:
#-- intermediate output
if display:
if np.mod(its,50) == 0:
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
print 'iteration:', its
showCurveAndPhi(I, phi, color)
else:
if np.mod(its,10) == 0:
print 'iteration:', its
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
#drawnow;
#-- find interior and exterior mean
upts = np.flatnonzero(phi<=0) # interior points
vpts = np.flatnonzero(phi>0) # exterior points
u = np.sum(I.flat[upts])/(len(upts)+eps) # interior mean
v = np.sum(I.flat[vpts])/(len(vpts)+eps) # exterior mean
F = (I.flat[idx]-u)**2-(I.flat[idx]-v)**2 # force from image information
curvature = get_curvature(phi,idx) # force from curvature penalty
dphidt = F /np.max(np.abs(F)) + alpha*curvature # gradient descent to minimize energy
#-- maintain the CFL condition
dt = 0.45/(np.max(np.abs(dphidt))+eps)
#-- evolve the curve
phi.flat[idx] += dt*dphidt
#-- Keep SDF smooth
phi = sussman(phi, 0.5)
new_mask = phi<=0
c = convergence(prev_mask,new_mask,thresh,c)
if c <= 5:
its = its + 1
prev_mask = new_mask
else: stop = True
else:
break
#-- final output
if display:
showCurveAndPhi(I, phi, color)
#plt.savefig('levelset_end.pdf',bbox_inches='tight')
time.sleep(10)
#-- make mask from SDF
seg = phi<=0 #-- Get mask from levelset
return seg,phi,its
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#-- AUXILIARY FUNCTIONS ----------------------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def bwdist(a):
"""
this is an intermediary function, 'a' has only True, False vals,
so we convert them into 0, 1 values -- in reverse. True is 0,
False is 1, distance_transform_edt wants it that way.
"""
return nd.distance_transform_edt(a == 0)
import time
#-- Displays the image with curve superimposed
def showCurveAndPhi(I, phi, color):
# subplot(numRows, numCols, plotNum)
#myplot = plt.subplot(121)
#fig, axes = plt.subplots()
#axes = myplot.axes
#axes.get_xaxis().set_visible(False)
#axes.get_yaxis().set_visible(False)
plt.clf()
plt.imshow(I, cmap='gray')
#plt.hold(True)
CS = plt.contour(phi, 0, colors=color)
plt.draw()
#plt.hold(False)
# myplot = plt.subplot(122)
# axes = myplot.axes
# axes.get_xaxis().set_visible(False)
# axes.get_yaxis().set_visible(False)
# plt.imshow(phi)
plt.draw()
#time.sleep(1)
def im2double(a):
a = a.astype('float')
a /= a.max()
return a
#-- converts a mask to a SDF
def mask2phi(init_a):
phi = bwdist(init_a)-bwdist(1-init_a)+im2double(init_a) -0.5
return phi
#-- compute curvature along SDF
def get_curvature(phi,idx):
dimy, dimx = phi.shape
yx = np.array([np.unravel_index(i, phi.shape)for i in idx]) # get subscripts
y = yx[:,0]
x = yx[:,1]
#-- get subscripts of neighbors
ym1 = y-1; xm1 = x-1; yp1 = y+1; xp1 = x+1;
#-- bounds checking
ym1[ym1<0] = 0; xm1[xm1<0] = 0;
yp1[yp1>=dimy]=dimy - 1; xp1[xp1>=dimx] = dimx - 1;
#-- get indexes for 8 neighbors
idup = np.ravel_multi_index( (yp1,x),phi.shape)
iddn = np.ravel_multi_index( (ym1,x),phi.shape)
idlt = np.ravel_multi_index( (y,xm1),phi.shape)
idrt = np.ravel_multi_index( (y,xp1),phi.shape)
idul = np.ravel_multi_index( (yp1,xm1),phi.shape)
idur = np.ravel_multi_index( (yp1,xp1),phi.shape)
iddl = np.ravel_multi_index( (ym1,xm1),phi.shape)
iddr = np.ravel_multi_index( (ym1,xp1),phi.shape)
#-- get central derivatives of SDF at x,y
phi_x = -phi.flat[idlt]+phi.flat[idrt]
phi_y = -phi.flat[iddn]+phi.flat[idup]
phi_xx = phi.flat[idlt]-2*phi.flat[idx]+phi.flat[idrt]
phi_yy = phi.flat[iddn]-2*phi.flat[idx]+phi.flat[idup]
phi_xy = (-0.25*phi.flat[iddl]-0.25*phi.flat[idur]
+0.25*phi.flat[iddr]+0.25*phi.flat[idul])
phi_x2 = phi_x**2
phi_y2 = phi_y**2
#-- compute curvature (Kappa)
curvature = ( ((phi_x2*phi_yy + phi_y2*phi_xx - 2*phi_x*phi_y*phi_xy)
/ (phi_x2 + phi_y2 +eps)**(3/2))
*(phi_x2 + phi_y2)**(1/2))
return curvature
#-- level set re-initialization by the sussman method
def sussman(D, dt):
# forward/backward differences
a = D - shiftR(D) # backward
b = shiftL(D) - D # forward
c = D - shiftD(D) # backward
d = shiftU(D) - D # forward
a_p = a.copy(); a_n = a.copy(); # a+ and a-
b_p = b.copy(); b_n = b.copy();
c_p = c.copy(); c_n = c.copy();
d_p = d.copy(); d_n = d.copy();
a_p[a < 0] = 0
a_n[a > 0] = 0
b_p[b < 0] = 0
b_n[b > 0] = 0
c_p[c < 0] = 0
c_n[c > 0] = 0
d_p[d < 0] = 0
d_n[d > 0] = 0
dD = np.zeros(D.shape)
D_neg_ind = np.flatnonzero(D < 0)
D_pos_ind = np.flatnonzero(D > 0)
dD.flat[D_pos_ind] = np.sqrt( np.max( np.concatenate( ([a_p.flat[D_pos_ind]**2],
[b_n.flat[D_pos_ind]**2]) ),
axis=0
)
+ np.max( np.concatenate( ([c_p.flat[D_pos_ind]**2],
[d_n.flat[D_pos_ind]**2])),
axis=0
)
) - 1
dD.flat[D_neg_ind] = np.sqrt( np.max( np.concatenate( ([a_n.flat[D_ne
|
mulkieran/storage_alerts
|
tests/sources/generic/by_line/recognizers_test.py
|
Python
|
gpl-2.0
| 7,550
| 0.000662
|
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
""" Test recognizers
|
. """
import unittest
from storage_alerts.sources.generic.by_line.recognizers import LazyRecognizer
from storage_alerts.sources.generic.by_line.recognizers import ManyRecognizer
from storage_alerts.sources.generic.by_line.recognizers import NoRecognizer
from storage_alerts.sources.generic.by_line.recognizers import YesRecognizer
from storage_alerts.sources.generic.by_line.states import RecognizerStates
class YesRecognizerTestCase(unittest.TestCase):
""" Test the recognizer that says yes to any line. """
def testZero(self):
""" It always says no at start. """
rec = YesRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says yes whatever it reads. """
rec = YesRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test that the copy does not behave like the original. """
rec = YesRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(len(rec2.evidence), 0)
self.assertEqual(len(rec2.info), 0)
self.assertEqual(rec, rec2)
self.assertFalse(rec != YesRecognizer())
class MaybeYesRecognizerTestCase(unittest.TestCase):
""" Test the maybe yes recognizer. """
def testZero(self):
""" It always says no at start. """
rec = LazyRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says maybe whatever it reads. """
rec = LazyRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test that copy really resets. """
rec = LazyRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(rec2.evidence, [])
self.assertEqual(len(rec2.info), 0)
self.assertEqual(rec, rec2)
self.assertFalse(rec != LazyRecognizer())
class NoRecognizerTestCase(unittest.TestCase):
""" Test the recognizer that always says no. """
def testZero(self):
""" It always says no at start. """
rec = NoRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says no whatever it reads. """
rec = NoRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test copying. """
rec = NoRecognizer()
rec2 = rec.initializeNew()
self.assertFalse(rec is rec2)
self.assertEqual(rec, rec2)
self.assertFalse(rec != NoRecognizer())
class ManyRecognizerTestCase(unittest.TestCase):
""" Test the many recognizer. """
def testZero(self):
""" If zero are enough it should be in yes state already. """
rec = ManyRecognizer(0)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(rec.evidence, [])
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(rec.evidence, [])
def testOne(self):
""" Should behave just like the yes recognizer. """
rec = ManyRecognizer(1)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
def testTwo(self):
""" If two are required it should pass through the maybe state. """
rec = ManyRecognizer(2)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_NO)
self.assertEqual(len(rec.evidence), 1)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 2)
def testInfo(self):
""" The info is a bunch of key/value pairs. """
rec = ManyRecognizer(2)
info = rec.info
self.assertEqual(info['COUNT'], 0)
self.assertEqual(info['REQUIRED'], 2)
def testStr(self):
""" The description contains some relevant information. """
rec = ManyRecognizer(2)
self.assertIn(str(rec.NUMBER), str(rec))
def testCopy(self):
""" Test copying. """
rec = ManyRecognizer(1)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(rec, rec2)
self.assertNotEqual(rec, ManyRecognizer(2))
class HashTestCase(unittest.TestCase):
""" Test hashing properties. """
_RECS = [
LazyRecognizer(),
ManyRecognizer(2),
NoRecognizer(),
YesRecognizer()
]
def setUp(self):
self._recs = [r.initializeNew() for r in self._RECS]
def testEqualNew(self):
""" Test that newly initialized recognizers hash to same value. """
for r in self._RECS:
self.assertEqual(hash(r), hash(r.initializeNew()))
|
riyas-org/sdr
|
bc_rx_recorded.py
|
Python
|
gpl-2.0
| 3,006
| 0.012641
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Bc Rx Recorded
# Generated: Sun Jun 8 13:46:34 2014
##################################################
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class bc_rx_recorded(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Bc Rx Recorded")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 32000
##################################################
# Blocks
##################################################
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.GetWin(),
baseband_freq=0,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title="FFT Plot",
peak_hold=False,
)
self.Add(self.wxgui_fftsink2_0.win)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, "/home/sinu/sdr/fcd_capture_sample.raw", True)
self.audio_sink_0 = audio.sink(48000, "", True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=192000,
audio_decimation=4,
)
##################################################
# Connections
##################################################
self.connect((self.blocks_file_source_0, 0), (self.wxgui_fftsink2_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.analog_wfm_rcv_0, 0), (self.audio_sink_0, 0))
# QT sink close method reimplementation
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = bc_rx_recorded()
tb.Start(True)
tb.Wait()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.