| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
class Flow(models.Model):
"""
Class to save flow objects in a multithreaded environment.
"""
id = models.ForeignKey(User, primary_key=True)
flow = FlowField()
class Credentials(models.Model):
"""
saves user oauth credentials for later use
"""
id = models.ForeignKey(User, primary_key=True)
credential = CredentialsField()
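# Usage sketch, assuming oauth2client's documented Storage API for
# CredentialsField-backed models (not part of this module):
#   from oauth2client.django_orm import Storage
#   storage = Storage(Credentials, 'id', request.user, 'credential')
#   credential = storage.get()  # storage.put(credentials) to save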
| rochapps/django-google-oauth | google_oauth/models.py | Python | bsd-3-clause | 545 | 0 |
#!/usr/bin/env python
# encoding: utf-8
import logging
import telegram
import random
import json
from giphypop import translate
with open('quotes.json') as data_file:
quotes = json.load(data_file)
quotes = quotes["quotes"]
def main():
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
bot = telegram.Bot(token='YOUR BOT AUTHORIZATION TOKEN')
try:
LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
except IndexError:
LAST_UPDATE_ID = None
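# getUpdates(offset=N) asks Telegram for updates with update_id >= N, so
# advancing LAST_UPDATE_ID past each handled update prevents reprocessing it.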
while True:
for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=5):
text = update.message.text
chat_id = update.message.chat.id
update_id = update.update_id
if '/start' in text:
custom_keyboard = [["/quote", "/gif"]]
reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=True)
bot.sendMessage(chat_id=chat_id,
text="Chose.",
reply_markup=reply_markup)
LAST_UPDATE_ID = update_id + 1
elif '/quote' in text:
answer = quote()
bot.sendMessage(chat_id=chat_id,
text=answer)
LAST_UPDATE_ID = update_id + 1
elif '/gif' in text:
bot.sendMessage(chat_id=chat_id,
text="Nick is searching for an awesome gif.")
img = translate('nicolas cage')
bot.sendDocument(chat_id=chat_id,
document=img.fixed_height.url)
print "Enviar Gif " + img.fixed_height.url
LAST_UPDATE_ID = update_id + 1
def quote():
return random.choice(quotes)
if __name__ == '__main__':
main()
| dobladov/NickCage-TelegramBot | main.py | Python | mit | 1,853 | 0.001079 |
#!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import (create_connection, list_tables, get_results,
helper, timeit, format_results)
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# so we can reference the table by its name and the data associated
# with a given kernel in name_reg by its name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
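# With --percent, each bar becomes the percent improvement over the reference
# table: -100*(y - n)/y, where n is the reference table's value for that kernel.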
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| skoslowski/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | Python | gpl-3.0 | 6,303 | 0.003332 |
from ConfigParser import SafeConfigParser
import os.path
import pytest
import re
import textwrap
from amazonproduct import utils
def pytest_addoption(parser):
group = parser.getgroup('amazonproduct',
'custom options for testing python-amazon-product-api')
group._addoption('--locale', action='append', dest='locales',
metavar='LOCALE', help='Locale to use (e.g. "de" or "us"). This option '
'can be used more than once. Note that tests with specific locales '
'defined which do not match the ones specified by this option will '
'NOT be run.')
group._addoption('--api-version', action='append', dest='versions',
metavar='VERSION', help='API version to use (e.g. "2010-09-01"). This '
'option can be used more than once. Note that tests with specific '
'versions defined which do not match the ones specified by this '
'option will NOT be run.')
group._addoption('--refetch', action='store', type='choice', dest='fetch',
metavar='method', choices=['no', 'missing', 'outdated', 'all'],
default='no', help='Fetch responses from live server and overwrite '
'previously cached XML file: one of no (default)|missing|outdated|'
'all.')
group._addoption('--processor', action='append', dest='processors',
metavar='PROCESSOR', choices=['objectify', 'etree', 'elementtree', 'minidom'],
help='Result processor to use: one of objectify|etree|elementtree|minidom.')
def pytest_funcarg__server(request):
"""
Same as the `httpserver` funcarg from the pytest-localserver plugin, except
that it has module-wide scope.
"""
def setup():
try:
localserver = request.config.pluginmanager.getplugin('localserver')
except KeyError:
raise pytest.skip('This test needs plugin pytest-localserver!')
server = localserver.http.Server()
server.start()
return server
def teardown(server):
server.stop()
return request.cached_setup(setup, teardown, 'module')
class DummyConfig (object):
"""
Dummy config to which you can add config files, which in turn will be
created on the file system as temporary files.
"""
_file_counter = 0
def __init__(self, tmpdir):
self.tmpdir = tmpdir
self.files = []
def add_file(self, content, path):
"""
Writes one temporary file.
"""
if not path:
path = 'config-%i' % self._file_counter
self._file_counter += 1
p = self.tmpdir.ensure(os.path.expanduser(path))
p.write(textwrap.dedent(content))
self.files += [p.strpath]
_REG = re.compile(r'^#\s*file:\s+(.+?)\n', re.DOTALL | re.MULTILINE)
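# _REG matches the "# file: <path>" markers that delimit the per-file blocks
# consumed by load_from_string() below.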
def load_from_string(self, content):
"""
Creates config files from a string, which is split into file blocks that
are written to temporary files.
"""
last = 0 # end of the last matching '# file: XXX'
path = None # path of the last matching '# file: XXX'
for m in self._REG.finditer(content):
if path is not None:
self.add_file(content[last:m.start()], path)
path = m.group(1)
last = m.end()
if path is not None:
self.add_file(content[last:], path)
else:
raise ValueError('Where are the file paths?')
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def __repr__(self):
return '<DummyConfig %s files=%r>' % (hex(id(self)), self.files)
def pytest_funcarg__configfiles(request):
"""
Returns a dummy config to which you can add config files which in turn will
be created on the file system as temporary files. You can use the following
methods:
To add a single config file use ``configfiles.add_file(content, path)``. If
you omit the ``path``, some arbitrary file name is used. ::
configfiles.add_file('''
[Credentials]
access_key = ABCDEFGH12345
secret_key = abcdegf43
locale = de''', path='/etc/amazon-product-api.cfg')
In order to add multiple config files at once, you can use the following
method::
configfiles.load_from_string('''
# file: /etc/boto.cfg
[Credentials]
aws_access_key_id = Hhdjksaiunkljfl
aws_secret_access_key = difioemLjdks02
# file: /home/user/.amazon-product-api
[Credentials]
locale = de
''')
"""
tmpdir = request.getfuncargvalue('tmpdir')
monkeypatch = request.getfuncargvalue('monkeypatch')
def prepend_tmppath(dir, files):
return [tmpdir.join(os.path.expanduser(fn)).strpath for fn in files]
monkeypatch.setattr(utils, 'CONFIG_FILES',
prepend_tmppath(tmpdir, utils.CONFIG_FILES))
cfg = DummyConfig(tmpdir)
return cfg
| prats226/python-amazon-product-api-0.2.8 | tests/conftest.py | Python | bsd-3-clause | 5,765 | 0.004163 |
#!/usr/bin/env python
import server
import time
from Sensoria.stereotypes.TimeControlData import TimeControlData
from Sensoria.stereotypes.InstantMessageData import InstantMessageData
class TemperatureSensor (server.TemperatureSensor):
def __init__ (self):
super (TemperatureSensor, self).__init__ ("HD", "Heater Temperature")
class HeaterController (server.ControlledRelayActuator):
def __init__ (self):
super (HeaterController, self).__init__ ("HC", "Heater Controller")
#TL1:10 TL2:18 TL3:21
class HeaterTimer (server.TimedActuator):
def __init__ (self):
super (HeaterTimer, self).__init__ ("HT", "Heater Timer")
initData = TimeControlData ()
initData.unmarshal ("PMO:000000001000000003222110 PTU:000000001000000003222110 PWE:000000001000000003222110 PTH:000000001000000003222110 PFR:000000001000000003222111 PSA:000000000322222222222211 PSU:000000000322222222222210")
ok, msg = self.write (initData)
print msg
assert ok
class HeaterSettings (server.ValueSetActuator):
def __init__ (self):
super (HeaterSettings, self).__init__ ("HS", "Heater Settings")
self.levels = [10, 18, 21]
@property
def values (self):
return self.levels
@values.setter
def values (self, v):
self.levels = v[0:3]
hd = TemperatureSensor ()
hc = HeaterController ()
ht = HeaterTimer ()
hs = HeaterSettings ()
listener = server.CommandListener ("HeatingSystem")
listener.register_sensor (hd)
listener.register_sensor (hc)
listener.register_sensor (ht)
listener.register_sensor (hs)
listener.start ()
while True:
time.sleep (1)
| SukkoPera/Arduino-Sensoria | python/server3.py | Python | gpl-3.0 | 1,544 | 0.036269 |
"""
Standardize names of data files on Maryland State Board of Elections.
File-name convention on MD site (2004-2012):
general election
precinct: countyname_by_precinct_year_general.csv
state leg. district: state_leg_districts_year_general.csv
county: countyname_party_year_general.csv
primary election
precinct: countyname_by_Precinct_party_year_Primary.csv
state leg. district: state_leg_districts_party_year_primary.csv
county: countyname_party_year_primary.csv
Exceptions: 2000 + 2002
To run mappings from a shell:
openelex datasource.mappings -s md
"""
import re
from openelex.base.datasource import BaseDatasource
class Datasource(BaseDatasource):
base_url = "http://www.elections.state.md.us/elections/%(year)s/election_data/"
# PUBLIC INTERFACE
def mappings(self, year=None):
"""Return array of dicts containing source url and
standardized filename for raw results file, along
with other pieces of metadata
"""
mappings = []
for yr, elecs in list(self.elections(year).items()):
mappings.extend(self._build_metadata(yr, elecs))
return mappings
def target_urls(self, year=None):
"Get list of source data urls, optionally filtered by year"
return [item['raw_url'] for item in self.mappings(year)]
def filename_url_pairs(self, year=None):
return [(item['generated_filename'], item['raw_url'])
for item in self.mappings(year)]
# PRIVATE METHODS
def _races_by_type(self, elections):
"Filter races by type and add election slug"
races = {
'special': None,
'general': None,
'primary': None,
}
for elec in elections:
rtype = self._race_type(elec)
elec['slug'] = self._election_slug(elec)
races[rtype] = elec
return races['general'], races['primary'], races['special']
def _race_type(self, election):
if election['special']:
return 'special'
return election['race_type'].lower()
def _build_metadata(self, year, elections):
year_int = int(year)
if year_int == 2002:
general, primary, special = self._races_by_type(elections)
meta = [
{
"generated_filename": "__".join((general['start_date'].replace('-',''), self.state, "general.txt")),
"raw_url": self._get_2002_source_urls('general'),
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": general['slug']
},
{
"generated_filename": "__".join((primary['start_date'].replace('-',''), self.state, "primary.txt")),
"raw_url": self._get_2002_source_urls('primary'),
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": primary['slug']
}
]
else:
meta = self._state_leg_meta(year, elections) + self._county_meta(year, elections)
if year_int == 2000:
general, primary, special = self._races_by_type(elections)
meta.append({
"generated_filename": "__".join((primary['start_date'].replace('-',''), self.state, "primary.csv")),
"raw_url": 'http://www.elections.state.md.us/elections/2000/results/prepaa.csv',
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": primary['slug']
})
elif year_int == 2008:
meta.append(self._special_meta_2008(elections))
return meta
def _state_leg_meta(self, year, elections):
payload = []
meta = {
'ocd_id': 'ocd-division/country:us/state:md/sldl:all',
'name': 'State Legislative Districts',
}
general, primary, special = self._races_by_type(elections)
if general is not None:
# Add General meta to payload
general_url = self._build_state_leg_url(year)
general_filename = self._generate_state_leg_filename(general_url, general['start_date'])
gen_meta = meta.copy()
gen_meta.update({
'raw_url': general_url,
'generated_filename': general_filename,
'election': general['slug']
})
payload.append(gen_meta)
# Add Primary meta to payload
if primary and int(year) > 2000:
for party in ['Democratic', 'Republican']:
pri_meta = meta.copy()
primary_url = self._build_state_leg_url(year, party)
primary_filename = self._generate_state_leg_filename(primary_url, primary['start_date'])
pri_meta.update({
'raw_url': primary_url,
'generated_filename': primary_filename,
'election': primary['slug']
})
payload.append(pri_meta)
return payload
def _build_state_leg_url(self, year, party=""):
tmplt = self.base_url + "State_Legislative_Districts"
kwargs = {'year': year}
year_int = int(year)
# PRIMARY
# Assume it's a primary if party is present
if party and year_int > 2000:
kwargs['party'] = party
if year_int == 2004:
tmplt += "_%(party)s_Primary_%(year)s"
else:
tmplt += "_%(party)s_%(year)s_Primary"
# GENERAL
else:
# 2000 and 2004 urls end in the 4-digit year
if year_int in (2000, 2004):
tmplt += "_General_%(year)s"
# All others have the year preceding the race type (General/Primary)
else:
tmplt += "_%(year)s_General"
tmplt += ".csv"
return tmplt % kwargs
def _generate_state_leg_filename(self, url, start_date):
bits = [
start_date.replace('-',''),
self.state.lower(),
]
matches = self._apply_party_racetype_regex(url)
if matches['party']:
bits.append(matches['party'].lower())
bits.extend([
matches['race_type'].lower(),
'state_legislative.csv',
])
name = "__".join(bits)
return name
def _county_meta(self, year, elections):
payload = []
general, primary, special = self._races_by_type(elections)
for jurisdiction in self._jurisdictions():
meta = {
'ocd_id': jurisdiction['ocd_id'],
'name': jurisdiction['name'],
}
county = jurisdiction['url_name']
# GENERALS
# Create countywide and precinct-level metadata for general
for precinct_val in (True, False):
if general is not None:
general_url = self._build_county_url(year, county, precinct=precinct_val)
general_filename = self._generate_county_filename(general_url, general['start_date'], jurisdiction)
gen_meta = meta.copy()
gen_meta.update({
'raw_url': general_url,
'generated_filename': general_filename,
'election': general['slug']
})
payload.append(gen_meta)
# PRIMARIES
# For each primary and party combo, generate countywide and precinct-level metadata
# Primary results not available in 2000
if primary and int(year) > 2000:
for party in ['Democratic', 'Republican']:
for precinct_val in (True, False):
pri_meta = meta.copy()
primary_url = self._build_county_url(year, county, party, precinct_val)
primary_filename = self._generate_county_filename(primary_url, primary['start_date'], jurisdiction)
# Add Primary metadata to payload
pri_meta.update({
'raw_url': primary_url,
'generated_filename': primary_filename,
'election': primary['slug']
})
payload.append(pri_meta)
return payload
def _build_county_url(self, year, name, party='', precinct=False):
url_kwargs = {
'year': year,
'race_type': 'General'
}
# In 2000, 2004 the files for St. Mary's county are prefixed
# with "Saint_Marys" instead of "St._Marys".
if name == "St._Marys" and int(year) in (2000, 2004):
name = "Saint_Marys"
tmplt = self.base_url + name
if precinct:
tmplt += "_By_Precinct"
else:
# 2000/2004 don't use "_County" in file names
if int(year) not in (2000, 2004):
tmplt += "_County"
if party:
url_kwargs['party'] = party
url_kwargs['race_type'] = 'Primary'
tmplt += "_%(party)s"
if int(year) in (2000, 2004):
tmplt += "_%(race_type)s_%(year)s.csv"
else:
tmplt += "_%(year)s_%(race_type)s.csv"
return tmplt % url_kwargs
def _generate_county_filename(self, url, start_date, jurisdiction):
bits = [
start_date.replace('-',''),
self.state,
]
matches = self._apply_party_racetype_regex(url)
if matches['party']:
bits.append(matches['party'].lower())
bits.extend([
matches['race_type'].lower(),
jurisdiction['url_name'].lower()
])
if 'by_precinct' in url.lower():
bits.append('precinct')
filename = "__".join(bits) + '.csv'
return filename
def _apply_party_racetype_regex(self, url):
if re.search(r'(2000|2004)', url):
pattern = re.compile(r"""
(?P<party>Democratic|Republican)?
_
(?P<race_type>General|Primary)""", re.IGNORECASE | re.VERBOSE)
else:
pattern = re.compile(r"""
(?P<party>Democratic|Republican)?
_\d{4}_
(?P<race_type>General|Primary)""", re.IGNORECASE | re.VERBOSE)
matches = re.search(pattern, url).groupdict()
return matches
def _get_2002_source_urls(self, race_type=''):
urls = {
"general": "http://www.elections.state.md.us/elections/2002/results/g_all_offices.txt",
"primary": "http://www.elections.state.md.us/elections/2002/results/p_all_offices.txt"
}
if race_type:
return urls[race_type]
else:
return list(urls.values())
def _generate_2002_filename(self, url):
if url.endswith('g_all_offices.txt'):
filename = "20021105__md__general.txt"
else:
filename = "20020910__md__primary.txt"
return filename
def _jurisdictions(self):
"""Maryland counties, plus Baltimore City"""
m = self.jurisdiction_mappings()
mappings = [x for x in m if x['url_name'] != ""]
return mappings
def _special_meta_2008(self, elections):
"""
Return metadata for 2008 Special 4th Congressional General Election.
"""
general, primary, special = self._races_by_type(elections)
filename_bits = [
special['start_date'].replace('-', ''),
self.state,
'special',
'general',
'us_house_of_representatives__4',
]
return {
"generated_filename":
"__".join(filename_bits) + ".html",
"raw_url": special['direct_links'][0],
"ocd_id": 'ocd-division/country:us/state:md',
"name": 'Maryland',
"election": special['slug']
}
| openelections/openelections-core | openelex/us/md/datasource.py | Python | mit | 12,263 | 0.001957 |
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .models import ActivateCode
import uuid
import os
from django.http import HttpResponse
import datetime
def register(request):
error = ""
if request.method == "GET":
return render(request, "user_register.html")
else:
username = request.POST['username'].strip()
email = request.POST['email'].strip()
password = request.POST['password'].strip()
re_password = request.POST['re_password'].strip()
if not username or not password or not email:
error = "任何字段都不能为空"  # "No field may be left empty"
if password != re_password:
error = "两次密码不一致"  # "The two passwords do not match"
if User.objects.filter(username=username).count() > 0:
error = "用户已存在"  # "User already exists"
if User.objects.filter(email=email).count() > 0:
error = "该邮箱已注册"  # "This email address is already registered"
if not error:
user = User.objects.create_user(username=username,
email=email, password=password)
user.is_active = False
user.save()
new_code = str(uuid.uuid4()).replace("-", "")
expire_time = datetime.datetime.now() + datetime.timedelta(days=2)
code_record = ActivateCode(owner=user, code=new_code,
expire_timestamp=expire_time)
code_record.save()
activate_link = "http://%s%s" % (request.get_host(), reverse(
"user_activate", args=[new_code]))
send_mail('[python论坛]激活邮件',  # subject: "[Python forum] activation email"
'您的激活链接为: %s' % activate_link,  # body: "Your activation link is: %s"
'huyuanxuan@163.com',
[email],
fail_silently=False)
else:
return render(request, "user_register.html", {"error": error})
return HttpResponse("请查收邮件激活帐户!")
def activate(request, code):
query = ActivateCode.objects.filter(code=code,
expire_timestamp__gte=datetime.datetime.now())
if query.count() > 0:
code_record = query[0]
code_record.owner.is_active = True
code_record.owner.save()
return HttpResponse("激活成功")
else:
return HttpResponse("激活失败")
@login_required
def upload_avatar(request):
if request.method == "GET":
return render(request, "upload_avatar.html")
else:
profile = request.user.userprofile
avatar_file = request.FILES.get("avatar", None)
if not avatar_file:
return HttpResponse("未选择文件")
file_name = request.user.username + avatar_file.name
if avatar_file.size > 500 * 1024:  # limit uploads to 500 KB, matching the message below
return HttpResponse("图片大小不能超过500KB")  # "Image size must not exceed 500KB"
file_path = os.path.join("/usr/share/userres/avatar/", file_name)
with open(file_path, 'wb+') as destination:
for chunk in avatar_file.chunks():
destination.write(chunk)
url = "http://res.myforum.com/avatar/%s" % file_name
profile.avatar = url
profile.save()
return redirect("/")
| littleghosty/forum | mysite/usercenter/views.py | Python | gpl-3.0 | 3,333 | 0.000627 |
# Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
from typing import (
TYPE_CHECKING,
Optional,
Iterable,
Tuple,
List,
Set,
Dict,
cast,
Sequence,
Any,
)
import sys
import struct
import math
from enum import IntEnum
from itertools import repeat
from ezdxf.lldxf import const
from ezdxf.tools.binarydata import bytes_to_hexstr, ByteStream, BitStream
from ezdxf import colors
from ezdxf.math import (
Vec3,
Matrix44,
Z_AXIS,
ConstructionCircle,
ConstructionArc,
OCS,
UCS,
X_AXIS,
)
from ezdxf.entities import factory
import logging
if TYPE_CHECKING:
from ezdxf.eztypes import (
Tags,
TagWriter,
Drawing,
DXFGraphic,
Polymesh,
Polyface,
Polyline,
Hatch,
LWPolyline,
)
logger = logging.getLogger("ezdxf")
CHUNK_SIZE = 127
class ProxyGraphicError(Exception):
pass
def load_proxy_graphic(
tags: "Tags", length_code: int = 160, data_code: int = 310
) -> Optional[bytes]:
binary_data = [
tag.value
for tag in tags.pop_tags(codes=(length_code, data_code))
if tag.code == data_code
]
return b"".join(binary_data) if len(binary_data) else None # type: ignore
def export_proxy_graphic(
data: bytes,
tagwriter: "TagWriter",
length_code: int = 160,
data_code: int = 310,
) -> None:
# Do not export proxy graphic for DXF R12 files
assert tagwriter.dxfversion > const.DXF12
length = len(data)
if length == 0:
return
tagwriter.write_tag2(length_code, length)
index = 0
while index < length:
hex_str = bytes_to_hexstr(data[index : index + CHUNK_SIZE])
tagwriter.write_tag2(data_code, hex_str)
index += CHUNK_SIZE
def has_prim_traits(flags: int) -> bool:
return bool(flags & 0xFFFF)
def prims_have_colors(flags: int) -> bool:
return bool(flags & 0x0001)
def prims_have_layers(flags: int) -> bool:
return bool(flags & 0x0002)
def prims_have_linetypes(flags: int) -> bool:
return bool(flags & 0x0004)
def prims_have_markers(flags: int) -> bool:
return bool(flags & 0x0020)
def prims_have_visibilities(flags: int) -> bool:
return bool(flags & 0x0040)
def prims_have_normals(flags: int) -> bool:
return bool(flags & 0x0080)
def prims_have_orientation(flags: int) -> bool:
return bool(flags & 0x0400)
TRAIT_TESTER = {
"colors": (prims_have_colors, "RL"),
"layers": (prims_have_layers, "RL"),
"linetypes": (prims_have_linetypes, "RL"),
"markers": (prims_have_markers, "RL"),
"visibilities": (prims_have_visibilities, "RL"),
"normals": (prims_have_normals, "3RD"),
}
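# "RL" traits are read as one long per primitive, "3RD" traits as one 3D
# vertex per primitive (see read_prim_traits below).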
def read_prim_traits(
bs: ByteStream, types: Sequence[str], prim_flags: int, count: int
) -> Dict:
def read_float_list():
return [bs.read_long() for _ in range(count)]
def read_vertices():
return [Vec3(bs.read_vertex()) for _ in range(count)]
data = dict()
for t in types:
test_trait, data_type = TRAIT_TESTER[t]
if test_trait(prim_flags):
if data_type == "3RD":
data[t] = read_vertices()
elif data_type == "RL":
data[t] = read_float_list()
else:
raise TypeError(data_type)
return data
def read_mesh_traits(
bs: ByteStream, edge_count: int, face_count: int, vertex_count: int
):
# Traits data format:
# all entries are optional
# traits: Dict[str, Dict]
# "edges": Dict[str, List]
# "colors": List[int]
# "layers": List[int] as layer ids
# "linetypes": List[int] as linetype ids
# "markers": List[int]
# "visibilities": List[int]
# "faces": Dict[str, List]
# "colors": List[int]
# "layers": List[int] as layer ids
# "markers": List[int]
# "normals": List[Vec3]
# "visibilities": List[int]
# "vertices": Dict
# "normals": List[Vec3]
# "orientation": bool
traits = dict()
edge_flags = bs.read_long()
if has_prim_traits(edge_flags):
traits["edges"] = read_prim_traits(
bs,
["colors", "layers", "linetypes", "markers", "visibilities"],
edge_flags,
edge_count,
)
face_flags = bs.read_long()
if has_prim_traits(face_flags):
traits["faces"] = read_prim_traits(
bs,
["colors", "layers", "markers", "normals", "visibilities"],
face_flags,
face_count,
)
# Note: DXF entities PolyFaceMesh and Mesh do not support vertex normals!
# disable useless reading process by vertex_count = 0
if vertex_count > 0:
vertex_flags = bs.read_long()
if has_prim_traits(vertex_flags):
vertices = dict()
if prims_have_normals(vertex_flags):
vertices["normals"] = [
Vec3(bs.read_vertex()) for _ in range(vertex_count)
]
if prims_have_orientation(vertex_flags):
vertices["orientation"] = bool(bs.read_long()) # type: ignore
traits["vertices"] = vertices
return traits
class ProxyGraphicTypes(IntEnum):
EXTENTS = 1
CIRCLE = 2
CIRCLE_3P = 3
CIRCULAR_ARC = 4
CIRCULAR_ARC_3P = 5
POLYLINE = 6
POLYGON = 7
MESH = 8
SHELL = 9
TEXT = 10
TEXT2 = 11
XLINE = 12
RAY = 13
ATTRIBUTE_COLOR = 14
UNUSED_15 = 15
ATTRIBUTE_LAYER = 16
UNUSED_17 = 17
ATTRIBUTE_LINETYPE = 18
ATTRIBUTE_MARKER = 19
ATTRIBUTE_FILL = 20
UNUSED_21 = 21
ATTRIBUTE_TRUE_COLOR = 22
ATTRIBUTE_LINEWEIGHT = 23
ATTRIBUTE_LTSCALE = 24
ATTRIBUTE_THICKNESS = 25
ATTRIBUTE_PLOT_STYLE_NAME = 26
PUSH_CLIP = 27
POP_CLIP = 28
PUSH_MATRIX = 29
PUSH_MATRIX2 = 30
POP_MATRIX = 31
POLYLINE_WITH_NORMALS = 32
LWPOLYLINE = 33
ATTRIBUTE_MATERIAL = 34
ATTRIBUTE_MAPPER = 35
UNICODE_TEXT = 36
UNKNOWN_37 = 37
UNICODE_TEXT2 = 38
class ProxyGraphic:
def __init__(self, data: bytes, doc: "Drawing" = None):
self._doc = doc
self._factory = factory.new
self._buffer: bytes = data
self._index: int = 8
self.dxfversion = doc.dxfversion if doc else "AC1015"
self.color: int = const.BYLAYER
self.layer: str = "0"
self.linetype: str = "BYLAYER"
self.marker_index: int = 0
self.fill: bool = False
self.true_color: Optional[int] = None
self.lineweight: int = const.LINEWEIGHT_DEFAULT
self.ltscale: float = 1.0
self.thickness: float = 0.0
# Layer list in storage order
self.layers: List[str] = []
# Linetypes list in storage order
self.linetypes: List[str] = []
# List of text styles, with font name as key
self.textstyles: Dict[str, str] = dict()
self.required_fonts: Set[str] = set()
self.matrices: List[Matrix44] = []
if self._doc:
self.layers = list(layer.dxf.name for layer in self._doc.layers)
self.linetypes = list(
linetype.dxf.name for linetype in self._doc.linetypes
)
self.textstyles = {
style.dxf.font: style.dxf.name for style in self._doc.styles
}
def info(self) -> Iterable[Tuple[int, int, str]]:
index = self._index
buffer = self._buffer
while index < len(buffer):
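# Each record is laid out as <u32 size><u32 type><payload>; `size` includes
# this 8-byte header, so the payload spans index+8 .. index+size.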
size, type_ = struct.unpack_from("<2L", self._buffer, offset=index)
try:
name = ProxyGraphicTypes(type_).name
except ValueError:
name = f"UNKNOWN_TYPE_{type_}"
yield index, size, name
index += size
def virtual_entities(self) -> Iterable["DXFGraphic"]:
return self.__virtual_entities__()
def __virtual_entities__(self) -> Iterable["DXFGraphic"]:
"""Implements the SupportsVirtualEntities protocol."""
try:
yield from self.unsafe_virtual_entities()
except Exception as e:
raise ProxyGraphicError(f"Proxy graphic error: {str(e)}")
def unsafe_virtual_entities(self) -> Iterable["DXFGraphic"]:
def transform(entity):
if self.matrices:
return entity.transform(self.matrices[-1])
else:
return entity
index = self._index
buffer = self._buffer
while index < len(buffer):
size, type_ = struct.unpack_from("<2L", self._buffer, offset=index)
try:
name = ProxyGraphicTypes(type_).name.lower()
except ValueError:
logger.debug(f"Unsupported Type Code: {type_}")
index += size
continue
method = getattr(self, name, None)
if method:
result = method(self._buffer[index + 8 : index + size])
if isinstance(result, tuple):
for entity in result:
yield transform(entity)
elif result:
yield transform(result)
if result: # reset fill after each graphic entity
self.fill = False
else:
logger.debug(f"Unsupported feature ProxyGraphic.{name}()")
index += size
def push_matrix(self, data: bytes):
values = struct.unpack("<16d", data)
m = Matrix44(values)
m.transpose()
self.matrices.append(m)
def pop_matrix(self, data: bytes):
if self.matrices:
self.matrices.pop()
def reset_colors(self):
self.color = const.BYLAYER
self.true_color = None
def attribute_color(self, data: bytes):
self.reset_colors()
self.color = struct.unpack("<L", data)[0]
if self.color < 0 or self.color > 256:
self.color = const.BYLAYER
def attribute_layer(self, data: bytes):
if self._doc:
index = struct.unpack("<L", data)[0]
if index < len(self.layers):
self.layer = self.layers[index]
def attribute_linetype(self, data: bytes):
if self._doc:
index = struct.unpack("<L", data)[0]
if index < len(self.linetypes):
self.linetype = self.linetypes[index]
def attribute_marker(self, data: bytes):
self.marker_index = struct.unpack("<L", data)[0]
def attribute_fill(self, data: bytes):
self.fill = bool(struct.unpack("<L", data)[0])
def attribute_true_color(self, data: bytes):
self.reset_colors()
code, value = colors.decode_raw_color(struct.unpack("<L", data)[0])
if code == colors.COLOR_TYPE_RGB:
self.true_color = colors.rgb2int(value) # type: ignore
else: # ACI colors, BYLAYER, BYBLOCK
self.color = value # type: ignore
def attribute_lineweight(self, data: bytes):
lw = struct.unpack("<L", data)[0]
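# Lineweights arrive as unsigned 32-bit values; anything above the valid
# maximum is a negative lineweight stored in two's complement, so convert it
# back (never dropping below LINEWEIGHT_DEFAULT).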
if lw > const.MAX_VALID_LINEWEIGHT:
self.lineweight = max(lw - 0x100000000, const.LINEWEIGHT_DEFAULT)
else:
self.lineweight = lw
def attribute_ltscale(self, data: bytes):
self.ltscale = struct.unpack("<d", data)[0]
def attribute_thickness(self, data: bytes):
self.thickness = struct.unpack("<d", data)[0]
def circle(self, data: bytes):
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
attribs["center"] = Vec3(bs.read_vertex())
attribs["radius"] = bs.read_float()
attribs["extrusion"] = bs.read_vertex()
return self._factory("CIRCLE", dxfattribs=attribs)
def circle_3p(self, data: bytes):
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
p1 = Vec3(bs.read_vertex())
p2 = Vec3(bs.read_vertex())
p3 = Vec3(bs.read_vertex())
circle = ConstructionCircle.from_3p(p1, p2, p3)
attribs["center"] = circle.center
attribs["radius"] = circle.radius
return self._factory("CIRCLE", dxfattribs=attribs)
def circular_arc(self, data: bytes):
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
center = Vec3(bs.read_vertex()) # in WCS
attribs["radius"] = bs.read_float()
normal = Vec3(bs.read_vertex()) # UCS z-axis
start_vec = Vec3(bs.read_vertex()) # UCS x-axis
# sweep angle around normal vector!
sweep_angle = bs.read_float() # in radians
# arc_type = bs.read_long() # unused yet - meaning?
start_angle: float # in degrees
end_angle: float # in degrees
if not normal.isclose(Z_AXIS):
# local UCS
ucs = UCS(ux=start_vec, uz=normal)
# target OCS
ocs = OCS(normal)
# convert start angle == UCS x-axis to OCS
start_angle = ocs.from_wcs(ucs.to_wcs(X_AXIS)).angle_deg # type: ignore
# convert end angle to OCS
end_vec = Vec3.from_angle(sweep_angle)
end_angle = ocs.from_wcs(ucs.to_wcs(end_vec)).angle_deg # type: ignore
# setup OCS for ARC entity
attribs["extrusion"] = normal
# convert WCS center to OCS center
center = ocs.from_wcs(center)
else:
start_angle = start_vec.angle_deg
end_angle = start_angle + math.degrees(sweep_angle)
attribs["center"] = center
attribs["start_angle"] = start_angle
attribs["end_angle"] = end_angle
return self._factory("ARC", dxfattribs=attribs)
def circular_arc_3p(self, data: bytes):
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
p1 = Vec3(bs.read_vertex())
p2 = Vec3(bs.read_vertex())
p3 = Vec3(bs.read_vertex())
# arc_type = bs.read_long() # unused yet
arc = ConstructionArc.from_3p(p1, p3, p2)
attribs["center"] = arc.center
attribs["radius"] = arc.radius
attribs["start_angle"] = arc.start_angle
attribs["end_angle"] = arc.end_angle
return self._factory("ARC", dxfattribs=attribs)
def _filled_polygon(self, vertices, attribs):
hatch = cast("Hatch", self._factory("HATCH", dxfattribs=attribs))
hatch.paths.add_polyline_path(vertices, is_closed=True)
return hatch
def _polyline(self, vertices, *, close=False, normal=Z_AXIS):
# Polyline without bulge values!
# Current implementation ignores the normal vector!
attribs = self._build_dxf_attribs()
count = len(vertices)
if count == 1 or (count == 2 and vertices[0].isclose(vertices[1])):
attribs["location"] = vertices[0]
return self._factory("POINT", dxfattribs=attribs)
if self.fill and count > 2:
polyline = self._filled_polygon(vertices, attribs)
else:
attribs["flags"] = const.POLYLINE_3D_POLYLINE
polyline = cast(
"Polyline", self._factory("POLYLINE", dxfattribs=attribs)
)
polyline.append_vertices(vertices)
if close:
polyline.close()
return polyline
def polyline_with_normals(self, data: bytes):
# Polyline without bulge values!
vertices, normal = self._load_vertices(data, load_normal=True)
return self._polyline(vertices, normal=normal)
def polyline(self, data: bytes):
# Polyline without bulge values!
vertices, normal = self._load_vertices(data, load_normal=False)
return self._polyline(vertices)
def polygon(self, data: bytes):
# Polyline without bulge values!
vertices, normal = self._load_vertices(data, load_normal=False)
return self._polyline(vertices, close=True)
def lwpolyline(self, data: bytes):
# OpenDesign Specs LWPLINE: 20.4.85 Page 211
# TODO: MLEADER exploration example "explore_mleader_block.dxf" has
# LWPOLYLINE proxy graphic and raises an exception!
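# Flag bits used below: 1 = extrusion, 2 = thickness, 4 = constant width,
# 8 = elevation, 16 = bulge values present, 32 = width values present.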
bs = BitStream(data)
flag: int = bs.read_bit_short()
attribs = self._build_dxf_attribs()
if flag & 4:
attribs["const_width"] = bs.read_bit_double()
if flag & 8:
attribs["elevation"] = bs.read_bit_double()
if flag & 2:
attribs["thickness"] = bs.read_bit_double()
if flag & 1:
attribs["extrusion"] = Vec3(bs.read_bit_double(3))
num_points = bs.read_bit_long()
if flag & 16:
num_bulges = bs.read_bit_long()
else:
num_bulges = 0
if self.dxfversion >= "AC1024": # R2010+
vertex_id_count = bs.read_bit_long()
else:
vertex_id_count = 0
if flag & 32:
num_width = bs.read_bit_long()
else:
num_width = 0
# ignore DXF R13/14 special vertex order
vertices: List[Tuple[float, float]] = [bs.read_raw_double(2)] # type: ignore
prev_point = vertices[-1]
for _ in range(num_points - 1):
x = bs.read_bit_double_default(default=prev_point[0]) # type: ignore
y = bs.read_bit_double_default(default=prev_point[1]) # type: ignore
prev_point = (x, y)
vertices.append(prev_point)
bulges: List[float] = [bs.read_bit_double() for _ in range(num_bulges)]
vertex_ids: List[int] = [
bs.read_bit_long() for _ in range(vertex_id_count)
]
widths: List[Tuple[float, float]] = [
(bs.read_bit_double(), bs.read_bit_double())
for _ in range(num_width)
]
if len(bulges) == 0:
bulges = list(repeat(0, num_points))
if len(widths) == 0:
widths = list(repeat((0, 0), num_points))
points: List[Sequence[float]] = []
for v, w, b in zip(vertices, widths, bulges):
points.append((v[0], v[1], w[0], w[1], b))
lwpolyline = cast(
"LWPolyline", self._factory("LWPOLYLINE", dxfattribs=attribs)
)
lwpolyline.set_points(points)
return lwpolyline
def mesh(self, data: bytes):
# Limitations of the PolyFaceMesh entity:
# - all VERTEX entities have to reside on the same layer
# - does not support vertex normals
# - all faces have the same color (no face record)
logger.warning("Untested proxy graphic entity: MESH - Need examples!")
bs = ByteStream(data)
rows, columns = bs.read_struct("<2L")
total_edge_count = (rows - 1) * columns + (columns - 1) * rows
total_face_count = (rows - 1) * (columns - 1)
total_vertex_count = rows * columns
vertices = [Vec3(bs.read_vertex()) for _ in range(total_vertex_count)]
traits = dict()
try:
traits = read_mesh_traits(
bs, total_edge_count, total_face_count, vertex_count=0
)
except struct.error:
logger.error(
"Structure error while parsing traits for MESH proxy graphic"
)
if traits:
# apply traits
pass
# create PolyMesh entity
attribs = self._build_dxf_attribs()
attribs["m_count"] = rows
attribs["n_count"] = columns
attribs["flags"] = const.POLYLINE_3D_POLYMESH
polymesh = cast(
"Polymesh", self._factory("POLYLINE", dxfattribs=attribs)
)
polymesh.append_vertices(vertices)
return polymesh
def shell(self, data: bytes):
# Limitations of the PolyFaceMesh entity:
# - all VERTEX entities have to reside on the same layer
# - does not support vertex normals
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
attribs["flags"] = const.POLYLINE_POLYFACE
polyface = cast(
"Polyface", self._factory("POLYLINE", dxfattribs=attribs)
)
total_vertex_count = bs.read_long()
vertices = [Vec3(bs.read_vertex()) for _ in range(total_vertex_count)]
face_entry_count = bs.read_long()
faces = []
read_count: int = 0
total_face_count: int = 0
total_edge_count: int = 0
while read_count < face_entry_count:
edge_count = abs(bs.read_signed_long())
read_count += 1 + edge_count
face_indices = [bs.read_long() for _ in range(edge_count)]
face = [vertices[index] for index in face_indices]
total_face_count += 1
total_edge_count += edge_count
faces.append(face)
traits = dict()
try:
traits = read_mesh_traits(
bs, total_edge_count, total_face_count, vertex_count=0
)
except struct.error:
logger.error(
"Structure error while parsing traits for SHELL proxy graphic"
)
polyface.append_faces(faces)
if traits:
face_traits = traits.get("faces")
if face_traits:
face_colors = face_traits.get("colors")
if face_colors:
logger.warning(
"Untested proxy graphic feature for SHELL: "
"apply face colors - Need examples!"
)
assert isinstance(face_colors, list)
_apply_face_colors(polyface, face_colors)
polyface.optimize()
return polyface
def text(self, data: bytes):
return self._text(data, unicode=False)
def unicode_text(self, data: bytes):
return self._text(data, unicode=True)
def _text(self, data: bytes, unicode: bool = False):
bs = ByteStream(data)
start_point = Vec3(bs.read_vertex())
normal = Vec3(bs.read_vertex())
text_direction = Vec3(bs.read_vertex())
height, width_factor, oblique_angle = bs.read_struct("<3d")
if unicode:
text = bs.read_padded_unicode_string()
else:
text = bs.read_padded_string()
attribs = self._build_dxf_attribs()
attribs["insert"] = start_point
attribs["text"] = text
attribs["height"] = height
attribs["width"] = width_factor
attribs["rotation"] = text_direction.angle_deg
attribs["oblique"] = math.degrees(oblique_angle)
attribs["extrusion"] = normal
return self._factory("TEXT", dxfattribs=attribs)
def text2(self, data: bytes):
bs = ByteStream(data)
start_point = Vec3(bs.read_vertex())
normal = Vec3(bs.read_vertex())
text_direction = Vec3(bs.read_vertex())
text = bs.read_padded_string()
ignore_length_of_string, raw = bs.read_struct("<2l")
(
height,
width_factor,
oblique_angle,
tracking_percentage,
) = bs.read_struct("<4d")
(
is_backwards,
is_upside_down,
is_vertical,
is_underline,
is_overline,
) = bs.read_struct("<5L")
font_filename = bs.read_padded_string()
big_font_filename = bs.read_padded_string()
attribs = self._build_dxf_attribs()
attribs["insert"] = start_point
attribs["text"] = text
attribs["height"] = height
attribs["width"] = width_factor
attribs["rotation"] = text_direction.angle_deg
attribs["oblique"] = math.degrees(oblique_angle)
attribs["style"] = self._get_style(font_filename, big_font_filename)
attribs["text_generation_flag"] = 2 * is_backwards + 4 * is_upside_down
attribs["extrusion"] = normal
return self._factory("TEXT", dxfattribs=attribs)
def unicode_text2(self, data: bytes):
bs = ByteStream(data)
start_point = Vec3(bs.read_vertex())
normal = Vec3(bs.read_vertex())
text_direction = Vec3(bs.read_vertex())
text = bs.read_padded_unicode_string()
ignore_length_of_string, ignore_raw = bs.read_struct("<2l")
(
height,
width_factor,
oblique_angle,
tracking_percentage,
) = bs.read_struct("<4d")
(
is_backwards,
is_upside_down,
is_vertical,
is_underline,
is_overline,
) = bs.read_struct("<5L")
is_bold, is_italic, charset, pitch = bs.read_struct("<4L")
type_face = bs.read_padded_unicode_string()
font_filename = bs.read_padded_unicode_string()
big_font_filename = bs.read_padded_unicode_string()
attribs = self._build_dxf_attribs()
attribs["insert"] = start_point
attribs["text"] = text
attribs["height"] = height
attribs["width"] = width_factor
attribs["rotation"] = text_direction.angle_deg
attribs["oblique"] = math.degrees(oblique_angle)
attribs["style"] = self._get_style(font_filename, big_font_filename)
attribs["text_generation_flag"] = 2 * is_backwards + 4 * is_upside_down
attribs["extrusion"] = normal
return self._factory("TEXT", dxfattribs=attribs)
def xline(self, data: bytes):
return self._xline(data, "XLINE")
def ray(self, data: bytes):
return self._xline(data, "RAY")
def _xline(self, data: bytes, type_: str):
logger.warning(
"Untested proxy graphic entity: RAY/XLINE - Need examples!"
)
bs = ByteStream(data)
attribs = self._build_dxf_attribs()
start_point = Vec3(bs.read_vertex())
other_point = Vec3(bs.read_vertex())
attribs["start"] = start_point
attribs["unit_vector"] = (other_point - start_point).normalize()
return self._factory(type_, dxfattribs=attribs)
def _get_style(self, font: str, bigfont: str) -> str:
self.required_fonts.add(font)
if font in self.textstyles:
style = self.textstyles[font]
else:
style = font
if self._doc and not self._doc.styles.has_entry(style):
self._doc.styles.new(
font, dxfattribs={"font": font, "bigfont": bigfont}
)
self.textstyles[font] = style
return style
def _load_vertices(self, data: bytes, load_normal=False):
normal = Z_AXIS
bs = ByteStream(data)
count = bs.read_long()
if load_normal:
count += 1
vertices = []
while count > 0:
vertices.append(Vec3(bs.read_struct("<3d")))
count -= 1
if load_normal:
normal = vertices.pop()
return vertices, normal
def _build_dxf_attribs(self) -> Dict:
attribs: Dict[str, Any] = dict()
if self.layer != "0":
attribs["layer"] = self.layer
if self.color != const.BYLAYER:
attribs["color"] = self.color
if self.linetype != "BYLAYER":
attribs["linetype"] = self.linetype
if self.lineweight != const.LINEWEIGHT_DEFAULT:
attribs["lineweight"] = self.lineweight
if self.ltscale != 1.0:
attribs["ltscale"] = self.ltscale
if self.true_color is not None:
attribs["true_color"] = self.true_color
return attribs
class ProxyGraphicDebugger(ProxyGraphic):
def __init__(self, data: bytes, doc: "Drawing" = None, debug_stream=None):
super(ProxyGraphicDebugger, self).__init__(data, doc)
if debug_stream is None:
debug_stream = sys.stdout
self._debug_stream = debug_stream
def log_entities(self):
self.log_separator(char="=", newline=False)
self.log_message("Create virtual DXF entities:")
self.log_separator(newline=False)
for entity in self.virtual_entities():
self.log_message(f"\n * {entity.dxftype()}")
self.log_message(f" * {entity.graphic_properties()}\n")
self.log_separator(char="=")
def log_commands(self):
self.log_separator(char="=", newline=False)
self.log_message("Raw proxy commands:")
self.log_separator(newline=False)
for index, size, cmd in self.info():
self.log_message(f"Command: {cmd} Index: {index} Size: {size}")
self.log_separator(char="=")
def log_separator(self, char="-", newline=True):
self.log_message(char * 79)
if newline:
self.log_message("")
def log_message(self, msg: str):
print(msg, file=self._debug_stream)
def log_state(self):
self.log_message("> " + self.get_state())
def get_state(self) -> str:
return (
f"ly: '{self.layer}', clr: {self.color}, lt: {self.linetype}, "
f"lw: {self.lineweight}, ltscale: {self.ltscale}, "
f"rgb: {self.true_color}, fill: {self.fill}"
)
def attribute_color(self, data: bytes):
self.log_message("Command: set COLOR")
super().attribute_color(data)
self.log_state()
def attribute_layer(self, data: bytes):
self.log_message("Command: set LAYER")
super().attribute_layer(data)
self.log_state()
def attribute_linetype(self, data: bytes):
self.log_message("Command: set LINETYPE")
super().attribute_linetype(data)
self.log_state()
def attribute_true_color(self, data: bytes):
self.log_message("Command: set TRUE-COLOR")
super().attribute_true_color(data)
self.log_state()
def attribute_lineweight(self, data: bytes):
self.log_message("Command: set LINEWEIGHT")
super().attribute_lineweight(data)
self.log_state()
def attribute_ltscale(self, data: bytes):
self.log_message("Command: set LTSCALE")
super().attribute_ltscale(data)
self.log_state()
def attribute_fill(self, data: bytes):
self.log_message("Command: set FILL")
super().attribute_fill(data)
self.log_state()
def _apply_face_colors(polyface: "Polyface", colors: List[int]) -> None:
color_count: int = len(colors)
if color_count == 0:
return
index: int = 0
for vertex in polyface.vertices:
if vertex.is_face_record:
vertex.dxf.color = colors[index]
index += 1
if index >= color_count:
return
| mozman/ezdxf | src/ezdxf/proxygraphic.py | Python | mit | 30,574 | 0.000229 |
import logging
import os
# URL to clone product_details JSON files from.
# Include trailing slash.
PROD_DETAILS_URL = 'http://svn.mozilla.org/libs/product-details/json/'
# Target dir to drop JSON files into (must be writable)
PROD_DETAILS_DIR = os.path.join(os.path.dirname(__file__), 'json')
# log level.
LOG_LEVEL = logging.INFO
| pmclanahan/django-mozilla-product-details | product_details/settings_defaults.py | Python | bsd-3-clause | 335 | 0 |
from collections import Counter
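# Score each entry of `inf` by how often its words occur in the question `q`
# and return the best-scoring entry (earliest on ties), or None if no entry
# shares a word with the question.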
def answer(q,inf):
s = Counter(q.split(' ')); r = [-1,-1]
for i,j in enumerate(inf):
check = sum(s.get(w,0) for w in j.split(' '))
if check != 0 and check > r[1]: r = [i,check]
return None if r == [-1,-1] else inf[r[0]]
| Orange9000/Codewars | Solutions/beta/beta_answer_the_students_questions.py | Python | mit | 293 | 0.040956 |
# Standard Library Imports
from datetime import datetime
# 3rd Party Imports
# Local Imports
from PokeAlarm import Unknown
from . import BaseEvent
from PokeAlarm.Utils import get_gmaps_link, get_applemaps_link, \
get_waze_link, get_dist_as_str, get_base_types, get_type_emoji
from PokeAlarm.Utilities.QuestUtils import reward_string, get_item_id, \
get_quest_image
class QuestEvent(BaseEvent):
""" Event representing the discovery of a Quest. """
def __init__(self, data):
""" Creates a new Quest Event based on the given dict. """
super(QuestEvent, self).__init__('quests')
check_for_none = BaseEvent.check_for_none
# Identification
self.stop_id = data['pokestop_id']
self.stop_name = check_for_none(
str, data.get('pokestop_name', data.get('name')), Unknown.REGULAR)
self.stop_image = check_for_none(
str, data.get('pokestop_url', data.get('url')), Unknown.REGULAR)
# Location
self.lat = float(data['latitude'])
self.lng = float(data['longitude'])
# Completed by Manager
self.distance = Unknown.SMALL
self.direction = Unknown.TINY
# Used to reject
self.name = self.stop_id
self.geofence = Unknown.REGULAR
self.custom_dts = {}
# Quest Details
self.quest_type_raw = data['quest_type']
self.quest_type_id = data.get('quest_type_raw')
self.quest_target = data.get('quest_target')
self.quest_task_raw = data.get('quest_task')
self.quest_condition_raw = data.get('quest_condition')
self.quest_template = data.get('quest_template')
self.last_modified = datetime.utcfromtimestamp(data['timestamp'])
# Reward Details
self.reward_type_id = data['quest_reward_type_raw']
self.reward_type_raw = data.get('quest_reward_type')
self.reward_amount = data.get('item_amount', 1)
# Monster Reward Details
self.monster_id = data.get('pokemon_id', 0)
self.monster_form_id = data.get('pokemon_form', 0)
self.monster_costume_id = data.get('pokemon_costume', 0)
self.monster_types = get_base_types(self.monster_id) \
if self.monster_id != 0 else [0, 0]
# Item Reward Details
self.item_amount = self.reward_amount
self.item_type = data.get('item_type')
self.item_id = data.get('item_id', 0)
def generate_dts(self, locale, timezone, units):
""" Return a dict with all the DTS for this event. """
form_name = locale.get_form_name(self.monster_id, self.monster_form_id)
costume_name = locale.get_costume_name(
self.monster_id, self.monster_costume_id)
type1 = locale.get_type_name(self.monster_types[0])
type2 = locale.get_type_name(self.monster_types[1])
dts = self.custom_dts.copy()
dts.update({
# Identification
'stop_id': self.stop_id,
'stop_name': self.stop_name,
'stop_image': self.stop_image,
# Location
'lat': self.lat,
'lng': self.lng,
'lat_5': "{:.5f}".format(self.lat),
'lng_5': "{:.5f}".format(self.lng),
'distance': (
get_dist_as_str(self.distance, units)
if Unknown.is_not(self.distance) else Unknown.SMALL),
'direction': self.direction,
'gmaps': get_gmaps_link(self.lat, self.lng),
'applemaps': get_applemaps_link(self.lat, self.lng),
'waze': get_waze_link(self.lat, self.lng),
'geofence': self.geofence,
# Quest Details
# ToDo: Interpret the `quest_condition` field and use that instead
# of `quest_type`
# Will be able to better serve manager specific locales
# also do this for `quest_task`
'quest_type': self.quest_type_raw,
'quest_type_id': self.quest_type_id,
'quest_target': self.quest_target,
'quest_task': self.quest_task_raw,
'quest_template': self.quest_template,
'last_modified': self.last_modified,
'quest_condition': self.quest_condition_raw,
'quest_image': get_quest_image(self),
# Reward Details
'reward_type_id': self.reward_type_id,
'reward_type': locale.get_quest_type_name(self.reward_type_id),
'reward_type_raw': self.reward_type_raw,
'reward_amount': self.item_amount,
'reward': reward_string(self, locale),
# Monster Reward Details
'mon_name': locale.get_pokemon_name(self.monster_id),
'mon_id': self.monster_id,
'mon_id_3': "{:03}".format(self.monster_id),
'form': form_name,
'form_or_empty': Unknown.or_empty(form_name),
'form_id': self.monster_form_id,
'form_id_2': "{:02d}".format(self.monster_form_id),
'form_id_3': "{:03d}".format(self.monster_form_id),
'costume': costume_name,
'costume_or_empty': Unknown.or_empty(costume_name),
'costume_id': self.monster_costume_id,
'costume_id_2': "{:02d}".format(self.monster_costume_id),
'costume_id_3': "{:03d}".format(self.monster_costume_id),
'type1': type1,
'type1_or_empty': Unknown.or_empty(type1),
'type1_emoji': Unknown.or_empty(get_type_emoji(
self.monster_types[0])),
'type2': type2,
'type2_or_empty': Unknown.or_empty(type2),
'type2_emoji': Unknown.or_empty(get_type_emoji(
self.monster_types[1])),
'types': (
"{}/{}".format(type1, type2)
if Unknown.is_not(type2) else type1),
'types_emoji': (
"{}{}".format(
get_type_emoji(self.monster_types[0]),
get_type_emoji(self.monster_types[1]))
if Unknown.is_not(type2)
else get_type_emoji(self.monster_types[0])),
# Item Reward Details
'raw_item_type': self.item_type,
'item': get_item_id(self.item_id),
'item_id': self.item_id,
'item_id_4': "{:04d}".format(self.item_id)
})
return dts
| kvangent/PokeAlarm | PokeAlarm/Events/QuestEvent.py | Python | agpl-3.0 | 6,377 | 0 |
from llvmlite import binding as ll
from llvmlite import ir
from warnings import warn
from numba.core import config, serialize
from numba.core.codegen import Codegen, CodeLibrary
from numba.core.errors import NumbaInvalidConfigWarning
from .cudadrv import devices, driver, nvvm
import ctypes
import numpy as np
import os
import subprocess
import tempfile
CUDA_TRIPLE = 'nvptx64-nvidia-cuda'
def disassemble_cubin(cubin):
# nvdisasm only accepts input from a file, so we need to write out to a
# temp file and clean up afterwards.
fd = None
fname = None
try:
fd, fname = tempfile.mkstemp()
with open(fname, 'wb') as f:
f.write(cubin)
try:
cp = subprocess.run(['nvdisasm', fname], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except FileNotFoundError as e:
if e.filename == 'nvdisasm':
msg = ("nvdisasm is required for SASS inspection, and has not "
"been found.\n\nYou may need to install the CUDA "
"toolkit and ensure that it is available on your "
"PATH.\n")
raise RuntimeError(msg)
return cp.stdout.decode('utf-8')
finally:
if fd is not None:
os.close(fd)
if fname is not None:
os.unlink(fname)
class CUDACodeLibrary(serialize.ReduceMixin, CodeLibrary):
"""
The CUDACodeLibrary generates PTX, SASS, cubins for multiple different
compute capabilities. It also loads cubins to multiple devices (via
get_cufunc), which may be of different compute capabilities.
"""
def __init__(self, codegen, name, entry_name=None, max_registers=None,
nvvm_options=None):
"""
codegen:
Codegen object.
name:
Name of the function in the source.
entry_name:
Name of the kernel function in the binary, if this is a global
kernel and not a device function.
max_registers:
The maximum register usage to aim for when linking.
nvvm_options:
Dict of options to pass to NVVM.
"""
super().__init__(codegen, name)
# The llvmlite module for this library.
self._module = None
# CodeLibrary objects that will be "linked" into this library. The
# modules within them are compiled from NVVM IR to PTX along with the
# IR from this module - in that sense they are "linked" by NVVM at PTX
# generation time, rather than at link time.
self._linking_libraries = set()
# Files to link with the generated PTX. These are linked using the
# Driver API at link time.
self._linking_files = set()
# Maps CC -> PTX string
self._ptx_cache = {}
# Maps CC -> cubin
self._cubin_cache = {}
# Maps CC -> linker info output for cubin
self._linkerinfo_cache = {}
# Maps Device numeric ID -> cufunc
self._cufunc_cache = {}
self._max_registers = max_registers
if nvvm_options is None:
nvvm_options = {}
self._nvvm_options = nvvm_options
self._entry_name = entry_name
def get_llvm_str(self):
return str(self._module)
def get_asm_str(self, cc=None):
return self._join_ptxes(self._get_ptxes(cc=cc))
def _get_ptxes(self, cc=None):
if not cc:
ctx = devices.get_context()
device = ctx.device
cc = device.compute_capability
ptxes = self._ptx_cache.get(cc, None)
if ptxes:
return ptxes
arch = nvvm.get_arch_option(*cc)
options = self._nvvm_options.copy()
options['arch'] = arch
if not nvvm.NVVM().is_nvvm70:
# Avoid enabling debug for NVVM 3.4 as it has various issues. We
# need to warn the user that we're doing this if any of the
# functions that they're compiling have `debug=True` set, which we
# can determine by checking the NVVM options.
for lib in self.linking_libraries:
if lib._nvvm_options.get('debug'):
msg = ("debuginfo is not generated for CUDA versions "
f"< 11.2 (debug=True on function: {lib.name})")
warn(NumbaInvalidConfigWarning(msg))
options['debug'] = False
irs = [str(mod) for mod in self.modules]
if options.get('debug', False):
# If we're compiling with debug, we need to compile modules with
# NVVM one at a time, because it does not support multiple modules
# with debug enabled:
# https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#source-level-debugging-support
ptxes = [nvvm.llvm_to_ptx(ir, **options) for ir in irs]
else:
# Otherwise, we compile all modules with NVVM at once because this
# results in better optimization than separate compilation.
ptxes = [nvvm.llvm_to_ptx(irs, **options)]
# Sometimes the result from NVVM contains trailing whitespace and
# nulls, which we strip so that the assembly dump looks a little
# tidier.
ptxes = [x.decode().strip('\x00').strip() for x in ptxes]
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self._name).center(80, '-'))
print(self._join_ptxes(ptxes))
print('=' * 80)
self._ptx_cache[cc] = ptxes
return ptxes
def _join_ptxes(self, ptxes):
return "\n\n".join(ptxes)
def get_cubin(self, cc=None):
if cc is None:
ctx = devices.get_context()
device = ctx.device
cc = device.compute_capability
cubin = self._cubin_cache.get(cc, None)
if cubin:
return cubin
linker = driver.Linker(max_registers=self._max_registers, cc=cc)
ptxes = self._get_ptxes(cc=cc)
for ptx in ptxes:
linker.add_ptx(ptx.encode())
for path in self._linking_files:
linker.add_file_guess_ext(path)
cubin_buf, size = linker.complete()
# We take a copy of the cubin because it's owned by the linker
cubin_ptr = ctypes.cast(cubin_buf, ctypes.POINTER(ctypes.c_char))
cubin = bytes(np.ctypeslib.as_array(cubin_ptr, shape=(size,)))
self._cubin_cache[cc] = cubin
self._linkerinfo_cache[cc] = linker.info_log
return cubin
def get_cufunc(self):
if self._entry_name is None:
msg = "Missing entry_name - are you trying to get the cufunc " \
"for a device function?"
raise RuntimeError(msg)
ctx = devices.get_context()
device = ctx.device
cufunc = self._cufunc_cache.get(device.id, None)
if cufunc:
return cufunc
cubin = self.get_cubin(cc=device.compute_capability)
module = ctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self._entry_name)
# Populate caches
self._cufunc_cache[device.id] = cufunc
return cufunc
def get_linkerinfo(self, cc):
try:
return self._linkerinfo_cache[cc]
except KeyError:
raise KeyError(f'No linkerinfo for CC {cc}')
def get_sass(self, cc=None):
return disassemble_cubin(self.get_cubin(cc=cc))
def add_ir_module(self, mod):
self._raise_if_finalized()
if self._module is not None:
raise RuntimeError('CUDACodeLibrary only supports one module')
self._module = mod
def add_linking_library(self, library):
library._ensure_finalized()
# We don't want to allow linking more libraries in after finalization
# because our linked libraries are modified by the finalization, and we
# won't be able to finalize again after adding new ones
self._raise_if_finalized()
self._linking_libraries.add(library)
def add_linking_file(self, filepath):
self._linking_files.add(filepath)
def get_function(self, name):
for fn in self._module.functions:
if fn.name == name:
return fn
raise KeyError(f'Function {name} not found')
@property
def modules(self):
return [self._module] + [mod for lib in self._linking_libraries
for mod in lib.modules]
@property
def linking_libraries(self):
# Libraries we link to may link to other libraries, so we recursively
# traverse the linking libraries property to build up a list of all
# linked libraries.
libs = []
for lib in self._linking_libraries:
libs.extend(lib.linking_libraries)
libs.append(lib)
return libs
def finalize(self):
# Unlike the CPUCodeLibrary, we don't invoke the binding layer here -
# we only adjust the linkage of functions. Global kernels (with
# external linkage) have their linkage untouched. Device functions are
# set linkonce_odr to prevent them appearing in the PTX.
self._raise_if_finalized()
# Note in-place modification of the linkage of functions in linked
# libraries. This presently causes no issues as only device functions
# are shared across code libraries, so they would always need their
# linkage set to linkonce_odr. If in a future scenario some code
# libraries require linkonce_odr linkage of functions in linked
# modules, and another code library requires another linkage, each code
# library will need to take its own private copy of its linked modules.
#
# See also discussion on PR #890:
# https://github.com/numba/numba/pull/890
#
# We don't adjust the linkage of functions when compiling for debug -
# because the device functions are in separate modules, we need them to
# be externally visible.
for library in self._linking_libraries:
for mod in library.modules:
for fn in mod.functions:
if not fn.is_declaration:
if self._nvvm_options.get('debug', False):
fn.linkage = 'weak_odr'
else:
fn.linkage = 'linkonce_odr'
self._finalized = True
def _reduce_states(self):
"""
Reduce the instance for serialization. We retain the PTX and cubins,
but loaded functions are discarded. They are recreated when needed
after deserialization.
"""
if self._linking_files:
msg = ('cannot pickle CUDACodeLibrary function with additional '
'libraries to link against')
raise RuntimeError(msg)
return dict(
codegen=self._codegen,
name=self.name,
entry_name=self._entry_name,
module=self._module,
linking_libraries=self._linking_libraries,
ptx_cache=self._ptx_cache,
cubin_cache=self._cubin_cache,
linkerinfo_cache=self._linkerinfo_cache,
max_registers=self._max_registers,
nvvm_options=self._nvvm_options
)
@classmethod
def _rebuild(cls, codegen, name, entry_name, module, linking_libraries,
ptx_cache, cubin_cache, linkerinfo_cache, max_registers,
nvvm_options):
"""
Rebuild an instance.
"""
instance = cls.__new__(cls)
super(cls, instance).__init__(codegen, name)
instance._entry_name = entry_name
instance._module = module
instance._linking_libraries = linking_libraries
instance._linking_files = set()
instance._ptx_cache = ptx_cache
instance._cubin_cache = cubin_cache
instance._linkerinfo_cache = linkerinfo_cache
instance._cufunc_cache = {}
instance._max_registers = max_registers
        instance._nvvm_options = nvvm_options
        return instance
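# Hedged sketch, not part of the original source: because CUDACodeLibrary mixes
# in serialize.ReduceMixin, a finalized library with no extra linking files can
# round-trip through pickle. PTX/cubin caches are carried across, while loaded
# cufuncs are dropped and recreated lazily (see _reduce_states/_rebuild above).
def _example_pickle_roundtrip(code_library):
    import pickle
    return pickle.loads(pickle.dumps(code_library))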
class JITCUDACodegen(Codegen):
"""
This codegen implementation for CUDA only generates optimized LLVM IR.
Generation of PTX code is done separately (see numba.cuda.compiler).
"""
_library_class = CUDACodeLibrary
def __init__(self, module_name):
self._data_layout = nvvm.default_data_layout
self._target_data = ll.create_target_data(self._data_layout)
def _create_empty_module(self, name):
ir_module = ir.Module(name)
ir_module.triple = CUDA_TRIPLE
if self._data_layout:
ir_module.data_layout = self._data_layout
nvvm.add_ir_version(ir_module)
return ir_module
def _add_module(self, module):
pass
|
cpcloud/numba
|
numba/cuda/codegen.py
|
Python
|
bsd-2-clause
| 12,947
| 0
|
# debugging
from lxml import etree
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.shared_ab_addr import SharedAddrBookAddr
from jnpr.junos.cfg.srx.shared_ab_set import SharedAddrBookSet
class SharedAddrBook(Resource):
"""
[edit security address-book <name>]
Resource <name>
The address book name, string
Manages:
addr - SharedAddrBookAddr resources
set - SharedAddrBookAddrSet resources
"""
PROPERTIES = [
'description',
        '$addrs', # read-only addresses
'$sets', # read-only address-sets
'zone_list' # attached zone
]
def __init__(self, junos, name=None, **kvargs):
if name is None:
# resource-manager
Resource.__init__(self, junos, name, **kvargs)
return
self.addr = SharedAddrBookAddr(junos, parent=self)
self.set = SharedAddrBookSet(junos, parent=self)
self._manages = ['addr', 'set']
Resource.__init__(self, junos, name, **kvargs)
def _xml_at_top(self):
return E.security(
E('address-book', E.name(self._name))
)
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_hook_read_begin(self, xml):
ab = xml.find('.//address-book')
ab.append(E('description'))
ab.append(E('address', JXML.NAMES_ONLY))
ab.append(E('address-set', JXML.NAMES_ONLY))
ab.append(E('attach'))
return True
def _xml_at_res(self, xml):
return xml.find('.//address-book')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['$addrs'] = [name.text for name in as_xml.xpath('address/name')]
to_py['$sets'] = [
name.text for name in as_xml.xpath('address-set/name')]
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_zone_list(self, xml):
x_attach = E('attach')
self._xml_list_property_add_del_names(x_attach,
prop_name='zone_list',
element_name='zone')
xml.append(x_attach)
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
raise RuntimeError("Need to implement!")
def _r_catalog(self):
raise RuntimeError("Need to implement!")
|
dgjnpr/py-junos-eznc
|
lib/jnpr/junos/cfg/srx/shared_ab.py
|
Python
|
apache-2.0
| 2,925
| 0
|
from django.db.models import Aggregate, CharField
class Sql_GroupConcat(Aggregate):
function = 'GROUP_CONCAT'
allow_distinct = True
def __init__(self, expression, separator, distinct=False, ordering=None, **extra):
self.separator = separator
super(Sql_GroupConcat, self).__init__(expression,
distinct='DISTINCT ' if distinct else '',
ordering=' ORDER BY %s' % ordering if ordering is not None else '',
separator=' SEPARATOR "%s"' % separator,
output_field=CharField(),
**extra)
def as_mysql(self, compiler, connection):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s%(separator)s)',
separator=' SEPARATOR \'%s\'' % self.separator)
def as_sql(self, compiler, connection, **extra):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s)',
**extra)
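# Hedged usage sketch, not part of the original module. The field names 'test'
# and 'title' are hypothetical; any Django queryset with comparable fields
# works. On MySQL this renders GROUP_CONCAT(... SEPARATOR ', '); other backends
# fall back to the generic template without the separator clause.
def _example_group_titles(queryset):
    return queryset.values('test').annotate(
        titles=Sql_GroupConcat('title', separator=', ', distinct=True))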
|
rackerlabs/django-DefectDojo
|
dojo/components/sql_group_concat.py
|
Python
|
bsd-3-clause
| 1,326
| 0.004525
|
import functools
import os
import re
import shutil
import subprocess
import sys
import tempfile
from io import StringIO
from subprocess import TimeoutExpired
from catkin_tools.commands.catkin import main as catkin_main
TESTS_DIR = os.path.dirname(__file__)
MOCK_DIR = os.path.join(TESTS_DIR, 'mock_resources')
def catkin_success(args, env={}):
orig_environ = dict(os.environ)
try:
os.environ.update(env)
catkin_main(args)
except SystemExit as exc:
ret = exc.code
if ret != 0:
import traceback
traceback.print_exc()
finally:
os.environ = orig_environ
return ret == 0
def catkin_failure(args, env={}):
orig_environ = dict(os.environ)
try:
os.environ.update(env)
catkin_main(args)
except SystemExit as exc:
ret = exc.code
finally:
os.environ = orig_environ
return ret != 0
class AssertRaisesContext(object):
def __init__(self, expected, expected_regex=None):
self.expected = expected
self.expected_regex = expected_regex
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if self.expected is None:
if exc_type is None:
return True
else:
raise
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise AssertionError("{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
raise
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
expected_regex = re.compile(expected_regex)
if not expected_regex.search(str(exc_value)):
raise AssertionError("'{0}' does not match '{1}'".format(expected_regex.pattern, str(exc_value)))
return True
class redirected_stdio(object):
def __enter__(self):
self.original_stdout = sys.stdout
self.original_stderr = sys.stderr
self.out = StringIO()
self.err = StringIO()
sys.stdout = self.out
sys.stderr = self.err
return self.out, self.err
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.original_stdout
sys.stderr = self.original_stderr
print(self.out.getvalue())
class temporary_directory(object):
def __init__(self, prefix=''):
self.prefix = prefix
self.delete = False
def __enter__(self):
self.original_cwd = os.getcwd()
self.temp_path = tempfile.mkdtemp(prefix=self.prefix)
os.chdir(self.temp_path)
return self.temp_path
def __exit__(self, exc_type, exc_value, traceback):
if self.delete and self.temp_path and os.path.exists(self.temp_path):
            print('Deleting temporary testing directory: %s' % self.temp_path)
shutil.rmtree(self.temp_path)
if self.original_cwd and os.path.exists(self.original_cwd):
os.chdir(self.original_cwd)
def in_temporary_directory(f):
@functools.wraps(f)
def decorated(*args, **kwds):
with temporary_directory() as directory:
from inspect import getargspec
            # If the wrapped function accepts a 'directory' argument and the
            # caller did not already supply one, inject the temp directory.
if 'directory' not in kwds and 'directory' in getargspec(f)[0]:
kwds['directory'] = directory
return f(*args, **kwds)
decorated.__name__ = f.__name__
return decorated
def run(args, **kwargs):
"""
Call to Popen, returns (errcode, stdout, stderr)
"""
print("run:", args)
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
cwd=kwargs.get('cwd', os.getcwd()))
print("P==", p.__dict__)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout, stderr)
def assert_cmd_success(cmd, **kwargs):
"""
Asserts that running a command returns zero.
returns: stdout
"""
print(">>>", cmd, kwargs)
(r, out, err) = run(cmd, **kwargs)
print("<<<", str(out))
assert r == 0, "cmd failed with result %s:\n %s " % (r, str(cmd))
return out
def assert_cmd_failure(cmd, **kwargs):
"""
Asserts that running a command returns non-zero.
returns: stdout
"""
print(">>>", cmd, kwargs)
    (r, out, err) = run(cmd, **kwargs)
print("<<<", str(out))
assert 0 != r, "cmd succeeded, but it should fail: %s result=%u\noutput=\n%s" % (cmd, r, out)
return out
def assert_files_exist(prefix, files):
"""
Assert that all files exist in the prefix.
"""
for f in files:
p = os.path.join(prefix, f)
print("Checking for", p)
assert os.path.exists(p), "%s doesn't exist" % p
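# Illustrative sketch, not part of the original helpers: how the utilities
# above are typically combined in a test. The file name 'out.txt' is an
# example value only.
@in_temporary_directory
def _example_smoke_test(directory=None):
    # Create a file via a subprocess inside the fresh temp dir, then check it.
    assert_cmd_success([sys.executable, '-c', "open('out.txt', 'w').close()"],
                       cwd=directory)
    assert_files_exist(directory, ['out.txt'])
    # A failing command should trip the assertion inside assert_cmd_success.
    with AssertRaisesContext(AssertionError, 'cmd failed'):
        assert_cmd_success([sys.executable, '-c', 'raise SystemExit(1)'])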
|
catkin/catkin_tools
|
tests/utils.py
|
Python
|
apache-2.0
| 4,974
| 0.000402
|
#!/usr/bin/env python
#
# Copyright (c) 2007-2009 Corey Goldberg (corey@goldb.org)
# License: GNU GPLv3
#
#
# This file is part of Pylot.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See the GNU General Public License
# for more details.
#
#
# Original code by David Solomon (dave.c.solomon@gmail.com)
#
#
# Only works on Windows.
# Browser capture tool. Builds Pylot test cases from an IE browsing session.
# You must have the Win32 Extensions for Python installed
# http://sourceforge.net/projects/pywin32/
import sys
import threading
import pythoncom
from win32com.client import Dispatch, WithEvents
stop_event = threading.Event()
finished = False
class EventSink(object):
def OnBeforeNavigate2(self, *args):
print ' <case>'
url = args[1]
post_data = args[4]
headers = args[5]
print ' <url>%s</url>' % url
if post_data:
print ' <method>POST</method>'
print ' <body><![CDATA[%s]]></body>' % post_data
if headers:
print ' <add_header>%s</add_header>' % headers
print " </case>"
stop_event.set()
def OnQuit(self):
global finished
finished = True
ie.Visible = 0
stop_event.set()
ie = Dispatch('InternetExplorer.Application', EventSink)
ev = WithEvents(ie, EventSink)
ie.Visible = 1
print '<testcases>'
while not finished:
pythoncom.PumpWaitingMessages()
stop_event.wait(.05)
stop_event.clear()
print '</testcases>'
|
Xavierwei/porsche_lemans
|
web/api/performance/util/pylot_win_recorder.py
|
Python
|
mit
| 1,833
| 0.00491
|
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from addons.urls import ADDON_ID
from amo.urlresolvers import reverse
from . import views
urlpatterns = patterns('',
# AMO stuff.
url('^$', views.index, name='zadmin.index'),
url('^models$', lambda r: redirect('admin:index'), name='zadmin.home'),
url('^addon/manage/%s/$' % ADDON_ID,
views.addon_manage, name='zadmin.addon_manage'),
url('^addon/recalc-hash/(?P<file_id>\d+)/', views.recalc_hash,
name='zadmin.recalc_hash'),
url('^env$', views.env, name='amo.env'),
url('^hera', views.hera, name='zadmin.hera'),
url('^memcache$', views.memcache, name='zadmin.memcache'),
url('^settings', views.show_settings, name='zadmin.settings'),
url('^fix-disabled', views.fix_disabled_file, name='zadmin.fix-disabled'),
url(r'^email_preview/(?P<topic>.*)\.csv$',
views.email_preview_csv, name='zadmin.email_preview_csv'),
url('^mail$', views.mail, name='zadmin.mail'),
url('^email-devs$', views.email_devs, name='zadmin.email_devs'),
url('^generate-error$', views.generate_error,
name='zadmin.generate-error'),
url('^export_email_addresses$', views.export_email_addresses,
name='zadmin.export_email_addresses'),
url('^email_addresses_file$', views.email_addresses_file,
name='zadmin.email_addresses_file'),
url('^price-tiers$', views.price_tiers, name='zadmin.price_tiers'),
# The Django admin.
url('^models/', include(admin.site.urls)),
url('^models/(?P<app_id>.+)/(?P<model_id>.+)/search.json$',
views.general_search, name='zadmin.search'),
)
# Hijack the admin's login to use our pages.
def login(request):
# If someone is already auth'd then they're getting directed to login()
# because they don't have sufficient permissions.
if request.user.is_authenticated():
raise PermissionDenied
else:
return redirect('%s?to=%s' % (reverse('users.login'), request.path))
admin.site.login = login
|
jinankjain/zamboni
|
apps/zadmin/urls.py
|
Python
|
bsd-3-clause
| 2,133
| 0.001406
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
import colander
from pyramid.view import view_config
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.form import FormView
from pontus.schema import Schema, select
from pontus.widget import RadioChoiceWidget
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from lac.views.widget import EmailInputWidget
from lac.content.processes.lac_view_manager.behaviors import (
Improve)
from lac.content.lac_application import CreationCulturelleApplication
from lac import _
class ImproveStudyReport(BasicView):
title = 'Alert improve'
name = 'alertimprove'
template = 'lac:views/lac_view_manager/questionnaire/templates/improve_info.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class Improve1Schema(Schema):
id = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="ID",
missing="improve"
)
url = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title="URL",
missing="None"
)
improvement = colander.SchemaNode(
colander.String(),
widget=deform.widget.TextAreaWidget(rows=3, cols=30),
title=_('Vos suggestions')
)
email = colander.SchemaNode(
colander.String(),
widget=EmailInputWidget(),
validator=colander.All(
colander.Email(),
colander.Length(max=100)
),
title=_('Email')
)
class ImproveFormView(FormView):
title = _('Votre avis')
schema = select(Improve1Schema(),
['id', 'url', 'improvement', 'email'])
behaviors = [Improve]
formid = 'formimprove'
name = 'improveform'
def before_update(self):
user = get_current()
if getattr(user, 'email', ''):
self.schema.get('email').widget = deform.widget.HiddenWidget()
def default_data(self):
user = get_current()
return {'email': getattr(user, 'email', '')}
@view_config(
name='improve',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class ImproveView(MultipleView):
title = _('Votre avis')
name = 'improve'
viewid = 'improve'
template = 'daceui:templates/simple_mergedmultipleview.pt'
views = (ImproveStudyReport, ImproveFormView)
validators = [Improve.get_validator()]
requirements = {'css_links': [],
'js_links': ['lac:static/js/questionnaire.js']}
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{Improve: ImproveView})
|
ecreall/lagendacommun
|
lac/views/lac_view_manager/questionnaire/improve.py
|
Python
|
agpl-3.0
| 3,097
| 0.000969
|
# -*- coding: utf-8 -*-
from ...query.expression import QueryExpression
from .relation import Relation
class HasManyThrough(Relation):
_first_key = None
_second_key = None
_far_parent = None
def __init__(self, query, far_parent, parent, first_key, second_key):
"""
:param query: A Builder instance
:type query: Builder
:param far_parent: The far parent model
:type far_parent: Model
:param parent: The parent model
:type parent: Model
:type first_key: str
:type second_key: str
"""
self._first_key = first_key
self._second_key = second_key
self._far_parent = far_parent
super(HasManyThrough, self).__init__(query, parent)
def add_constraints(self):
"""
Set the base constraints on the relation query.
:rtype: None
"""
parent_table = self._parent.get_table()
self._set_join()
if self._constraints:
self._query.where('%s.%s' % (parent_table, self._first_key), '=', self._far_parent.get_key())
def get_relation_count_query(self, query, parent):
"""
Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder
"""
parent_table = self._parent.get_table()
self._set_join(query)
query.select(QueryExpression('COUNT(*)'))
key = self.wrap('%s.%s' % (parent_table, self._first_key))
return query.where(self.get_has_compare_key(), '=', QueryExpression(key))
def _set_join(self, query=None):
"""
Set the join clause for the query.
"""
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key)
def add_eager_constraints(self, models):
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
table = self._parent.get_table()
self._query.where_in('%s.%s' % (table, self._first_key), self.get_keys(models))
def init_relation(self, models, relation):
"""
Initialize the relation on a set of models.
:type models: list
:type relation: str
"""
for model in models:
model.set_relation(relation, self._related.new_collection())
return models
def match(self, models, results, relation):
"""
Match the eagerly loaded results to their parents.
:type models: list
:type results: Collection
:type relation: str
"""
dictionary = self._build_dictionary(results)
for model in models:
key = model.get_key()
relationship = self.new_instance(model)
if key in dictionary:
value = self._related.new_collection(dictionary[key])
else:
value = self._related.new_collection()
relationship.set_results(value)
model.set_relation(relation, relationship)
return models
def _build_dictionary(self, results):
"""
Build model dictionary keyed by the relation's foreign key.
:param results: The results
:type results: Collection
:rtype: dict
"""
foreign = self._first_key
dictionary = {}
for result in results:
key = getattr(result, foreign)
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(result)
return dictionary
def get_results(self):
"""
Get the results of the relationship.
"""
return self.get()
def get(self, columns=None):
"""
Execute the query as a "select" statement.
:type columns: list
:rtype: orator.Collection
"""
if columns is None:
columns = ['*']
select = self._get_select_columns(columns)
models = self._query.add_select(*select).get_models()
if len(models) > 0:
models = self._query.eager_load_relations(models)
return self._related.new_collection(models)
def _get_select_columns(self, columns=None):
"""
Set the select clause for the relation query.
:param columns: The columns
:type columns: list
:rtype: list
"""
if columns == ['*'] or columns is None:
columns = ['%s.*' % self._related.get_table()]
return columns + ['%s.%s' % (self._parent.get_table(), self._first_key)]
def get_has_compare_key(self):
return self._far_parent.get_qualified_key_name()
def new_instance(self, model):
return HasManyThrough(
self._related.new_query(),
model,
self._parent,
self._first_key,
self._second_key
)
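# Illustrative sketch, not part of the original class: the grouping step that
# _build_dictionary and match() perform, shown on plain objects. Each result is
# bucketed under the through-table key so every parent can pick up its own
# bucket in one pass, which is what keeps eager loading linear.
def _example_group_by_key(results, key_name):
    grouped = {}
    for result in results:
        grouped.setdefault(getattr(result, key_name), []).append(result)
    return grouped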
|
MakarenaLabs/Orator-Google-App-Engine
|
orator/orm/relations/has_many_through.py
|
Python
|
mit
| 5,076
| 0.000985
|
"""
WSGI config for first_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "first_app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
IlyaSergeev/taxi_service
|
first_app/wsgi.py
|
Python
|
mit
| 393
| 0.002545
|
# Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Darwin Session collectors.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
from rekall.entities import definitions
from rekall.plugins.collectors.darwin import common
from rekall.plugins.collectors.darwin import zones
class DarwinTerminalUserInferor3000(common.DarwinEntityCollector):
"""Infers the relationship between usernames and UIDs using tty sessions."""
outputs = ["User"]
collect_args = dict(
terminals=("Terminal/file matches (has component Permissions) and "
"Terminal/session"))
complete_input = True
def collect(self, hint, terminals):
for terminal in terminals:
owner = terminal["Terminal/file"]["Permissions/owner"]
user = terminal["Terminal/session"]["Session/user"]
# Now tell the manager that these two users are the same user.
if owner and user:
yield user.identity | owner.identity
class DarwinTTYZoneCollector(zones.DarwinZoneElementCollector):
outputs = ["Struct/type=tty"]
zone_name = "ttys"
type_name = "tty"
def validate_element(self, tty):
return tty.t_lock == tty
class DarwinClistParser(common.DarwinEntityCollector):
outputs = ["Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(clists="Struct/type is 'clist'")
def collect(self, hint, clists):
for entity in clists:
clist = entity["Struct/base"]
yield [entity.identity,
definitions.Buffer(kind="ring",
state="freed",
contents=clist.recovered_contents,
start=clist.c_cs,
end=clist.c_ce,
size=clist.c_cn)]
class DarwinTTYParser(common.DarwinEntityCollector):
outputs = ["Terminal", "Struct/type=vnode", "Struct/type=clist",
"Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(ttys="Struct/type is 'tty'")
def collect(self, hint, ttys):
for entity in ttys:
file_identity = None
session_identity = None
tty = entity["Struct/base"]
session = tty.t_session.deref()
vnode = session.s_ttyvp
if session:
session_identity = self.manager.identify({
"Struct/base": session})
if vnode:
# Look, it has a vnode!
yield definitions.Struct(base=vnode,
type="vnode")
file_identity = self.manager.identify({
"Struct/base": vnode})
# Yield just the stubs of the input and output ring buffers.
# DarwinClistParser will grab these if it cares.
yield [definitions.Struct(base=tty.t_rawq,
type="clist"),
definitions.Buffer(purpose="terminal_input",
context=entity.identity)]
yield [definitions.Struct(base=tty.t_outq,
type="clist"),
definitions.Buffer(purpose="terminal_output",
context=entity.identity)]
# Last, but not least, the Terminal itself.
yield [entity.identity,
definitions.Terminal(
session=session_identity,
file=file_identity)]
class DarwinSessionParser(common.DarwinEntityCollector):
"""Collects session entities from the memory objects."""
_name = "sessions"
outputs = ["Session",
"User",
"Struct/type=tty",
"Struct/type=proc"]
collect_args = dict(sessions="Struct/type is 'session'")
def collect(self, hint, sessions):
for entity in sessions:
session = entity["Struct/base"]
# Have to sanitize the usernames to prevent issues when comparing
# them later.
username = str(session.s_login).replace("\x00", "")
if username:
user_identity = self.manager.identify({
"User/username": username})
yield [user_identity,
definitions.User(
username=username)]
else:
user_identity = None
sid = session.s_sid
# Turns out, SID is not always unique. This is disabled as it is
# not being currently used, and I need to investigate the causes
# of duplicate sessions occurring on 10.10.
# session_identity = self.manager.identify({
# "Session/sid": sid}) | entity.identity
session_identity = entity.identity
if session.s_ttyp:
yield definitions.Struct(
base=session.s_ttyp,
type="tty")
if session.s_leader and session.s_leader.validate():
yield definitions.Struct(
base=session.s_leader.deref(),
type="proc")
yield [session_identity,
definitions.Session(
user=user_identity,
sid=sid),
definitions.Named(
name="SID %d" % int(sid),
kind="Session")]
class DarwinSessionZoneCollector(zones.DarwinZoneElementCollector):
"""Collects sessions from the sessions allocation zone."""
outputs = ["Struct/type=session"]
zone_name = "session"
type_name = "session"
def validate_element(self, session):
return session.s_count > 0 and session.s_leader.p_argc > 0
class DarwinSessionCollector(common.DarwinEntityCollector):
"""Collects sessions."""
outputs = ["Struct/type=session"]
def collect(self, hint):
session_hash_table_size = self.profile.get_constant_object(
"_sesshash", "unsigned long")
        # The hashtable is an array of session list heads.
session_hash_table = self.profile.get_constant_object(
"_sesshashtbl",
target="Pointer",
target_args=dict(
target="Array",
target_args=dict(
target="sesshashhead",
count=session_hash_table_size.v())))
for sesshashhead in session_hash_table:
for session in sesshashhead.lh_first.walk_list("s_hash.le_next"):
yield definitions.Struct(
base=session,
type="session")
|
chen0031/rekall
|
rekall-core/rekall/plugins/collectors/darwin/sessions.py
|
Python
|
gpl-2.0
| 7,561
| 0.000132
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from designate.central import rpcapi as central_rpcapi
from designate.openstack.common import log as logging
from designate.api.v2.controllers import rest
from designate.api.v2.views import limits as limits_view
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
class LimitsController(rest.RestController):
_view = limits_view.LimitsView()
@pecan.expose(template='json:', content_type='application/json')
def get_all(self):
request = pecan.request
context = pecan.request.environ['context']
absolute_limits = central_api.get_absolute_limits(context)
return self._view.show(context, request, absolute_limits)
|
NeCTAR-RC/designate
|
designate/api/v2/controllers/limits.py
|
Python
|
apache-2.0
| 1,347
| 0
|
import json
from collections import OrderedDict
from django import forms
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from pretix.base.payment import BasePaymentProvider
class BankTransfer(BasePaymentProvider):
identifier = 'banktransfer'
verbose_name = _('Bank transfer')
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('bank_details',
forms.CharField(
widget=forms.Textarea,
label=_('Bank account details'),
))
]
)
def payment_form_render(self, request) -> str:
template = get_template('pretixplugins/banktransfer/checkout_payment_form.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings}
return template.render(ctx)
def checkout_prepare(self, request, total):
return True
def payment_is_valid_session(self, request):
return True
def checkout_confirm_render(self, request):
form = self.payment_form(request)
template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html')
ctx = {'request': request, 'form': form, 'settings': self.settings}
return template.render(ctx)
def order_pending_mail_render(self, order) -> str:
template = get_template('pretixplugins/banktransfer/email/order_pending.txt')
ctx = {'event': self.event, 'order': order, 'settings': self.settings}
return template.render(ctx)
def order_pending_render(self, request, order) -> str:
template = get_template('pretixplugins/banktransfer/pending.html')
ctx = {'request': request, 'order': order, 'settings': self.settings}
return template.render(ctx)
def order_control_render(self, request, order) -> str:
if order.payment_info:
payment_info = json.loads(order.payment_info)
else:
payment_info = None
template = get_template('pretixplugins/banktransfer/control.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings,
'payment_info': payment_info, 'order': order}
return template.render(ctx)
|
akuks/pretix
|
src/pretix/plugins/banktransfer/payment.py
|
Python
|
apache-2.0
| 2,341
| 0.002136
|
# This file is part of ICLS.
#
# ICLS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ICLS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ICLS. If not, see <http://www.gnu.org/licenses/>.
import xml.dom.minidom
from time import strftime, strptime
from sys import exit
from textwrap import wrap
from os import path
def colorize(the_color='blue',entry='',new_line=0):
color={'gray':30,'green':32,'red':31,'blue':34,'magenta':35,'cyan':36,'white':37,'highgreen':42,'highblue':44,'highred':41,'highgray':47}
if new_line==1:
new_line='\n'
else:
new_line=''
return_me='\033[1;'+str(color[the_color])+'m'+entry+'\033[1;m'+new_line
return return_me
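# Illustrative sketch, not part of the original helpers: colorize() only wraps
# the entry in ANSI escape codes, so the colours render on ANSI-capable
# terminals. The strings below are example values only.
def _example_colorize():
    print colorize('red', 'ERROR', 1) + colorize('green', 'all good')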
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# Only if error is one that halts things, stop script
def aws_print_error(error_obj):
error_code=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Code')[0].childNodes)
error_message=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Message')[0].childNodes)
error_message=colorize('red',"ERROR",1)+colorize('red',"AWS Error Code: ")+error_code+colorize('red',"\nError Message: ")+error_message
print error_message
exit()
return True
def print_error(error_text):
error_message=colorize('red',"ERROR",1)+colorize('red',"\nError Message: ")+error_text
print error_message
exit()
return True
#takes an entry, and makes it pretty!
def makeover(entry,ismonochrome=False):
if ismonochrome==False:
output=colorize('gray','========================================',1)
output+=colorize('cyan',entry['entry'],1)
output+=colorize('cyan',strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000")),1)
output+=colorize('gray','ID: '+entry.name,0)
else:
output="========================================\n"
output+=entry['entry']+"\n"
output+=strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000"))+"\n"
output+='ID: '+entry.name
return output
# If, during parsing, help was flagged, print out the help text and then exit. TODO: read it from a markdown file.
def print_help():
filepath = path.join(path.dirname(path.abspath(__file__)), 'DOCUMENTATION.mkd')
f = open(filepath,'r')
print f.read()
f.close()
exit()
|
timbotron/ICLS
|
framework.py
|
Python
|
gpl-3.0
| 2,817
| 0.048988
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.edit import FormView
from .forms import SignUpForm, ExamSignUpForm
from .models import Notification
from .utils import send_email, send_sms
class ExamSignUpView(FormView):
kind = 'exam'
form_class = ExamSignUpForm
template_name = 'home.html'
def form_valid(self, form):
notification, sms = form.draft_notification()
if sms:
send_sms(
sms,
'Dein Code fuer die Pruefungsplanbenachrichtigung \
lautet: %s' % (notification.sms_code)
)
send_email(
notification.email,
'Bitte bestaetige deine Pruefungsplanbenachrichtigung',
'Wenn du per Email benachrichtigt werden moechtest, \
klicke bitte auf den folgenden Link: %s' % (
self.request.build_absolute_uri('/confirm/%s/?mail_code=%s' % (
notification.password,
notification.email_token
))
)
)
self.success_url = '/confirm/%s' % notification.password
return super(ExamSignUpView, self).form_valid(form)
class SignUpView(FormView):
kind = 'pruefungsplan'
form_class = SignUpForm
template_name = 'home.html'
def form_valid(self, form):
notification, sms = form.draft_notification()
if sms:
send_sms(
sms,
'Dein Code fuer die Pruefungsplanbenachrichtigung \
lautet: %s' % (notification.sms_code)
)
send_email(
notification.email,
'Bitte bestaetige deine Pruefungsplanbenachrichtigung',
'Wenn du per Email benachrichtigt werden moechtest wenn der \
Pruefungsplan %s online ist, klicke bitte auf den folgenden \
Link: %s' % (
notification.pruefungsplan.name,
self.request.build_absolute_uri('/confirm/%s/?mail_code=%s' % (
notification.password,
notification.email_token
))
)
)
self.success_url = '/confirm/%s' % notification.password
return super(SignUpView, self).form_valid(form)
def confirm(request, password):
notification = get_object_or_404(Notification, password=password)
sms_error = False
sms_code = request.GET.get('sms_code')
if sms_code:
if sms_code == notification.sms_code:
notification.sms_verified = True
notification.save()
else:
sms_error = True
mail_error = False
mail_code = request.GET.get('mail_code')
if mail_code:
if mail_code == notification.email_token:
notification.email_verified = True
notification.save()
else:
mail_error = True
return render_to_response('confirm.html', {
'notification': notification,
'sms_error': sms_error,
'mail_error': mail_error,
}, context_instance=RequestContext(request))
|
lukasklein/pruefungsplan
|
pruefungsplan/notifier/views.py
|
Python
|
bsd-3-clause
| 3,148
| 0
|
import logging
import os
from collections import namedtuple
from zope.interface import Interface
from bowerstatic import (
Bower,
InjectorTween,
PublisherTween,
)
from pyramid.interfaces import IApplicationCreated
from pyramid.path import AssetResolver
from pyramid.exceptions import ConfigurationError
log = logging.getLogger('djed.static')
BowerComponentsInfo = namedtuple('BowerComponentsInfo', 'name path')
BowerComponentInfo = namedtuple('BowerComponentInfo', 'path components_name')
class IBower(Interface):
""" Bower interface
"""
class IBowerComponents(Interface):
""" Bower components interface
"""
class IBowerComponent(Interface):
""" Bower component interface for local components
"""
def bower_factory_from_settings(settings):
prefix = settings.get('djed.static.prefix', 'djed.static.')
bower = Bower()
bower.initialized = False
bower.publisher_signature = settings.get(
prefix + 'publisher_signature', 'bowerstatic')
bower.components_path = settings.get(
prefix + 'components_path', None)
bower.components_name = settings.get(
prefix + 'components_name', 'components')
return bower
def get_bower(request):
registry = getattr(request, 'registry', None)
if registry is None:
registry = request
return registry.getUtility(IBower)
def bowerstatic_tween_factory(handler, registry):
bower = get_bower(registry)
def bowerstatic_tween(request):
injector_handler = InjectorTween(bower, handler)
publisher_handler = PublisherTween(bower, injector_handler)
return publisher_handler(request)
return bowerstatic_tween
def add_bower_components(config, path, name=None):
"""
"""
registry = config.registry
resolver = AssetResolver()
directory = resolver.resolve(path).abspath()
if not os.path.isdir(directory):
raise ConfigurationError(
"Directory '{0}' does not exist".format(directory)
)
bower = get_bower(registry)
if name is None:
name = bower.components_name
discr = ('djed:static', name)
def register():
info = BowerComponentsInfo(name, directory)
registry.registerUtility(info, IBowerComponents, name=name)
config.action(discr, register)
def add_bower_component(config, path, components_name=None):
"""
"""
registry = config.registry
resolver = AssetResolver()
directory = resolver.resolve(path).abspath()
if not os.path.isfile(os.path.join(directory, 'bower.json')):
raise ConfigurationError(
"Directory '{0}' does not contain 'bower.json' file"
.format(directory)
)
bower = get_bower(registry)
if components_name is None:
components_name = bower.components_name
discr = ('djed:static', directory, components_name)
def register():
info = BowerComponentInfo(directory, components_name)
registry.registerUtility(info, IBowerComponent, name='-'.join(discr))
config.action(discr, register)
def include(request, path_or_resource, components_name=None):
"""
"""
registry = request.registry
bower = get_bower(registry)
if components_name is None:
components_name = bower.components_name
collection = bower._component_collections.get(components_name)
if collection is None:
raise ConfigurationError("Bower components '{0}' not found."
.format(components_name))
include = collection.includer(request.environ)
include(path_or_resource)
def init_static(event):
registry = event.app.registry
bower = get_bower(registry)
if not bower.initialized:
log.info("Initialize static resources...")
for name, info in registry.getUtilitiesFor(IBowerComponents):
bower.components(info.name, info.path)
log.info("Add static bower components '{0}': {1}"
.format(info.name, info.path))
for name, info in registry.getUtilitiesFor(IBowerComponent):
collection = bower._component_collections.get(info.components_name)
if collection is None:
raise ConfigurationError(
"Bower components '{0}' not found.".format(
info.components_name))
component = collection.load_component(
info.path, 'bower.json')
collection.add(component)
log.info("Add local bower component: {0}".format(info.path))
bower.initialized = True
def includeme(config):
bower = bower_factory_from_settings(config.registry.settings)
config.registry.registerUtility(bower, IBower)
config.add_tween('djed.static.bowerstatic_tween_factory')
config.add_subscriber(init_static, IApplicationCreated)
config.add_directive('add_bower_components', add_bower_components)
config.add_directive('add_bower_component', add_bower_component)
config.add_request_method(include, 'include')
config.add_request_method(get_bower, 'get_bower')
if bower.components_path is not None:
config.add_bower_components(bower.components_path)
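# Hedged usage sketch, not part of the original module: typical wiring in a
# Pyramid application. The asset paths and component names are hypothetical.
def _example_configure(config):
    config.include('djed.static')
    # Serve an installed bower_components directory...
    config.add_bower_components('myapp:static/bower_components')
    # ...and register a local component that ships its own bower.json.
    config.add_bower_component('myapp:static/myapp_ui')
def _example_view(request):
    # Inject a component into the rendered page via the request method above.
    request.include('jquery')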
|
djedproject/djed.static
|
djed/static/__init__.py
|
Python
|
isc
| 5,214
| 0
|
#-*- coding: utf-8 -*-
import unittest
import ModelADDAC as adda
import api_convertors.type_conv as tc
''' Just a stub '''
def printRpt( value, valueDisplacemented, valueScaled, valueCode, Kda ):
#print '\nvalueDisplacemented : '+str(valueDisplacemented)
pass
''' Test case class '''
class TestCaseModelADDAC(unittest.TestCase):
_valueDict = { 'value' : 0, 'zeroDisplacement' : 0, 'converter' : 0,
'scale' : 0, 'capacity' : 0, 'Vmax' : 0 }
    def setUp( self ):
        pass
    ''' ADC '''
def testADC( self ):
self._valueDict[ 'value' ] = 2.5
self._valueDict['displacement'] = 0
self._valueDict['converter' ] = 1
self._valueDict['scale'] = 1
self._valueDict['capacity'] = 8
self._valueDict['Vmax'] = 5.0 # V
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the digital code, 8 bits!! (the resolution may also be higher)
self.assertEqual( tc.byte2strhex( code ), '0x80' )
        # check the transfer coefficient
        #self.assertEqual( Kda, 51.2 ) #? how to compare doubles - just evaluate the expression
    ''' Check the current calculation, with an offset '''
def testCurrADCZeroX( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
I = 10 # A
        dI = -1.0 # A, zero position on the X axis
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dI
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentX )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00E5' )
    ''' Check the current calculation, without an offset '''
def testCurrADCZeroXZ0( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
dI = 0.0 # A
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dI
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentX )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00FE' )
    ''' Check the current calculation, with a voltage offset '''
def testCurrADCZeroY( self ):
# Constants and coeff.
R1 = 5.11
R2 = 10.0
Splitter = R2/(R1+R2)
Vmax = 5000.0 #mV
capacity = 10
Kiu = 188.0 # mV/A
dU = 500.0 # mV
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dU
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x0142' )
    ''' Check the current calculation, with an offset '''
def testCurrADCZeroYZ0( self ):
# Constants and coeff.
R1 = 5.11 # Om
R2 = 10.0 # Om
Splitter = R2/(R1+R2)
Vmax = 5000.0 # mV
capacity = 10 # bits
Kiu = 188.0 # mV/A
dU = 0.0 # mV
I = 10 # A
        # run the check
self._valueDict[ 'value' ] = I
self._valueDict['displacement'] = dU
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Check the numeric code
self.assertEqual( tc.byte4strhex( code ), '0x00FE' )
        # Check the multiplier
    ''' DAC check '''
def testDAC( self ):
# Constants and coeff.
R1 = 5.11 # Om
R2 = 10.0 # Om
Splitter = R2/(R1+R2)
Vmax = 5000.0 # mV
capacity = 10 # bits
Kiu = 188.0 # mV/A
Udig = 322 # V ue
        # run the check
self._valueDict[ 'value' ] = 0
self._valueDict['displacement'] = 500
self._valueDict['converter' ] = Kiu
self._valueDict['scale'] = Splitter
self._valueDict['capacity'] = capacity
self._valueDict['Vmax'] = Vmax
        # first obtain the correction code
code, Kda = adda.modelADC( self._valueDict, printRpt, adda.calcZeroDisplacmentY )
        # Run it
self._valueDict[ 'value' ] = Udig-code
self._valueDict['displacement'] = None
analog = adda.modelDAC( self._valueDict, printRpt )
        # check the value! floats are hard to compare - round for now
self.assertEqual( int( analog ), 10 )
# Run tests
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase( TestCaseModelADDAC )
unittest.TextTestRunner(verbosity=2).run(suite)
|
zaqwes8811/matlab_ext
|
measurement/mc-assistant/projects/py_hw_models/trash/testModelADDAC.py
|
Python
|
apache-2.0
| 5,245
| 0.061971
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version_info = (4, 12)
version = '.'.join(map(str, version_info))
|
shinpeimuraoka/ryu
|
ryu/__init__.py
|
Python
|
apache-2.0
| 680
| 0
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from urbansim.length_constants import UrbanSimLength, UrbanSimLengthConstants
from numpy import array
class is_near_arterial(Variable):
"""Boolean indicating if this gridcell is near an arterial, as specified by the arterial
threshold (a constant). Distance is assumed to be measured from the "border" of the gridcell."""
distance_to_arterial = "distance_to_arterial"
def dependencies(self):
return [my_attribute_label(self.distance_to_arterial)]
def compute(self, dataset_pool):
return get_is_near_arterial(self.get_dataset().get_attribute(self.distance_to_arterial),
dataset_pool.get_dataset('urbansim_constant'))
def post_check(self, values, dataset_pool):
self.do_check("x == False or x == True", values)
def get_is_near_arterial(distance, urbansim_constant):
length = UrbanSimLength(distance, urbansim_constant["gridcell_width"].units)
return length.less_than(urbansim_constant["near_arterial_threshold_unit"])
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs( self ):
# Assumes distance is measured from the gridcell border to the arterial.
tester = VariableTester(
__file__,
package_order=['urbansim'],
test_data={
'gridcell':{
'grid_id': array([1,2,3,4,5,6]),
'distance_to_arterial': array([0.0, 50.0, 99.0, 100.0, 101.0, 200.0]),
},
'urbansim_constant':{
'cell_size': array([150]),
'near_arterial_threshold': array([100]),
'units': array(['meters']),
}
}
)
should_be = array( [True, True, True, False, False, False] )
tester.test_is_equal_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main()
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim/gridcell/is_near_arterial.py
|
Python
|
gpl-2.0
| 2,343
| 0.012804
|
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from selection.algorithms.lasso import instance
from selection.algorithms.forward_step import forward_stepwise, info_crit_stop, sequential, data_carving_IC
def test_FS(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3)
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
print FS.model_quadratic(3)
def test_FS_unknown(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y)
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_subset(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
subset = np.ones(n, np.bool)
subset[-10:] = 0
FS = forward_stepwise(X, Y, subset=subset,
covariance=0.5**2 * np.identity(n))
for i in range(k):
FS.next()
print 'first %s variables selected' % k, FS.variables
print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k
print FS.model_pivots(3, saturated=True)
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
FS = forward_stepwise(X, Y, subset=subset)
for i in range(k):
FS.next()
print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000)
def test_BIC(k=10, do_sample=True):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = info_crit_stop(Y, X, 0.5, cost=np.log(n))
final_model = len(FS.variables) - 1
if do_sample:
return [p[-1] for p in FS.model_pivots(final_model, saturated=False, burnin=5000, ndraw=5000)]
else:
saturated_pivots = FS.model_pivots(final_model)
return [p[-1] for p in saturated_pivots]
def test_sequential(k=10):
n, p = 100, 200
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
print sequential(X, Y, sigma=0.5, saturated=True)[1]
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000)[1]
# now use a subset of cases
subset = np.ones(n, np.bool)
subset[-10:] = 0
print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000,
subset=subset)[1]
print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000, subset=subset)[1]
def simulate_null(saturated=True):
n, p = 100, 40
X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None]
X /= (X.std(0)[None,:] * np.sqrt(n))
Y = np.random.standard_normal(100) * 0.5
FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n))
for i in range(5):
FS.next()
return [p[-1] for p in FS.model_pivots(3, saturated=saturated,
use_new=False)]
def test_ecdf(nsim=1000, BIC=False,
saturated=True):
P = []
for _ in range(nsim):
if not BIC:
P.extend(simulate_null(saturated=saturated))
else:
P.extend(test_BIC(do_sample=True))
P = np.array(P)
ecdf = sm.distributions.ECDF(P)
plt.clf()
plt.plot(ecdf.x, ecdf.y, linewidth=4, color='black')
plt.show()
def test_data_carving_IC(n=100,
p=200,
s=7,
sigma=5,
rho=0.3,
snr=7.,
split_frac=0.9,
ndraw=5000,
burnin=1000,
df=np.inf,
coverage=0.90,
compute_intervals=False):
counter = 0
while True:
counter += 1
X, y, beta, active, sigma = instance(n=n,
p=p,
s=s,
sigma=sigma,
rho=rho,
snr=snr,
df=df)
mu = np.dot(X, beta)
splitn = int(n*split_frac)
indices = np.arange(n)
np.random.shuffle(indices)
stage_one = indices[:splitn]
FS = info_crit_stop(y, X, sigma, cost=np.log(n), subset=stage_one)
if set(range(s)).issubset(FS.active):
results, FS = data_carving_IC(y, X, sigma,
stage_one=stage_one,
splitting=True,
ndraw=ndraw,
burnin=burnin,
coverage=coverage,
compute_intervals=compute_intervals,
cost=np.log(n))
carve = [r[1] for r in results]
split = [r[3] for r in results]
Xa = X[:,FS.variables[:-1]]
truth = np.dot(np.linalg.pinv(Xa), mu)
split_coverage = []
carve_coverage = []
for result, t in zip(results, truth):
_, _, ci, _, si = result
carve_coverage.append((ci[0] < t) * (t < ci[1]))
split_coverage.append((si[0] < t) * (t < si[1]))
return ([carve[j] for j, i in enumerate(FS.active) if i >= s],
[split[j] for j, i in enumerate(FS.active) if i >= s],
[carve[j] for j, i in enumerate(FS.active) if i < s],
[split[j] for j, i in enumerate(FS.active) if i < s],
counter, carve_coverage, split_coverage)
def test_full_pvals(n=100, p=40, rho=0.3, snr=4):
X, y, beta, active, sigma = instance(n=n, p=p, snr=snr, rho=rho)
FS = forward_stepwise(X, y, covariance=sigma**2 * np.identity(n))
from scipy.stats import norm as ndist
pval = []
completed_yet = False
for i in range(min(n, p)):
FS.next()
var_select, pval_select = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=False,
burnin=2000,
ndraw=8000)[0]
pval_saturated = FS.model_pivots(i+1, alternative='twosided',
which_var=[FS.variables[-1]],
saturated=True)[0][1]
# now, nominal ones
LSfunc = np.linalg.pinv(FS.X[:,FS.variables])
Z = np.dot(LSfunc[-1], FS.Y) / (np.linalg.norm(LSfunc[-1]) * sigma)
pval_nominal = 2 * ndist.sf(np.fabs(Z))
pval.append((var_select, pval_select, pval_saturated, pval_nominal))
if set(active).issubset(np.array(pval)[:,0]) and not completed_yet:
completed_yet = True
completion_index = i + 1
return X, y, beta, active, sigma, np.array(pval), completion_index
|
stefanv/selective-inference
|
selection/algorithms/tests/test_forward_step.py
|
Python
|
bsd-3-clause
| 8,482
| 0.00896
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase, ET
class UserGaming(ElementBase):
name = 'gaming'
namespace = 'urn:xmpp:gaming:0'
plugin_attrib = 'gaming'
interfaces = set(['character_name', 'character_profile', 'name',
'level', 'server_address', 'server_name', 'uri'])
sub_interfaces = interfaces
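# Illustrative sketch (not part of the original plugin): ElementBase exposes the
# declared interfaces as dict-style keys, so a user-gaming stanza could be
# populated roughly like this before being published:
#
#     gaming = UserGaming()
#     gaming['name'] = 'Chess'
#     gaming['level'] = '3'
#     gaming['server_name'] = 'Chess Galaxy'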
|
danielvdao/facebookMacBot
|
venv/lib/python2.7/site-packages/sleekxmpp/plugins/xep_0196/stanza.py
|
Python
|
mit
| 536
| 0.003731
|
'''
A segment which is a literal string
A FakeLargeFile composed entirely of LiteralSegments is not fake, but may
still be more useful than a plain old file.
'''
COPYING = """\
Copyright 2014 Lauritz Vesteraas Thaulow
This file is part of the FakeLargeFile python package.
FakeLargeFile is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License version 3,
as published by the Free Software Foundation.
FakeLargeFile is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with FakeLargeFile. If not, see <http://www.gnu.org/licenses/>.
"""
import pkg_resources
from fakelargefile.segment.abc import AbstractSegment, register_segment
from fakelargefile.tools import parse_unit, Slice
@register_segment
class LiteralSegment(AbstractSegment):
"""
A segment containing exactly a given string.
"""
def __init__(self, start, string):
"""
Initialize a LiteralSegment instance.
:param int start: The start pos of the segment.
:param str string: The string this segment should contain.
"""
start = parse_unit(start)
super(LiteralSegment, self).__init__(start, start + len(string))
self.string = string
def subsegment(self, start, stop):
sl = Slice(start, stop, self.start, self.stop)
if sl.size:
return type(self)(sl.start, self.string[sl.local_slice])
else:
return None
@classmethod
def example(cls, start, stop):
basis = pkg_resources.resource_stream(
"fakelargefile", "GPLv3.txt").read()
start = parse_unit(start)
stop = parse_unit(stop)
size = stop - start
basis = basis * (size // len(basis) + 1)
return cls(start, basis[:size])
def copy(self, start=None):
if start is None:
start = self.start
return type(self)(start, self.string)
def index(self, string, start=None, stop=None, end_pos=False):
sl = Slice(start, stop, self.start, self.stop)
index = self.string.index(string, sl.local_start, sl.local_stop)
if end_pos:
index += len(string)
return self.start + index
def substring(self, start, stop):
sl = Slice(start, stop, self.start, self.stop, clamp=False)
return self.string[sl.local_slice]
def __str__(self):
return self.string
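# Illustrative usage sketch (assumed values, not part of the original module):
# a LiteralSegment holds an exact string anchored at a start position, so
# substring() and index() reduce to offset arithmetic on that string, e.g.:
#
#     seg = LiteralSegment(10, "hello world")   # start=10, stop=21
#     seg.substring(16, 21)                     # -> "world"
#     seg.index("world")                        # -> 16 (absolute position)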
|
LauritzThaulow/fakelargefile
|
fakelargefile/segment/literal.py
|
Python
|
agpl-3.0
| 2,718
| 0.001104
|
"""
Project: flask-rest
Author: Saj Arora
Description: All of the rest methods...
"""
class SageMethod:
GET = 'get'
POST = 'post'
DELETE = 'delete'
PUT = 'put'
ALL = [GET, POST, DELETE, PUT]
|
aroraenterprise/projecteos
|
backend/api/v1/fundamentals/sage_methods.py
|
Python
|
mit
| 212
| 0.009434
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
whatweb.recog_from_content(pluginname, "Replica set status")
|
cflq3/getcms
|
plugins/mongodb_dbs.py
|
Python
|
mit
| 138
| 0.007246
|
from vertex.filters import IdListFilterSet
from ..models import Note
class NoteFilterSet(IdListFilterSet):
class Meta:
model = Note
|
zapcoop/vertex
|
vertex_api/service/filters/note.py
|
Python
|
agpl-3.0
| 147
| 0
|
import threading
import solr
from django.conf import settings
class SolrConnection(threading.local):
_connection = None
def __init__(self, core):
threading.local.__init__(self)
self.core = core
@property
def connection(self):
if self._connection:
return self._connection
else:
return solr.Solr(settings.SOLR_HOST + '/' + self.core)
valid_core_names = ['comment', 'group']
local = {}
def get_local(core):
assert core in valid_core_names
if not core in local:
local[core] = SolrConnection(core)
return local[core]
def escape(input):
escapes = '\\+-&|!(){}[]^~*?:"; '
return "".join(
(char if char not in escapes else '\\' + char)
for char
in input
)
def query(core, *args, **kwargs):
return get_local(core).connection.select(*args, **kwargs)
def add(core, *args, **kwargs):
return get_local(core).connection.add(*args, **kwargs)
def update(core, doc, *args, **kwargs):
get_local(core).connection.delete(doc['id'])
return add(core, doc)
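# Illustrative usage sketch (hypothetical values, not part of the original file):
# escape() backslash-escapes Solr query metacharacters, and query()/add() proxy
# to the thread-local connection for the named core, e.g.:
#
#     q = 'text:%s' % escape('hello world')    # -> "text:hello\ world"
#     query('comment', q)
#     add('comment', {'id': '42', 'text': 'hello world'})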
|
canvasnetworks/canvas
|
website/canvas/search.py
|
Python
|
bsd-3-clause
| 1,105
| 0.01086
|
#!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/testcases/OpTestPCI.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
OpTestPCI: PCI checks
-------------------------------
Perform various PCI validations and checks
--run-suite BasicPCI (includes skiroot_suite and host_suite)
--run-suite pci-regression
Sample naming conventions below, see each test method for
the applicable options per method.
--run testcases.OpTestPCI.PCISkiroot.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
--run testcases.OpTestPCI.PCIHost.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
'''
import unittest
import logging
import pexpect
import time
import re
import difflib
from distutils.version import LooseVersion
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed, UnexpectedCase
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
skiroot_done = 0
host_done = 0
skiroot_lspci = None
host_lspci = None
reset_console = 0
class OpClassPCI(unittest.TestCase):
'''
Main Parent class
We cannot guarantee a soft boot entry, so need to force to PS or OS
'''
@classmethod
def setUpClass(cls, desired=None, power_cycle=0):
'''
Main setUpClass, this is shared across all subclasses.
This is called once when the subclass is instantiated.
'''
if desired is None:
cls.desired = OpSystemState.PETITBOOT_SHELL
else:
cls.desired = desired
cls.power_cycle = power_cycle
cls.conf = OpTestConfiguration.conf
cls.cv_SYSTEM = cls.conf.system()
cls.cv_HOST = cls.conf.host()
cls.my_connect = None
if cls.power_cycle == 1:
cls.cv_SYSTEM.goto_state(OpSystemState.OFF)
cls.power_cycle = 0
try:
if cls.desired == OpSystemState.OS:
# set bootdev for reboot cases
cls.cv_SYSTEM.sys_set_bootdev_no_override()
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
else:
cls.cv_SYSTEM.sys_set_bootdev_setup()
cls.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
cls.c = cls.cv_SYSTEM.console
cls.pty = cls.cv_SYSTEM.console.get_console()
except Exception as e:
log.debug("Unable to find cls.desired, probably a test code problem")
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
@classmethod
def tearDownClass(cls):
'''
Main tearDownClass, this is shared across all subclasses.
This is called once when the subclass is taken down.
'''
global skiroot_done
global host_done
global skiroot_lspci
global host_lspci
global reset_console
if reset_console == 1:
cls.refresh_console()
@classmethod
def set_console(cls):
'''
This method allows setting the shared class console to the real
console when needed, i.e. driver_bind tests which unbind the
ethernet drivers.
'''
cls.c = cls.cv_SYSTEM.console
@classmethod
def refresh_console(cls):
'''
This method is used to set the shared class console back to the proper
object (this gets set to the real console when we unbind the ethernet)
in the driver_bind test as an example.
'''
# this done after a reboot
global reset_console
if cls.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
cls.c = cls.cv_SYSTEM.console
else:
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
reset_console = 0
def setUp(self):
'''
All variables common to a subclass need to be defined here since
this method gets called before each subclass test
'''
pass
def tearDown(self):
'''
This is done at the end of each subclass test.
'''
global reset_console
if reset_console == 1:
self.refresh_console()
def get_lspci(self):
'''
Usually used internally; can also be run to query the system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci
Case D --run testcases.OpTestPCI.PCIHost.get_lspci
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci
'''
lspci_data = self.c.run_command("lspci -mm -n")
return lspci_data
def check_commands(self):
'''
Checks for general capability to run commands
Case A --run testcases.OpTestPCI.PCISkiroot.check_commands
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.check_commands
Case C --run testcases.OpTestPCI.PCISkirootHardboot.check_commands
Case D --run testcases.OpTestPCI.PCIHost.check_commands
Case E --run testcases.OpTestPCI.PCIHostSoftboot.check_commands
Case F --run testcases.OpTestPCI.PCIHostHardboot.check_commands
'''
list_pci_devices_commands = ["lspci -mm -n",
"lspci -m",
"lspci -t",
"lspci -n",
"lspci -nn",
"cat /proc/bus/pci/devices",
"ls --color=never /sys/bus/pci/devices/ -l",
"lspci -vvxxx",
]
for cmd in list_pci_devices_commands:
self.c.run_command(cmd, timeout=300)
list_usb_devices_commands = ["lsusb",
"lsusb -t",
"lsusb -v",
]
for cmd in list_usb_devices_commands:
self.c.run_command(cmd)
# Test that we do not EEH on reading all config space
self.c.run_command(
"hexdump -C /sys/bus/pci/devices/*/config", timeout=600)
def get_lspci_file(self):
'''
Usually used internally; can also be run to query the system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci_file
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci_file
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci_file
Case D --run testcases.OpTestPCI.PCIHost.get_lspci_file
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci_file
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci_file
'''
if self.conf.lspci_file():
with open(self.conf.lspci_file(), 'r') as f:
file_content = f.read().splitlines()
log.debug("file_content={}".format(file_content))
return file_content
def _diff_my_devices(self,
listA=None,
listA_name=None,
listB=None,
listB_name=None):
'''
Performs unified diff of two lists
'''
unified_output = difflib.unified_diff(
[_f for _f in listA if _f],
[_f for _f in listB if _f],
fromfile=listA_name,
tofile=listB_name,
lineterm="")
unified_list = list(unified_output)
log.debug("unified_list={}".format(unified_list))
return unified_list
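# Illustrative example (not part of the original test): with listA=['a', 'b']
# and listB=['a', 'c'] the unified diff comes back as lines such as
# '--- listA', '+++ listB', '-b', '+c', which the compare_* tests below
# join with newlines for their assertion messages.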
def compare_boot_devices(self):
'''
This is best leveraged in the suite pci-regression,
where both the skiroot/host softboot and the
skiroot/host hardboot get done in the same wave,
so that the global variables carry over to compare.
If both the skiroot and host lspci runs have completed, the test
will compare the lspci results.
If you want to compare against an input file, use
compare_live_devices.
Case A --run testcases.OpTestPCI.PCISkiroot.compare_boot_devices
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.compare_boot_devices
Case C --run testcases.OpTestPCI.PCISkirootHardboot.compare_boot_devices
Case D --run testcases.OpTestPCI.PCIHost.compare_boot_devices
Case E --run testcases.OpTestPCI.PCIHostSoftboot.compare_boot_devices
Case F --run testcases.OpTestPCI.PCIHostHardboot.compare_boot_devices
'''
global skiroot_done
global host_done
global skiroot_lspci
global host_lspci
lspci_output = self.get_lspci()
if self.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
skiroot_lspci = lspci_output
skiroot_done = 1
else:
host_lspci = lspci_output
host_done = 1
if host_done and skiroot_done:
compare_results = self._diff_my_devices(listA=skiroot_lspci,
listA_name="skiroot_lspci",
listB=host_lspci,
listB_name="host_lspci")
if len(compare_results):
self.assertEqual(len(compare_results), 0,
"skiroot_lspci and host_lspci devices differ:\n{}"
.format('\n'.join(i for i in compare_results)))
# refresh so next pair can be matched up, i.e. soft or hard
skiroot_done = 0
host_done = 0
skiroot_lspci = None
host_lspci = None
def compare_live_devices(self):
'''
Compares the live system lspci against an input file, host-lspci
provided either in conf file or via command line.
"ssh user@host lspci -mm -n > host-lspci.txt"
--host-lspci host-lspci.txt on command line
or
host_lspci=host-lspci.txt in conf file
Case A --run testcases.OpTestPCI.PCISkiroot.compare_live_devices
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.compare_live_devices
Case C --run testcases.OpTestPCI.PCISkirootHardboot.compare_live_devices
Case D --run testcases.OpTestPCI.PCIHost.compare_live_devices
Case E --run testcases.OpTestPCI.PCIHostSoftboot.compare_live_devices
Case F --run testcases.OpTestPCI.PCIHostHardboot.compare_live_devices
'''
active_lspci = self.get_lspci()
file_lspci = self.get_lspci_file()
if file_lspci:
compare_results = self._diff_my_devices(listA=file_lspci,
listA_name=self.conf.lspci_file(),
listB=active_lspci,
listB_name="Live System")
log.debug("compare_results={}".format(compare_results))
if len(compare_results):
self.assertEqual(len(compare_results), 0,
"Stored ({}) and Active PCI devices differ:\n{}"
.format(self.conf.lspci_file(), ('\n'.join(i for i in compare_results))))
def pcie_link_errors(self):
'''
Checks for link errors
Case A --run testcases.OpTestPCI.PCISkiroot.pcie_link_errors
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.pcie_link_errors
Case C --run testcases.OpTestPCI.PCISkirootHardboot.pcie_link_errors
Case D --run testcases.OpTestPCI.PCIHost.pcie_link_errors
Case E --run testcases.OpTestPCI.PCIHostSoftboot.pcie_link_errors
Case F --run testcases.OpTestPCI.PCIHostHardboot.pcie_link_errors
'''
total_entries = link_down_entries = timeout_entries = []
try:
link_down_entries = self.c.run_command(
"grep ',[432]\].*PHB#.* Link down' /sys/firmware/opal/msglog")
except CommandFailed as cf:
pass
if link_down_entries:
log.debug("link_down_entries={}".format(link_down_entries))
total_entries = total_entries + link_down_entries
log.debug(
"total_entries with link_down_entries={}".format(total_entries))
try:
timeout_entries = self.c.run_command(
"grep ',[432]\].*Timeout waiting for' /sys/firmware/opal/msglog")
except CommandFailed as cf:
pass
if timeout_entries:
log.debug("timeout_entries={}".format(timeout_entries))
total_entries = total_entries + timeout_entries
log.debug(
"total_entries with timeout_entries={}".format(total_entries))
platform = self.c.run_command("cat /proc/device-tree/compatible")
filter_out = [
'PHB#00(00|30|33|34)\[(0|8):(0|4|3)\]: LINK: Timeout waiting for link up',
'Timeout waiting for downstream link',
]
log.debug("STARTING total_entries={}".format(total_entries))
if re.search(r'p9dsu', platform[0]):
# No presence detect on some p9dsu slots :/
for f in filter_out:
fre = re.compile(f)
total_entries = [l for l in total_entries if not fre.search(l)]
log.debug("P9DSU FILTERED OUT total_entries={}".format(total_entries))
msg = '\n'.join([_f for _f in total_entries if _f])
log.debug("total_entries={}".format(total_entries))
self.assertTrue(len(total_entries) == 0,
"pcie link down/timeout Errors in OPAL log:\n{}".format(msg))
def _get_list_of_pci_devices(self):
cmd = "ls --color=never /sys/bus/pci/devices/ | awk {'print $1'}"
res = self.c.run_command(cmd)
return res
def _get_driver(self, pe):
cmd = "lspci -ks {}".format(pe)
output = self.c.run_command(cmd, timeout=120)
if output:
for line in output:
if 'Kernel driver in use:' in line:
return (line.rsplit(":")[1]).strip(" ")
return None
def _get_list_of_slots(self):
cmd = "ls --color=never /sys/bus/pci/slots/ -1"
res = self.c.run_command(cmd)
return res
def _get_root_pe_address(self):
cmd = "df -h /boot | awk 'END {print $1}'"
res = self.c.run_command(cmd)
boot_disk = ''.join(res).split("/dev/")[1]
boot_disk = boot_disk.replace("\r\n", "")
awk_string = "awk '{print $(NF-2)}'"
pre_cmd = "ls --color=never -l /dev/disk/by-path/ | grep {} | ".format(
boot_disk)
cmd = pre_cmd + awk_string
res = self.c.run_command(cmd)
root_pe = res[0].split("-")[1]
return root_pe
def _gather_errors(self):
# Gather all errors from kernel and opal logs
try:
self.c.run_command("dmesg -r|grep '<[4321]>'")
except CommandFailed:
pass
try:
self.c.run_command("grep ',[0-4]\]' /sys/firmware/opal/msglog")
except CommandFailed:
pass
def driver_bind(self):
'''
Unbind and then bind the devices
Case A --run testcases.OpTestPCI.PCISkiroot.driver_bind
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.driver_bind
Case C --run testcases.OpTestPCI.PCISkirootHardboot.driver_bind
Case D --run testcases.OpTestPCI.PCIHost.driver_bind
Case E --run testcases.OpTestPCI.PCIHostSoftboot.driver_bind
Case F --run testcases.OpTestPCI.PCIHostHardboot.driver_bind
Special note: use caution when unbinding shared BMC ethernet ports.
'''
# since we will be unbinding ethernet drivers, override the console
global reset_console
reset_console = 1
self.set_console()
if self.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
root_pe = "xxxx"
else:
root_pe = self._get_root_pe_address()
self.c.run_command("dmesg -D")
list = self._get_list_of_pci_devices()
failure_list = {}
for slot in list:
rc = 0
driver = self._get_driver(slot)
if root_pe in slot:
continue
if driver is None:
continue
index = "{}_{}".format(driver, slot)
cmd = "echo -n {} > /sys/bus/pci/drivers/{}/unbind".format(
slot, driver)
log.debug("unbind driver={} slot={} cmd={}".format(
driver, slot, cmd))
try:
self.c.run_command(cmd)
except CommandFailed as cf:
msg = "Driver unbind operation failed for driver {}, slot {}".format(
slot, driver)
failure_list[index] = msg
time.sleep(5)
cmd = 'ls --color=never /sys/bus/pci/drivers/{}'.format(driver)
self.c.run_command(cmd)
path = "/sys/bus/pci/drivers/{}/{}".format(driver, slot)
try:
self.c.run_command("test -d {}".format(path))
rc = 1
except CommandFailed as cf:
pass
cmd = "echo -n {} > /sys/bus/pci/drivers/{}/bind".format(
slot, driver)
log.debug("bind driver={} slot={} cmd={}".format(driver, slot, cmd))
try:
self.c.run_command(cmd)
except CommandFailed as cf:
msg = "Driver bind operation failed for driver {}, slot {}".format(
slot, driver)
failure_list[index] = msg
time.sleep(5)
cmd = 'ls --color=never /sys/bus/pci/drivers/{}'.format(driver)
self.c.run_command(cmd)
try:
self.c.run_command("test -d {}".format(path))
except CommandFailed as cf:
rc = 2
self._gather_errors()
if rc == 1:
msg = "{} not unbound for driver {}".format(slot, driver)
failure_list[index] = msg
if rc == 2:
msg = "{} not bound back for driver {}".format(slot, driver)
failure_list[index] = msg
self.assertEqual(failure_list, {},
"Driver bind/unbind failures {}".format(failure_list))
def hot_plug_host(self):
'''
NEEDS TESTING
Case A --run testcases.OpTestPCI.PCIHost.hot_plug_host
Case B --run testcases.OpTestPCI.PCIHostSoftboot.hot_plug_host
Case C --run testcases.OpTestPCI.PCIHostHardboot.hot_plug_host
'''
# Currently this feature enabled only for fsp systems
if "FSP" not in self.conf.args.bmc_type:
log.debug(
"Skipping test, currently only OPAL FSP Platform supported for hot_plug_host")
self.skipTest(
"Skipping test, currently only OPAL FSP Platform supported for hot_plug_host")
res = self.c.run_command("uname -r")[-1].split("-")[0]
if LooseVersion(res) < LooseVersion("4.10.0"):
log.debug(
"Skipping test, Kernel does not support hotplug {}".format(res))
self.skipTest(
"Skipping test, Kernel does not support hotplug={}".format(res))
self.cv_HOST.host_load_module("pnv_php")
device_list = self._get_list_of_pci_devices()
root_pe = self._get_root_pe_address()
slot_list = self._get_list_of_slots()
self.c.run_command("dmesg -D")
pair = {} # Pair of device vs slot location code
for device in device_list:
cmd = "lspci -k -s {} -vmm".format(device)
res = self.c.run_command(cmd)
for line in res:
# if "PhySlot:\t" in line:
obj = re.match('PhySlot:\t(.*)', line)
if obj:
pair[device] = obj.group(1)
failure_list = {}
for device, phy_slot in list(pair.items()):
if root_pe in device:
continue
index = "{}_{}".format(device, phy_slot)
path = "/sys/bus/pci/slots/{}/power".format(phy_slot)
try:
self.c.run_command("test -f {}".format(path))
except CommandFailed as cf:
log.debug("Slot {} does not support hotplug".format(phy_slot))
continue # slot does not support hotplug
try:
self.c.run_command("echo 0 > {}".format(path))
except CommandFailed as cf:
msg = "PCI device/slot power off operation failed"
failure_list[index] = msg
time.sleep(5)
cmd = "lspci -k -s {}".format(device)
res = self.c.run_command(cmd)
if device in "\n".join(res):
msg = "PCI device failed to remove after power off operation"
failure_list[index] = msg
try:
self.c.run_command("echo 1 > {}".format(path))
except CommandFailed as cf:
msg = "PCI device/slot power on operation failed"
failure_list[index] = msg
res = self.c.run_command(cmd)
if device not in "\n".join(res):
msg = "PCI device failed to attach back after power on operation"
failure_list[index] = msg
self._gather_errors()
self.assertEqual(failure_list, {},
"PCI Hotplug failures {}".format(failure_list))
def pci_link_check(self):
'''
PCI link checks
Case A --run testcases.OpTestPCI.PCISkiroot.pci_link_check
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.pci_link_check
Case C --run testcases.OpTestPCI.PCISkirootHardboot.pci_link_check
Case D --run testcases.OpTestPCI.PCIHost.pci_link_check
Case E --run testcases.OpTestPCI.PCIHostSoftboot.pci_link_check
Case F --run testcases.OpTestPCI.PCIHostHardboot.pci_link_check
'''
lspci_output = self.c.run_command("lspci")
# List of devices that won't be checked
blacklist = [
"Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)"]
# Populating device id list
device_ids = []
for line in lspci_output:
if line:
line = line.strip().split(' ')
device_ids.append(line[0])
class Device:
def __init__(self, device_info):
self.domain = ""
self.primary = ""
self.slotfunc = ""
self.name = ""
self.secondary = ""
self.capability = ""
self.capspeed = 0
self.capwidth = 0
self.staspeed = 0
self.stawidth = 0
# 0000:00:00.0 PCI bridge: IBM Device 03dc
id_components = device_info[0].split(":")
self.domain = id_components[0]
self.primary = id_components[1]
self.slotfunc = id_components[2].split()[0]
self.name = id_components[-1].strip()
for line in device_info[1:]:
if line:
line = line.strip()
if "Bus:" in line:
line = line.split("secondary=")
self.secondary = line[1][:2]
if "Express (v" in line:
self.capability = "Endpoint"
if "Root Port" in line:
self.capability = "Root"
if "Upstream" in line:
self.capability = "Upstream"
if "Downstream" in line:
self.capability = "Downstream"
if "LnkCap:" in line:
# LnkCap: Port #0, Speed 8GT/s, Width x16, ASPM L0s, Exit Latency L0s unlimited, L1 unlimited
line = line.split("GT/s, Width x")
self.capspeed = float(line[0].split()[-1])
self.capwidth = float(line[1].split(",")[0])
if "LnkSta:" in line:
# LnkSta: Speed 8GT/s, Width x8, TrErr- Train- SlotClk+ DLActive+ BWMgmt- ABWMgmt+
line = line.split("GT/s, Width x")
self.staspeed = float(line[0].split()[-1])
self.stawidth = float(line[1].split(",")[0])
def get_details(self):
msg = ("{}, capability={}, secondary={} \n"
.format(self.get_id(), self.capability, self.secondary))
msg += ("capspeed={}, capwidth={}, staspeed={}, stawidth={}"
.format(self.capspeed, self.capwidth, self.staspeed, self.stawidth))
return msg
def get_id(self):
return "{}:{}:{}".format(self.domain, self.primary, self.slotfunc)
# Checking if two devices are linked together
def devicesLinked(upstream, downstream):
if upstream.domain == downstream.domain:
if upstream.secondary == downstream.primary:
if upstream.capability == "Root":
if downstream.capability == "Upstream":
return True
if downstream.capability == "Endpoint":
return True
if upstream.capability == "Downstream":
if downstream.capability == "Endpoint":
return True
return False
# Checking if LnkSta matches LnkCap - speed
def optimalSpeed(upstream, downstream):
if upstream.capspeed > downstream.capspeed:
optimal_speed = downstream.capspeed
else:
optimal_speed = upstream.capspeed
if optimal_speed > upstream.staspeed:
return False
return True
# Checking if LnkSta matches LnkCap - width
def optimalWidth(upstream, downstream):
if upstream.capwidth > downstream.capwidth:
optimal_width = downstream.capwidth
else:
optimal_width = upstream.capwidth
if optimal_width > upstream.stawidth:
return False
return True
device_list = []
# Filling device objects' details
for device in device_ids:
device_info = self.c.run_command("lspci -s {} -vv".format(device))
device_list.append(Device(device_info))
checked_devices = []
suboptimal_links = ""
blacklist_links = ""
# Returns a string containing details of the suboptimal link
def subLinkInfo(upstream, downstream):
msg = "\nSuboptimal link between {} and {} - ".format(
upstream.get_id(), downstream.get_id())
if not optimalSpeed(upstream, downstream):
if upstream.capspeed > downstream.capspeed:
optimal_speed = downstream.capspeed
else:
optimal_speed = upstream.capspeed
actual_speed = upstream.staspeed
msg += "Link speed capability is {}GT/s but status was {}GT/s. ".format(
optimal_speed, actual_speed)
if not optimalWidth(upstream, downstream):
if upstream.capwidth > downstream.capwidth:
optimal_width = downstream.capwidth
else:
optimal_width = upstream.capwidth
actual_width = upstream.stawidth
msg += "Link width capability is x{} but status was x{}. ".format(
optimal_width, actual_width)
return msg
# Searching through devices to check for links and testing to see if they're optimal
for device in device_list:
if device not in checked_devices:
checked_devices.append(device)
for endpoint in device_list:
if endpoint not in checked_devices:
if devicesLinked(device, endpoint):
checked_devices.append(endpoint)
log.debug("checking link between {} and {}".format(
device.get_id(), endpoint.get_id()))
log.debug(device.get_details())
log.debug(endpoint.get_details())
if endpoint.name in blacklist:
no_check_msg = ("Link between {} and {} not checked as {} is in the list of blacklisted devices"
.format(device.get_id(), endpoint.get_id(), endpoint.get_id()))
log.info(no_check_msg)
blacklist_links += "{}\n".format(no_check_msg)
else:
if(not optimalSpeed(device, endpoint)) or (not optimalWidth(device, endpoint)):
suboptimal_links += subLinkInfo(
device, endpoint)
log.debug("")
log.debug("Finished testing links")
log.debug("blacklist_links={}".format(blacklist_links))
log.debug("suboptimal_links={}".format(suboptimal_links))
# Assert suboptimal list is empty
self.assertEqual(len(suboptimal_links), 0, suboptimal_links)
class PCISkirootSoftboot(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
@classmethod
def setUpClass(cls):
super(PCISkirootSoftboot, cls).setUpClass()
cls.pty.sendline("reboot")
cls.cv_SYSTEM.set_state(OpSystemState.IPLing)
# clear the states since we rebooted outside the state machine
cls.cv_SYSTEM.util.clear_state(cls.cv_SYSTEM)
cls.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
@classmethod
def tearDownClass(cls):
super(PCISkirootSoftboot, cls).tearDownClass()
def setUp(self):
# this left as placeholder for per test setUp
super(PCISkirootSoftboot, self).setUp()
class PCISkirootHardboot(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
@classmethod
def setUpClass(cls):
super(PCISkirootHardboot, cls).setUpClass(power_cycle=1)
@classmethod
def tearDownClass(cls):
super(PCISkirootHardboot, cls).tearDownClass()
def setUp(self):
# this left as placeholder for per test setUp
super(PCISkirootHardboot, self).setUp()
class PCISkiroot(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
def setUp(self):
# this left as placeholder for per test setUp
super(PCISkiroot, self).setUp()
class PCIHostSoftboot(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
@classmethod
def setUpClass(cls):
super(PCIHostSoftboot, cls).setUpClass(desired=OpSystemState.OS)
cls.pty.sendline("reboot")
cls.cv_SYSTEM.set_state(OpSystemState.BOOTING)
# clear the states since we rebooted outside the state machine
cls.cv_SYSTEM.util.clear_state(cls.cv_SYSTEM)
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
@classmethod
def tearDownClass(cls):
super(PCIHostSoftboot, cls).tearDownClass()
def setUp(self):
# this left as placeholder for per test setUp
super(PCIHostSoftboot, self).setUp()
class PCIHostHardboot(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
@classmethod
def setUpClass(cls):
super(PCIHostHardboot, cls).setUpClass(
desired=OpSystemState.OS, power_cycle=1)
@classmethod
def tearDownClass(cls):
super(PCIHostHardboot, cls).tearDownClass()
def setUp(self):
# this left as placeholder for per test setUp
super(PCIHostHardboot, self).setUp()
class PCIHost(OpClassPCI, unittest.TestCase):
'''
Class that allows running the parent class tests with a unique setup
'''
@classmethod
def setUpClass(cls):
super(PCIHost, cls).setUpClass(desired=OpSystemState.OS)
def setUp(self):
# this left as placeholder for per test setUp
super(PCIHost, self).setUp()
def skiroot_softboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.skiroot_softboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'compare_boot_devices']
return unittest.TestSuite(list(map(PCISkirootSoftboot, tests)))
def skiroot_hardboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.skiroot_hardboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'compare_boot_devices']
return unittest.TestSuite(list(map(PCISkirootHardboot, tests)))
def skiroot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite BasicPCI
--run testcases.OpTestPCI.skiroot_suite
This suite does not care about soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices']
return unittest.TestSuite(list(map(PCISkiroot, tests)))
def skiroot_full_suite():
'''
Function used to prepare a test suite (see op-test)
--run testcases.OpTestPCI.skiroot_full_suite
This suite does not care about soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'driver_bind']
return unittest.TestSuite(list(map(PCISkiroot, tests)))
def host_softboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.host_softboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices', 'pci_link_check',
'compare_boot_devices', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHostSoftboot, tests)))
def host_hardboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.host_hardboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices', 'pci_link_check',
'compare_boot_devices', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHostHardboot, tests)))
def host_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite BasicPCI
--run testcases.OpTestPCI.host_suite
This suite does not care about soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices']
return unittest.TestSuite(list(map(PCIHost, tests)))
def host_full_suite():
'''
Function used to prepare a test suite (see op-test)
--run testcases.OpTestPCI.host_full_suite
This suite does not care about soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHost, tests)))
|
open-power/op-test-framework
|
testcases/OpTestPCI.py
|
Python
|
apache-2.0
| 36,744
| 0.001116
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetExerciseFollowUp
# Retrieves user data about all exercises which have the specified exercise as a prerequisite.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetExerciseFollowUp(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetExerciseFollowUp Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetExerciseFollowUp, self).__init__(temboo_session, '/Library/KhanAcademy/Users/GetExerciseFollowUp')
def new_input_set(self):
return GetExerciseFollowUpInputSet()
def _make_result_set(self, result, path):
return GetExerciseFollowUpResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetExerciseFollowUpChoreographyExecution(session, exec_id, path)
class GetExerciseFollowUpInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetExerciseFollowUp
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Khan Academy.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The OAuth Consumer Secret provided by Khan Academy.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ConsumerSecret', value)
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((optional, string) The email address (coach or student ID) of user. If not provided, defaults to currently logged in user.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('Email', value)
def set_ExerciseName(self, value):
"""
Set the value of the ExerciseName input for this Choreo. ((required, string) The exercise for which you want to retrieve follow-up exercises (e.g. "simplifying_fractions").)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('ExerciseName', value)
def set_OAuthTokenSecret(self, value):
"""
Set the value of the OAuthTokenSecret input for this Choreo. ((required, string) The OAuth Token Secret retrieved during the OAuth process.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('OAuthTokenSecret', value)
def set_OAuthToken(self, value):
"""
Set the value of the OAuthToken input for this Choreo. ((required, string) The OAuth Token retrieved during the OAuth process.)
"""
super(GetExerciseFollowUpInputSet, self)._set_input('OAuthToken', value)
class GetExerciseFollowUpResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetExerciseFollowUp Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Khan Academy.)
"""
return self._output.get('Response', None)
class GetExerciseFollowUpChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetExerciseFollowUpResultSet(response, path)
|
jordanemedlock/psychtruths
|
temboo/core/Library/KhanAcademy/Users/GetExerciseFollowUp.py
|
Python
|
apache-2.0
| 4,590
| 0.0061
|
# RANDOM PASSWORD GENERATOR
# Author: Dan Barrese (danbarrese.com)
# Version: 1.01
# Description: Yep. This is my first Python script.
# Skills exemplified in this script:
# * Random number generation
# * Command line interface
# * List comprehension
# * Loops
#
# Update History:
# 2013.12.25 [DRB][1.0] Initial implementation.
# 2013.12.29 [DRB][1.01] Defaulted to 10 passwords and length of 5.
import random
import string
import sys
import argparse
# Parse arguments.
parser = argparse.ArgumentParser(description='Random password generator.')
parser.add_argument('--length', '-l', metavar='L', type=int, nargs=1,
dest='length', default=[5],
help='the number (L) of characters in the generated password')
parser.add_argument('--count', '-c', metavar='C', type=int, nargs=1,
dest='count', default=[10],
help='the number (C) of passwords to generate')
parser.add_argument('--first-char', '-a', metavar='X', type=str, nargs=1,
dest='first_char', default=[None],
help='the first character (X) in the generated password')
parser.add_argument('--numbers', dest='do_numbers', action='store_true',
help='include numbers [0-9]')
parser.add_argument('--no-numbers', dest='do_numbers', action='store_false',
help='do NOT include numbers [0-9]')
parser.set_defaults(do_numbers=True)
parser.add_argument('--alpha-lower', dest='do_alpha_lower', action='store_true',
help='include numbers [a-z]')
parser.add_argument('--no-alpha-lower', dest='do_alpha_lower', action='store_false',
help='do NOT include alphas [a-z]')
parser.set_defaults(do_alpha_lower=True)
parser.add_argument('--alpha-upper', dest='do_alpha_upper', action='store_true',
help='include numbers [A-Z]')
parser.add_argument('--no-alpha-upper', dest='do_alpha_upper', action='store_false',
help='do NOT include alphas [A-Z]')
parser.set_defaults(do_alpha_upper=True)
parser.add_argument('--symbols-common', dest='do_symbols_common', action='store_true',
help='include common symbols')
parser.add_argument('--no-symbols-common', dest='do_symbols_common', action='store_false',
help='do NOT include common symbols')
parser.set_defaults(do_symbols_common=True)
parser.add_argument('--symbols-uncommon', dest='do_symbols_uncommon', action='store_true',
help='include uncommon symbols')
parser.add_argument('--no-symbols-uncommon', dest='do_symbols_uncommon', action='store_false',
help='do NOT include uncommon symbols')
parser.set_defaults(do_symbols_uncommon=False)
args = parser.parse_args()
# Set arguments to variables.
pwd_len = args.length[0]
pwd_count = args.count[0]
do_numbers = args.do_numbers
do_alpha_lower = args.do_alpha_lower
do_alpha_upper = args.do_alpha_upper
do_symbols_common = args.do_symbols_common
do_symbols_uncommon = args.do_symbols_uncommon
# Define possible sets of characters.
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
alphas_lowercase = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm'
, 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
]
alphas_uppercase = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
symbols_common = [
'!', '$', '&', '(', ')', '-', '_', '@', '#'
]
symbols_uncommon = [
'%', '\'', '/', ':', ';', '<', '=', '>', '?', '[',
'\\', ']', '^', '{', '|', '}', '~', '"', '*', '+', ',', '.', '`'
]
# Define keyset.
keyset = []
if do_numbers:
keyset = keyset + numbers
if do_alpha_lower:
keyset = keyset + alphas_lowercase
if do_alpha_upper:
keyset = keyset + alphas_uppercase
if do_symbols_common:
keyset = keyset + symbols_common
if do_symbols_uncommon:
keyset = keyset + symbols_uncommon
num_pwds_generated = 0
while num_pwds_generated < pwd_count:
# Define first character in the password.
first_char = args.first_char[0]
if first_char is None:
first_char = random.sample(keyset, 1)[0]
# Make password.
pwd_len_counter = 2
pwd_sequence = [first_char]
while pwd_len_counter <= pwd_len:
pwd_sequence.append(random.sample(keyset, 1)[0])
pwd_len_counter += 1
pwd_str = ''.join(pwd_sequence)
# Print password.
print(pwd_str)
num_pwds_generated += 1
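# Example invocations (illustrative, based on the argparse options defined above):
#
#     python pwdgen.py                    # ten 5-character passwords (defaults)
#     python pwdgen.py -l 12 -c 3         # three 12-character passwords
#     python pwdgen.py --no-symbols-common --first-char A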
|
danbarrese/pwdgen
|
pwdgen.py
|
Python
|
gpl-3.0
| 4,586
| 0.002617
|
#!/usr/bin/env python
import io
import os
import sys
try:
from setuptools import setup, Command, find_packages
except ImportError:
from distutils.core import setup, Command, find_packages
__version__ = "1.4.3"
with io.open('README.rst', encoding='utf-8') as readme_file:
long_description = readme_file.read() + "\n\n"
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
os.system("git tag -a v%s -m 'version v%s'" % (__version__, __version__))
os.system("git push --tags")
os.system("git push")
sys.exit()
requirements = [
'click==6.2',
'PyYAML==3.11',
'tabulate==0.7.5',
'tinydb==3.1.2',
'rstr==2.2.3',
]
class PyTest(Command):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
self.pytest_args = ["-v", "tests/"]
def finalize_options(self):
pass
def run(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
class PyTestCoverage(PyTest):
def initialize_options(self):
self.pytest_args = [
"-v", "tests",
"--cov", 'passpie',
"--cov-config", ".coveragerc",
"--cov-report", "term-missing",
]
setup(
name='passpie',
version=__version__,
license='License :: OSI Approved :: MIT License',
description="Manage your login credentials from the terminal painlessly.",
long_description=long_description,
author='Marcwebbie',
author_email='marcwebbie@gmail.com',
url='https://github.com/marcwebbie/passpie',
download_url='https://github.com/marcwebbie/passpie',
packages=find_packages(),
entry_points={
'console_scripts': [
'passpie=passpie.cli:cli',
]
},
install_requires=requirements,
cmdclass={'test': PyTest, 'coverage': PyTestCoverage},
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python',
'Topic :: Security :: Cryptography',
],
)
|
eiginn/passpie
|
setup.py
|
Python
|
mit
| 2,730
| 0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools for the submission of Tasks."""
from __future__ import unicode_literals, division, print_function
import os
import time
import ruamel.yaml as yaml
import pickle
from collections import deque
from datetime import timedelta
from six.moves import cStringIO
from monty.io import get_open_fds
from monty.string import boxed, is_string
from monty.os.path import which
from monty.collections import AttrDict, dict2namedtuple
from monty.termcolor import cprint
from .utils import as_bool, File, Directory
from . import qutils as qu
from pymatgen.util.io_utils import ask_yesno
try:
import apscheduler
has_apscheduler = True
has_sched_v3 = apscheduler.version >= "3.0.0"
except ImportError:
has_apscheduler = False
import logging
logger = logging.getLogger(__name__)
__all__ = [
"ScriptEditor",
"PyLauncher",
"PyFlowScheduler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class ScriptEditor(object):
"""Simple editor that simplifies the writing of shell scripts"""
_shell = '/bin/bash'
def __init__(self):
self._lines = []
@property
def shell(self):
return self._shell
def _add(self, text, pre=""):
if is_string(text):
self._lines.append(pre + text)
else:
self._lines.extend([pre + t for t in text])
def reset(self):
"""Reset the editor."""
try:
del self._lines
except AttributeError:
pass
def shebang(self):
"""Adds the shebang line."""
self._lines.append('#!' + self.shell)
def declare_var(self, key, val):
"""Declare a env variable. If val is None the variable is unset."""
if val is not None:
line = "export " + key + '=' + str(val)
else:
line = "unset " + key
self._add(line)
def declare_vars(self, d):
"""Declare the variables defined in the dictionary d."""
for k, v in d.items():
self.declare_var(k, v)
def export_envar(self, key, val):
"""Export an environment variable."""
line = "export " + key + "=" + str(val)
self._add(line)
def export_envars(self, env):
"""Export the environment variables contained in the dict env."""
for k, v in env.items():
self.export_envar(k, v)
def add_emptyline(self):
"""Add an empty line."""
self._add("", pre="")
def add_comment(self, comment):
"""Add a comment"""
self._add(comment, pre="# ")
def load_modules(self, modules):
"""Load the list of specified modules."""
for module in modules:
self.load_module(module)
def load_module(self, module):
self._add('module load ' + module + " 2>> mods.err")
def add_line(self, line):
self._add(line)
def add_lines(self, lines):
self._add(lines)
def get_script_str(self, reset=True):
"""Returns a string with the script and reset the editor if reset is True"""
s = "\n".join(l for l in self._lines)
if reset:
self.reset()
return s
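# Illustrative sketch (not part of the original module): a ScriptEditor is used
# by composing lines and then dumping the script text, roughly:
#
#     se = ScriptEditor()
#     se.shebang()
#     se.export_envar("OMP_NUM_THREADS", 1)
#     se.add_line("mpirun abinit < run.files > log 2> err")
#     script = se.get_script_str()   # returns the script and resets the editor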
class PyLauncherError(Exception):
"""Error class for PyLauncher."""
class PyLauncher(object):
"""This object handle the submission of the tasks contained in a :class:`Flow`"""
Error = PyLauncherError
def __init__(self, flow, **kwargs):
"""
Initialize the object
Args:
flow: :class:`Flow` object
max_njobs_inqueue: The launcher will stop submitting jobs when the
number of jobs in the queue is >= Max number of jobs
"""
self.flow = flow
self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)
#self.flow.check_pid_file()
def single_shot(self):
"""
Run the first :class:`Task` that is ready for execution.
Returns:
Number of jobs launched.
"""
num_launched = 0
# Get the tasks that can be executed in each workflow.
tasks = []
for work in self.flow:
try:
task = work.fetch_task_to_run()
if task is not None:
tasks.append(task)
else:
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.debug("No task to run! Possible deadlock")
except StopIteration:
logger.info("All tasks completed.")
# Submit the tasks and update the database.
if tasks:
tasks[0].start()
num_launched += 1
self.flow.pickle_dump()
return num_launched
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
"""
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.
Args:
max_nlaunch: Maximum number of launches. default: no limit.
max_loops: Maximum number of loops
sleep_time: seconds to sleep between rapidfire loop iterations
Returns:
The number of tasks launched.
"""
num_launched, do_exit, launched = 0, False, []
for count in range(max_loops):
if do_exit:
break
if count > 0:
time.sleep(sleep_time)
tasks = self.fetch_tasks_to_run()
# I don't know why but we receive duplicated tasks.
if any(task in launched for task in tasks):
logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))
# Preventive test.
tasks = [t for t in tasks if t not in launched]
if not tasks:
continue
for task in tasks:
fired = task.start()
if fired:
launched.append(task)
num_launched += 1
if num_launched >= max_nlaunch > 0:
logger.info('num_launched >= max_nlaunch, going back to sleep')
do_exit = True
break
# Update the database.
self.flow.pickle_dump()
return num_launched
def fetch_tasks_to_run(self):
"""
Return the list of tasks that can be submitted.
Empty list if no task has been found.
"""
tasks_to_run = []
for work in self.flow:
tasks_to_run.extend(work.fetch_alltasks_to_run())
return tasks_to_run
class PyFlowSchedulerError(Exception):
"""Exceptions raised by `PyFlowScheduler`."""
class PyFlowScheduler(object):
"""
This object schedules the submission of the tasks in a :class:`Flow`.
There are two types of errors that might occur during the execution of the jobs:
#. Python exceptions
#. Errors in the ab-initio code
Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
The set of errors in the ab-initio code is much broader. It includes wrong input data, segmentation
faults, problems with the resource manager, etc. The flow tries to handle the most common cases
but there's still a lot of room for improvement.
Note, in particular, that `PyFlowScheduler` will shut down automatically in the following cases:
#. The number of python exceptions is > max_num_pyexcs
#. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs
#. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).
#. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
If the mail cannot be sent, the scheduler will shut down automatically.
This check prevents the scheduler from being trapped in an infinite loop.
"""
# Configuration file.
YAML_FILE = "scheduler.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
Error = PyFlowSchedulerError
@classmethod
def autodoc(cls):
i = cls.__init__.__doc__.index("Args:")
return cls.__init__.__doc__[i+5:]
def __init__(self, **kwargs):
"""
Args:
weeks: number of weeks to wait (DEFAULT: 0).
days: number of days to wait (DEFAULT: 0).
hours: number of hours to wait (DEFAULT: 0).
minutes: number of minutes to wait (DEFAULT: 0).
seconds: number of seconds to wait (DEFAULT: 0).
mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
(DEFAULT: None i.e. not used).
verbose: (int) verbosity level. (DEFAULT: 0)
use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
file before launching the jobs. (DEFAULT: "no")
max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
(int, DEFAULT: 1 day).
max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
(int, DEFAULT: 0)
max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
(int, DEFAULT: 0)
safety_ratio: The scheduler will exits if the number of jobs launched becomes greater than
`safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
(DEFAULT: -1 i.e. no limit)
debug: Debug level. Use 0 for production (int, DEFAULT: 0)
fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
rmflow: If "yes", the scheduler will remove the flow directory if the calculation
completed successfully. (DEFAULT: "no")
killjobs_if_errors: "yes" if the scheduler should try to kill all the running jobs
before exiting due to an error. (DEFAULT: "yes")
"""
# Options passed to the scheduler.
self.sched_options = AttrDict(
weeks=kwargs.pop("weeks", 0),
days=kwargs.pop("days", 0),
hours=kwargs.pop("hours", 0),
minutes=kwargs.pop("minutes", 0),
seconds=kwargs.pop("seconds", 0),
#start_date=kwargs.pop("start_date", None),
)
if all(not v for v in self.sched_options.values()):
raise self.Error("Wrong set of options passed to the scheduler.")
self.mailto = kwargs.pop("mailto", None)
self.verbose = int(kwargs.pop("verbose", 0))
self.use_dynamic_manager = as_bool(kwargs.pop("use_dynamic_manager", False))
self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
self.max_ncores_used = kwargs.pop("max_ncores_used", None)
self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))
self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
#self.max_etime_s = kwargs.pop("max_etime_s", )
self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
self.debug = kwargs.pop("debug", 0)
self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
self.rmflow = as_bool(kwargs.pop("rmflow", False))
self.killjobs_if_errors = as_bool(kwargs.pop("killjobs_if_errors", True))
self.customer_service_dir = kwargs.pop("customer_service_dir", None)
if self.customer_service_dir is not None:
self.customer_service_dir = Directory(self.customer_service_dir)
self._validate_customer_service()
if kwargs:
raise self.Error("Unknown arguments %s" % kwargs)
if not has_apscheduler:
raise RuntimeError("Install apscheduler with pip")
if has_sched_v3:
logger.warning("Using scheduler v>=3.0.0")
from apscheduler.schedulers.blocking import BlockingScheduler
self.sched = BlockingScheduler()
else:
from apscheduler.scheduler import Scheduler
self.sched = Scheduler(standalone=True)
self.nlaunch = 0
self.num_reminders = 1
# Used to keep track of the exceptions raised while the scheduler is running
self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)
# Used to push additional info during the execution.
self.history = deque(maxlen=100)
@classmethod
def from_file(cls, filepath):
"""Read the configuration parameters from a Yaml file."""
with open(filepath, "rt") as fh:
return cls(**yaml.safe_load(fh))
@classmethod
def from_string(cls, s):
"""Create an istance from string s containing a YAML dictionary."""
stream = cStringIO(s)
stream.seek(0)
return cls(**yaml.safe_load(stream))
@classmethod
def from_user_config(cls):
"""
Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
Search first in the working directory and then in the configuration directory of abipy.
Raises:
`RuntimeError` if file is not found.
"""
# Try in the current directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if os.path.exists(path):
return cls.from_file(path)
# Try in the configuration directory.
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if os.path.exists(path):
return cls.from_file(path)
raise cls.Error("Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path))
def __str__(self):
"""String representation."""
lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
app = lines.append
app("Scheduler options: %s" % str(self.sched_options))
if self.flow is not None:
app(80 * "=")
app(str(self.flow))
return "\n".join(lines)
@property
def pid(self):
"""The pid of the process associated to the scheduler."""
try:
return self._pid
except AttributeError:
self._pid = os.getpid()
return self._pid
@property
def pid_file(self):
"""
Absolute path of the file with the pid.
The file is located in the workdir of the flow
"""
return self._pid_file
@property
def flow(self):
"""`Flow`."""
try:
return self._flow
except AttributeError:
return None
@property
def num_excs(self):
"""Number of exceptions raised so far."""
return len(self.exceptions)
def get_delta_etime(self):
"""Returns a `timedelta` object representing with the elapsed time."""
return timedelta(seconds=(time.time() - self.start_time))
def add_flow(self, flow):
"""
Add an :class:`Flow` flow to the scheduler.
"""
if hasattr(self, "_flow"):
raise self.Error("Only one flow can be added to the scheduler.")
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
# Build dirs and files (if not yet done)
flow.build()
with open(flow.pid_file, "wt") as fh:
fh.write(str(self.pid))
self._pid_file = flow.pid_file
self._flow = flow
def _validate_customer_service(self):
"""
        Validate input parameters. If customer service is on, create the directory
        for tarball files with the correct permissions for user and group.
"""
direc = self.customer_service_dir
if not direc.exists:
mode = 0o750
print("Creating customer_service_dir %s with mode %s" % (direc, mode))
direc.makedirs()
os.chmod(direc.path, mode)
if self.mailto is None:
raise RuntimeError("customer_service_dir requires mailto option in scheduler.yml")
def _do_customer_service(self):
"""
This method is called before the shutdown of the scheduler.
        If customer_service is on and the flow didn't complete successfully,
        a lightweight tarball file with inputs and the most important output files
        is created in customer_service_dir.
"""
if self.customer_service_dir is None: return
doit = self.exceptions or not self.flow.all_ok
doit = True
if not doit: return
prefix = os.path.basename(self.flow.workdir) + "_"
import tempfile, datetime
suffix = str(datetime.datetime.now()).replace(" ", "-")
# Remove milliseconds
        i = suffix.find(".")
if i != -1: suffix = suffix[:i]
suffix += ".tar.gz"
#back = os.getcwd()
#os.chdir(self.customer_service_dir.path)
_, tmpname = tempfile.mkstemp(suffix="_" + suffix, prefix=prefix,
dir=self.customer_service_dir.path, text=False)
print("Dear customer,\n We are about to generate a tarball in\n %s" % tmpname)
self.flow.make_light_tarfile(name=tmpname)
#os.chdir(back)
def start(self):
"""
        Starts the scheduler in a new thread. Returns 0 on success.
In standalone mode, this method will block until there are no more scheduled jobs.
"""
self.history.append("Started on %s" % time.asctime())
self.start_time = time.time()
if not has_apscheduler:
raise RuntimeError("Install apscheduler with pip")
if has_sched_v3:
self.sched.add_job(self.callback, "interval", **self.sched_options)
else:
self.sched.add_interval_job(self.callback, **self.sched_options)
errors = self.flow.look_before_you_leap()
if errors:
self.exceptions.append(errors)
return 1
# Try to run the job immediately. If something goes wrong return without initializing the scheduler.
self._runem_all()
if self.exceptions:
self.cleanup()
self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
return 1
try:
self.sched.start()
return 0
except KeyboardInterrupt:
self.shutdown(msg="KeyboardInterrupt from user")
if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"):
print("Number of jobs cancelled:", self.flow.cancel())
self.flow.pickle_dump()
return -1
def _runem_all(self):
"""
This function checks the status of all tasks,
tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
"""
excs = []
flow = self.flow
# Allow to change the manager at run-time
if self.use_dynamic_manager:
from pymatgen.io.abinit.tasks import TaskManager
new_manager = TaskManager.from_user_config()
for work in flow:
work.set_manager(new_manager)
nqjobs = 0
if self.contact_resource_manager:
# This call is expensive and therefore it's optional
nqjobs = flow.get_njobs_in_queue()
if nqjobs is None:
nqjobs = 0
if flow.manager.has_queue: logger.warning('Cannot get njobs_inqueue')
if nqjobs >= self.max_njobs_inqueue:
print("Too many jobs in the queue: %s, returning" % nqjobs)
return
if self.max_nlaunches == -1:
max_nlaunch = self.max_njobs_inqueue - nqjobs
else:
max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)
# check status.
flow.check_status(show=False)
        # This check is not perfect, we should make a list of tasks to submit
        # and select only the subset so that we don't exceed max_ncores_used
# Many sections of this code should be rewritten.
#if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
print("Cannot exceed max_ncores_use:d %s" % self.max_ncores_used)
return
# Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
for task in self.flow.unconverged_tasks:
try:
logger.info("Flow will try restart task %s" % task)
fired = task.restart()
if fired:
self.nlaunch += 1
max_nlaunch -= 1
if max_nlaunch == 0:
logger.info("Restart: too many jobs in the queue, returning")
flow.pickle_dump()
return
except task.RestartError:
excs.append(straceback())
        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters.
        # Re-enabled by MsS: disable things that do not work at the low level;
        # fix only prepares for restarting, and sets the task to ready.
if self.fix_qcritical:
nfixed = flow.fix_queue_critical()
if nfixed: print("Fixed %d QCritical error(s)" % nfixed)
nfixed = flow.fix_abicritical()
if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)
# update database
flow.pickle_dump()
# Submit the tasks that are ready.
try:
nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
self.nlaunch += nlaunch
if nlaunch:
print("[%s] Number of launches: %d" % (time.asctime(), nlaunch))
except Exception:
excs.append(straceback())
# check status.
flow.show_status()
if excs:
logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
self.exceptions.extend(excs)
def callback(self):
"""The function that will be executed by the scheduler."""
try:
return self._callback()
except:
# All exceptions raised here will trigger the shutdown!
s = straceback()
self.exceptions.append(s)
# This is useful when debugging
#try:
# print("Exception in callback, will cancel all tasks")
# for task in self.flow.iflat_tasks():
# task.cancel()
#except Exception:
# pass
self.shutdown(msg="Exception raised in callback!\n" + s)
def _callback(self):
"""The actual callback."""
if self.debug:
# Show the number of open file descriptors
print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())
self._runem_all()
# Mission accomplished. Shutdown the scheduler.
all_ok = self.flow.all_ok
if all_ok:
return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")
# Handle failures.
err_lines = []
# Shall we send a reminder to the user?
delta_etime = self.get_delta_etime()
if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
self.num_reminders += 1
msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
(self.pid, self.flow, delta_etime))
retcode = self.send_email(msg, tag="[REMINDER]")
if retcode:
# Cannot send mail, shutdown now!
msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
" but send_email returned %d. Aborting now" % retcode)
err_lines.append(msg)
#if delta_etime.total_seconds() > self.max_etime_s:
# err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)
# Too many exceptions. Shutdown the scheduler.
if self.num_excs > self.max_num_pyexcs:
msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
self.num_excs, self.max_num_pyexcs)
err_lines.append(boxed(msg))
# Paranoid check: disable the scheduler if we have submitted
# too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
self.nlaunch, self.flow.num_tasks)
err_lines.append(boxed(msg))
# Count the number of tasks with status == S_ERROR.
if self.flow.num_errored_tasks > self.max_num_abierrs:
msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
self.flow.num_errored_tasks, self.max_num_abierrs)
err_lines.append(boxed(msg))
# Test on the presence of deadlocks.
g = self.flow.find_deadlocks()
if g.deadlocked:
# Check the flow again so that status are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
if g.deadlocked and not g.runnables and not g.running:
err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))
if not g.runnables and not g.running:
# Check the flow again so that status are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
if not g.runnables and not g.running:
err_lines.append("No task is running and cannot find other tasks to submit.")
# Something wrong. Quit
if err_lines:
# Cancel all jobs.
if self.killjobs_if_errors:
cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
try:
num_cancelled = 0
for task in self.flow.iflat_tasks():
num_cancelled += task.cancel()
cprint("Killed %d tasks" % num_cancelled, "yellow")
except Exception as exc:
cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")
self.shutdown("\n".join(err_lines))
return len(self.exceptions)
def cleanup(self):
"""Cleanup routine: remove the pid file and save the pickle database"""
try:
os.remove(self.pid_file)
except OSError as exc:
logger.critical("Could not remove pid_file: %s", exc)
# Save the final status of the flow.
self.flow.pickle_dump()
def shutdown(self, msg):
"""Shutdown the scheduler."""
try:
self.cleanup()
self.history.append("Completed on: %s" % time.asctime())
self.history.append("Elapsed time: %s" % self.get_delta_etime())
if self.debug:
print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())
retcode = self.send_email(msg)
if self.debug:
print("send_mail retcode", retcode)
# Write file with the list of exceptions:
if self.exceptions:
dump_file = os.path.join(self.flow.workdir, "_exceptions")
with open(dump_file, "wt") as fh:
fh.writelines(self.exceptions)
fh.write("Shutdown message:\n%s" % msg)
lines = []
app = lines.append
app("Submitted on: %s" % time.ctime(self.start_time))
app("Completed on: %s" % time.asctime())
app("Elapsed time: %s" % str(self.get_delta_etime()))
if self.flow.all_ok:
app("Flow completed successfully")
else:
app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
app("use `abirun.py FLOWDIR debug` to analyze the problem.")
app("Shutdown message:\n%s" % msg)
print("")
print("\n".join(lines))
print("")
self._do_customer_service()
if self.flow.all_ok:
print("Calling flow.finalize()...")
self.flow.finalize()
#print("finalized:", self.flow.finalized)
if self.rmflow:
app("Flow directory will be removed...")
try:
self.flow.rmtree()
except Exception:
logger.warning("Ignoring exception while trying to remove flow dir.")
finally:
# Shutdown the scheduler thus allowing the process to exit.
logger.debug('This should be the shutdown of the scheduler')
# Unschedule all the jobs before calling shutdown
#self.sched.print_jobs()
if not has_sched_v3:
for job in self.sched.get_jobs():
self.sched.unschedule_job(job)
#self.sched.print_jobs()
self.sched.shutdown()
# Uncomment the line below if shutdown does not work!
#os.system("kill -9 %d" % os.getpid())
def send_email(self, msg, tag=None):
"""
Send an e-mail before completing the shutdown.
        Returns 0 on success.
"""
try:
return self._send_email(msg, tag)
except:
self.exceptions.append(straceback())
return -2
def _send_email(self, msg, tag):
if self.mailto is None:
return -1
header = msg.splitlines()
app = header.append
app("Submitted on: %s" % time.ctime(self.start_time))
app("Completed on: %s" % time.asctime())
app("Elapsed time: %s" % str(self.get_delta_etime()))
app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)
strio = cStringIO()
strio.writelines("\n".join(header) + 4 * "\n")
# Add the status of the flow.
self.flow.show_status(stream=strio)
if self.exceptions:
# Report the list of exceptions.
strio.writelines(self.exceptions)
if tag is None:
tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"
return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
def sendmail(subject, text, mailto, sender=None):
"""
Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status
"""
def user_at_host():
from socket import gethostname
return os.getlogin() + "@" + gethostname()
# Body of the message.
try:
sender = user_at_host() if sender is None else sender
except OSError:
sender = 'abipyscheduler@youknowwhere'
if is_string(mailto): mailto = [mailto]
from email.mime.text import MIMEText
mail = MIMEText(text)
mail["Subject"] = subject
mail["From"] = sender
mail["To"] = ", ".join(mailto)
msg = mail.as_string()
# sendmail works much better than the python interface.
# Note that sendmail is available only on Unix-like OS.
from subprocess import Popen, PIPE
sendmail = which("sendmail")
if sendmail is None: return -1
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
outdata, errdata = p.communicate(msg)
return len(errdata)
def __test_sendmail():
retcode = sendmail("sendmail_test", text="hello\nworld", mailto="nobody@nowhere.com")
print("Retcode", retcode)
assert retcode == 0
class BatchLauncherError(Exception):
"""Exceptions raised by :class:`BatchLauncher`."""
class BatchLauncher(object):
"""
This object automates the execution of multiple flow. It generates a job script
that uses abirun.py to run each flow stored in self with a scheduler.
The execution of the flows is done in sequential but each scheduler will start
to submit the tasks of the flow in autoparal mode.
The `BatchLauncher` is pickleable, hence one can reload it, check if all flows are completed
and rerun only those that are not completed due to the timelimit.
"""
PICKLE_FNAME = "__BatchLauncher__.pickle"
Error = BatchLauncherError
@classmethod
def from_dir(cls, top, workdir=None, name=None, manager=None, max_depth=2):
"""
        Find all flows located within the directory `top` and build the `BatchLauncher`.
Args:
top: Top level directory or list of directories.
workdir: Batch workdir.
name:
manager: :class:`TaskManager` object. If None, the manager is read from `manager.yml`
                In this case the YAML file must provide the entry `batch_manager` that defines
the queue adapter used to submit the batch script.
max_depth: Search in directory only if it is N or fewer levels below top
"""
from .flows import Flow
def find_pickles(dirtop):
# Walk through each directory inside path and find the pickle database.
paths = []
for dirpath, dirnames, filenames in os.walk(dirtop):
fnames = [f for f in filenames if f == Flow.PICKLE_FNAME]
paths.extend([os.path.join(dirpath, f) for f in fnames])
return paths
if is_string(top):
pickle_paths = find_pickles(top)
else:
# List of directories.
pickle_paths = []
for p in top:
pickle_paths.extend(find_pickles(p))
#workdir = os.path.join(top, "batch") if workdir is None else workdir
workdir = "batch" if workdir is None else workdir
new = cls(workdir, name=name, manager=manager)
for path in pickle_paths:
new.add_flow(path)
return new
@classmethod
def pickle_load(cls, filepath):
"""
Loads the object from a pickle file.
Args:
            filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
with open(filepath, "rb") as fh:
new = pickle.load(fh)
# new.flows is a list of strings with the workdir of the flows (see __getstate__).
# Here we read the Flow from the pickle file so that we have
        # an up-to-date version and we set the flow in visitor_mode
from .flows import Flow
flow_workdirs, new.flows = new.flows, []
for flow in map(Flow.pickle_load, flow_workdirs):
new.add_flow(flow)
return new
def pickle_dump(self):
"""Save the status of the object in pickle format."""
with open(os.path.join(self.workdir, self.PICKLE_FNAME), mode="wb") as fh:
pickle.dump(self, fh)
def __getstate__(self):
"""
        Return the state that will be pickled as the contents of the instance.
Here we replace the flow objects with their workdir because we are observing
the flows and we want to have the updated version when we reload the `BatchLauncher` from pickle.
"""
d = {k: v for k, v in self.__dict__.items() if k not in ["flows"]}
d["flows"] = [flow.workdir for flow in self.flows]
return d
def __init__(self, workdir, name=None, flows=None, manager=None, timelimit=None):
"""
Args:
workdir: Working directory
name: Name assigned to the `BatchLauncher`.
flows: List of `Flow` objects.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
timelimit: Time limit (int with seconds or string with time given with
the slurm convention: "days-hours:minutes:seconds".
If timelimit is None, the default value specified in the `batch_adapter` is taken.
"""
self.workdir = os.path.abspath(workdir)
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
else:
pass
#raise RuntimeError("Directory %s already exists. Use BatchLauncher.pickle_load()" % self.workdir)
self.name = os.path.basename(self.workdir) if name is None else name
self.script_file = File(os.path.join(self.workdir, "run.sh"))
self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
self.log_file = File(os.path.join(self.workdir, "run.log"))
self.batch_pidfile = File(os.path.join(self.workdir, "batch.pid"))
from .tasks import TaskManager
manager = TaskManager.as_manager(manager)
# Extract the qadapater to be used for the batch script.
try:
self.qadapter = qad = manager.batch_adapter
except AttributeError:
raise RuntimeError("Your manager.yml file does not define an entry for the batch_adapter")
if qad is None:
raise RuntimeError("Your manager.yml file does not define an entry for the batch_adapter")
# Set mpi_procs to 1 just to be on the safe side
# Then allow the user to change the timelimit via __init__
qad.set_mpi_procs(1)
if timelimit is not None:
self.set_timelimit(timelimit)
# FIXME: Remove me!
self.set_timelimit(36000)
# Initialize list of flows.
if flows is None: flows = []
if not isinstance(flows, (list, tuple)): flows = [flows]
self.flows = flows
def set_timelimit(self, timelimit):
"""
Set the timelimit of the batch launcher.
Args:
timelimit: Time limit (int with seconds or string with time given
with the slurm convention: "days-hours:minutes:seconds".
"""
        self.qadapter.set_timelimit(qu.timelimit_parser(timelimit))
def to_string(self, **kwargs):
lines = []
lines.extend(str(self.qadapter).splitlines())
for i, flow in enumerate(self.flows):
lines.append("Flow [%d] " % i + str(flow))
return "\n".join(lines)
def __str__(self):
return self.to_string()
def add_flow(self, flow):
"""
Add a flow. Accept filepath or :class:`Flow` object. Return 1 if flow was added else 0.
"""
from .flows import Flow
flow = Flow.as_flow(flow)
if flow in self.flows:
raise self.Error("Cannot add same flow twice!")
if not flow.allocated:
# Set the workdir of the flow here. Create a dir in self.workdir with name flow.name
flow_workdir = os.path.join(self.workdir, os.path.basename(flow.name))
if flow_workdir in (flow.workdir for flow in self.flows):
raise self.Error("Two flows have the same name and hence the same workdir!")
flow.allocate(workdir=flow_workdir)
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
flow.check_status(show=False)
#if flow.all_ok:
# print("flow.all_ok: Ignoring %s" % flow)
# return 0
self.flows.append(flow)
#print("Flow %s added to the BatchLauncher" % flow)
return 1
def submit(self, **kwargs):
"""
Submit a job script that will run the schedulers with `abirun.py`.
Args:
verbose: Verbosity level
dry_run: Don't submit the script if dry_run. Default: False
Returns:
namedtuple with attributes:
retcode: Return code as returned by the submission script.
qjob: :class:`QueueJob` object.
num_flows_inbatch: Number of flows executed by the batch script
Return code of the job script submission.
"""
verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False)
if not self.flows:
print("Cannot submit an empty list of flows!")
return 0
if hasattr(self, "qjob"):
# This usually happens when we have loaded the object from pickle
            # and we have already submitted the batch script to the queue.
# At this point we need to understand if the previous batch job
# is still running before trying to submit it again. There are three cases:
#
            # 1) The batch script has completed within the timelimit and therefore
# the pid_file has been removed by the script. In this case, we
# should not try to submit it again.
# 2) The batch script has been killed due to timelimit (other reasons are possible
# but we neglect them). In this case the pid_file exists but there's no job with
            # this pid running and we can resubmit it again.
# 3) The batch script is still running.
print("BatchLauncher has qjob %s" % self.qjob)
            if not self.batch_pidfile.exists:
                print("It seems that the batch script reached the end. Won't try to submit it again")
return 0
msg = ("Here I have to understand if qjob is in the queue."
" but I need an abstract API that can retrieve info from the queue id")
raise RuntimeError(msg)
# TODO: Temptative API
if self.qjob.in_status("Running|Queued"):
print("Job is still running. Cannot submit")
else:
del self.qjob
script, num_flows_inbatch = self._get_script_nflows()
if num_flows_inbatch == 0:
print("All flows have reached all_ok! Batch script won't be submitted")
return 0
if verbose:
print("*** submission script ***")
print(script)
# Write the script.
self.script_file.write(script)
self.script_file.chmod(0o740)
        # Build the flows.
for flow in self.flows:
flow.build_and_pickle_dump()
# Submit the task and save the queue id.
if dry_run: return -1
print("Will submit %s flows in batch script" % len(self.flows))
self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path)
# Save the queue id in the pid file
# The file will be removed by the job script if execution is completed.
self.batch_pidfile.write(str(self.qjob.qid))
self.pickle_dump()
process.wait()
return dict2namedtuple(retcode=process.returncode, qjob=self.qjob,
num_flows_inbatch=num_flows_inbatch)
def _get_script_nflows(self):
"""
        Build the submission script. Return (script, num_flows_in_batch)
"""
flows_torun = [f for f in self.flows if not f.all_ok]
if not flows_torun:
return "", 0
executable = [
'export _LOG=%s' % self.log_file.path,
'date1=$(date +"%s")',
'echo Running abirun.py in batch mode > ${_LOG}',
" ",
]
app = executable.append
# Build list of abirun commands and save the name of the log files.
self.sched_logs, num_flows = [], len(flows_torun)
for i, flow in enumerate(flows_torun):
logfile = os.path.join(self.workdir, "log_" + os.path.basename(flow.workdir))
app("echo Starting flow %d/%d on: `date` >> ${LOG}" % (i+1, num_flows))
app("\nabirun.py %s scheduler > %s" % (flow.workdir, logfile))
app("echo Returning from abirun on `date` with retcode $? >> ${_LOG}")
assert logfile not in self.sched_logs
self.sched_logs.append(logfile)
# Remove the batch pid_file and compute elapsed time.
executable.extend([
" ",
"# Remove batch pid file",
'rm %s' % self.batch_pidfile.path,
" ",
"# Compute elapsed time",
'date2=$(date +"%s")',
'diff=$(($date2-$date1))',
'echo $(($diff / 60)) minutes and $(($diff % 60)) seconds elapsed. >> ${_LOG}'
])
return self.qadapter.get_script_str(
job_name=self.name,
launch_dir=self.workdir,
executable=executable,
qout_path=self.qout_file.path,
qerr_path=self.qerr_file.path,
), num_flows
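    # For reference, the "executable" list built above expands to a script body
    # roughly like the following sketch (paths are illustrative):
    #
    #   export _LOG=/path/to/batch/run.log
    #   date1=$(date +"%s")
    #   echo Running abirun.py in batch mode > ${_LOG}
    #   abirun.py /path/to/flow_0 scheduler > /path/to/batch/log_flow_0
    #   rm /path/to/batch/batch.pid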
def show_summary(self, **kwargs):
"""
Show a summary with the status of the flows.
"""
for flow in self.flows:
flow.show_summary()
def show_status(self, **kwargs):
"""
Report the status of the flows.
Args:
stream: File-like object, Default: sys.stdout
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
"""
for flow in self.flows:
flow.show_status(**kwargs)
|
matk86/pymatgen
|
pymatgen/io/abinit/launcher.py
|
Python
|
mit
| 47,862
| 0.003134
|
#!/usr/bin/env python
#
#===- check_clang_tidy.py - ClangTidy Test Helper ------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Test Helper
=====================
This script runs clang-tidy in fix mode and verifies fixes, messages, or both.
Usage:
check_clang_tidy.py [-resource-dir=<resource-dir>] \
[-assume-filename=<file-with-source-extension>] \
[-check-suffix=<file-check-suffix>] \
<source-file> <check-name> <temp-file> \
-- [optional clang-tidy arguments]
Example:
// RUN: %check_clang_tidy %s llvm-include-order %t -- -- -isystem %S/Inputs
"""
import argparse
import os
import re
import subprocess
import sys
def write_file(file_name, text):
with open(file_name, 'w') as f:
f.write(text)
f.truncate()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-expect-clang-tidy-error', action='store_true')
parser.add_argument('-resource-dir')
parser.add_argument('-assume-filename')
parser.add_argument('-check-suffix', default='')
parser.add_argument('input_file_name')
parser.add_argument('check_name')
parser.add_argument('temp_file_name')
args, extra_args = parser.parse_known_args()
resource_dir = args.resource_dir
assume_file_name = args.assume_filename
input_file_name = args.input_file_name
check_name = args.check_name
temp_file_name = args.temp_file_name
expect_clang_tidy_error = args.expect_clang_tidy_error
file_name_with_extension = assume_file_name or input_file_name
_, extension = os.path.splitext(file_name_with_extension)
if extension not in ['.c', '.hpp', '.m', '.mm']:
extension = '.cpp'
temp_file_name = temp_file_name + extension
clang_tidy_extra_args = extra_args
if len(clang_tidy_extra_args) == 0:
clang_tidy_extra_args = ['--']
if extension in ['.cpp', '.hpp', '.mm']:
clang_tidy_extra_args.append('--std=c++11')
if extension in ['.m', '.mm']:
clang_tidy_extra_args.extend(
['-fobjc-abi-version=2', '-fobjc-arc'])
if args.check_suffix and not re.match('^[A-Z0-9\-]+$', args.check_suffix):
sys.exit('Only A..Z, 0..9 and "-" are allowed in check suffix, but "%s" was given' % (args.check_suffix))
file_check_suffix = ('-' + args.check_suffix) if args.check_suffix else ''
check_fixes_prefix = 'CHECK-FIXES' + file_check_suffix
check_messages_prefix = 'CHECK-MESSAGES' + file_check_suffix
# Tests should not rely on STL being available, and instead provide mock
# implementations of relevant APIs.
clang_tidy_extra_args.append('-nostdinc++')
if resource_dir is not None:
clang_tidy_extra_args.append('-resource-dir=%s' % resource_dir)
with open(input_file_name, 'r') as input_file:
input_text = input_file.read()
has_check_fixes = check_fixes_prefix in input_text
has_check_messages = check_messages_prefix in input_text
if not has_check_fixes and not has_check_messages:
sys.exit('Neither %s nor %s found in the input' % (check_fixes_prefix, check_messages_prefix) )
# Remove the contents of the CHECK lines to avoid CHECKs matching on
# themselves. We need to keep the comments to preserve line numbers while
# avoiding empty lines which could potentially trigger formatting-related
# checks.
cleaned_test = re.sub('// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text)
write_file(temp_file_name, cleaned_test)
original_file_name = temp_file_name + ".orig"
write_file(original_file_name, cleaned_test)
args = ['clang-tidy', temp_file_name, '-fix', '--checks=-*,' + check_name] + \
clang_tidy_extra_args
if expect_clang_tidy_error:
args.insert(0, 'not')
print('Running ' + repr(args) + '...')
try:
clang_tidy_output = \
subprocess.check_output(args, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
print('clang-tidy failed:\n' + e.output.decode())
raise
print('------------------------ clang-tidy output -----------------------\n' +
clang_tidy_output +
'\n------------------------------------------------------------------')
try:
diff_output = subprocess.check_output(
['diff', '-u', original_file_name, temp_file_name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
diff_output = e.output
print('------------------------------ Fixes -----------------------------\n' +
diff_output.decode() +
'\n------------------------------------------------------------------')
if has_check_fixes:
try:
subprocess.check_output(
['FileCheck', '-input-file=' + temp_file_name, input_file_name,
'-check-prefix=' + check_fixes_prefix, '-strict-whitespace'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if has_check_messages:
messages_file = temp_file_name + '.msg'
write_file(messages_file, clang_tidy_output)
try:
subprocess.check_output(
['FileCheck', '-input-file=' + messages_file, input_file_name,
'-check-prefix=' + check_messages_prefix,
'-implicit-check-not={{warning|error}}:'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if __name__ == '__main__':
main()
|
youtube/cobalt
|
third_party/llvm-project/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
|
Python
|
bsd-3-clause
| 5,589
| 0.013777
|
"""
pdf2image is a light wrapper for the poppler-utils tools that can convert your
PDFs into Pillow images.
"""
import os
import platform
import re
import uuid
import tempfile
import shutil
from subprocess import Popen, PIPE
from PIL import Image
from .parsers import (
parse_buffer_to_ppm,
parse_buffer_to_jpeg,
parse_buffer_to_png
)
from .exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
TRANSPARENT_FILE_TYPES = ['png', 'tiff']
def convert_from_path(pdf_path, dpi=200, output_folder=None, first_page=None, last_page=None,
fmt='ppm', thread_count=1, userpw=None, use_cropbox=False, strict=False, transparent=False,
output_file=str(uuid.uuid4()), poppler_path=None):
"""
    Description: Convert PDF to Image will throw whenever one of the conditions is reached
Parameters:
pdf_path -> Path to the PDF that you want to convert
dpi -> Image quality in DPI (default 200)
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
output_file -> What is the output filename
poppler_path -> Path to look for poppler binaries
"""
page_count = _page_count(pdf_path, userpw, poppler_path=poppler_path)
# We start by getting the output format, the buffer processing function and if we need pdftocairo
parsed_fmt, parse_buffer_func, use_pdfcairo_format = _parse_format(fmt)
# We use pdftocairo is the format requires it OR we need a transparent output
use_pdfcairo = use_pdfcairo_format or (transparent and parsed_fmt in TRANSPARENT_FILE_TYPES)
if thread_count < 1:
thread_count = 1
if first_page is None:
first_page = 1
if last_page is None or last_page > page_count:
last_page = page_count
auto_temp_dir = False
if output_folder is None and use_pdfcairo:
auto_temp_dir = True
output_folder = tempfile.mkdtemp()
# Recalculate page count based on first and last page
page_count = last_page - first_page + 1
if thread_count > page_count:
thread_count = page_count
reminder = page_count % thread_count
current_page = first_page
processes = []
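    # Worked example (sketch): 10 pages over 3 threads gives thread_page_count
    # values of 4, 3 and 3, because the remainder (10 % 3 == 1) is handed out
    # one page at a time to the first threads.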
for i in range(thread_count):
thread_output_file = output_file + '_' + str(i) if thread_count > 1 else output_file
# Get the number of pages the thread will be processing
thread_page_count = page_count // thread_count + int(reminder > 0)
# Build the command accordingly
args = _build_command(['-r', str(dpi), pdf_path], output_folder, current_page, current_page + thread_page_count - 1, parsed_fmt, thread_output_file, userpw, use_cropbox, transparent)
if use_pdfcairo:
args = [_get_command_path('pdftocairo', poppler_path)] + args
else:
args = [_get_command_path('pdftoppm', poppler_path)] + args
# Update page values
current_page = current_page + thread_page_count
reminder -= int(reminder > 0)
# Spawn the process and save its uuid
processes.append((thread_output_file, Popen(args, stdout=PIPE, stderr=PIPE)))
images = []
for uid, proc in processes:
data, err = proc.communicate()
        if b'Syntax Error' in err and strict:
raise PDFSyntaxError(err.decode("utf8", "ignore"))
if output_folder is not None:
images += _load_from_output_folder(output_folder, uid, in_memory=auto_temp_dir)
else:
images += parse_buffer_func(data)
if auto_temp_dir:
shutil.rmtree(output_folder)
return images
def convert_from_bytes(pdf_file, dpi=200, output_folder=None, first_page=None, last_page=None,
fmt='ppm', thread_count=1, userpw=None, use_cropbox=False, strict=False, transparent=False,
output_file=str(uuid.uuid4()), poppler_path=None):
"""
    Description: Convert PDF to Image will throw whenever one of the conditions is reached
Parameters:
pdf_file -> Bytes representing the PDF file
dpi -> Image quality in DPI
poppler_path -> Path to look for poppler binaries
output_folder -> Write the resulting images to a folder (instead of directly in memory)
first_page -> First page to process
last_page -> Last page to process before stopping
fmt -> Output image format
thread_count -> How many threads we are allowed to spawn for processing
userpw -> PDF's password
use_cropbox -> Use cropbox instead of mediabox
strict -> When a Syntax Error is thrown, it will be raised as an Exception
transparent -> Output with a transparent background instead of a white one.
output_file -> What is the output filename
poppler_path -> Path to look for poppler binaries
"""
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, 'wb') as f:
f.write(pdf_file)
f.flush()
return convert_from_path(f.name, dpi=dpi, output_folder=output_folder,
first_page=first_page, last_page=last_page, fmt=fmt, thread_count=thread_count,
userpw=userpw, use_cropbox=use_cropbox, strict=strict, transparent=transparent,
output_file=output_file, poppler_path=poppler_path)
finally:
os.close(fh)
os.remove(temp_filename)
def _build_command(args, output_folder, first_page, last_page, fmt, output_file, userpw, use_cropbox, transparent):
if use_cropbox:
args.append('-cropbox')
if transparent and fmt in TRANSPARENT_FILE_TYPES:
args.append('-transp')
if first_page is not None:
args.extend(['-f', str(first_page)])
if last_page is not None:
args.extend(['-l', str(last_page)])
if fmt != 'ppm':
args.append('-' + fmt)
if output_folder is not None:
args.append(os.path.join(output_folder, output_file))
if userpw is not None:
args.extend(['-upw', userpw])
return args
def _parse_format(fmt):
fmt = fmt.lower()
if fmt[0] == '.':
fmt = fmt[1:]
if fmt in ('jpeg', 'jpg'):
return 'jpeg', parse_buffer_to_jpeg, False
if fmt == 'png':
return 'png', parse_buffer_to_png, False
if fmt in ('tif', 'tiff'):
return 'tiff', None, True
# Unable to parse the format so we'll use the default
return 'ppm', parse_buffer_to_ppm, False
def _get_command_path(command, poppler_path=None):
if platform.system() == 'Windows':
command = command + '.exe'
if poppler_path is not None:
command = os.path.join(poppler_path, command)
return command
def _page_count(pdf_path, userpw=None, poppler_path=None):
try:
command = [_get_command_path("pdfinfo", poppler_path), pdf_path]
if userpw is not None:
command.extend(['-upw', userpw])
proc = Popen(command, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
except:
raise PDFInfoNotInstalledError('Unable to get page count. Is poppler installed and in PATH?')
try:
# This will throw if we are unable to get page count
return int(re.search(r'Pages:\s+(\d+)', out.decode("utf8", "ignore")).group(1))
except:
raise PDFPageCountError('Unable to get page count. %s' % err.decode("utf8", "ignore"))
def _load_from_output_folder(output_folder, output_file, in_memory=False):
images = []
for f in sorted(os.listdir(output_folder)):
if output_file in f:
images.append(Image.open(os.path.join(output_folder, f)))
if in_memory:
images[-1].load()
return images
|
Kankroc/pdf2image
|
pdf2image/pdf2image.py
|
Python
|
mit
| 8,412
| 0.004042
|
#!/usr/bin/python3
import struct
import sys
if len(sys.argv) == 1:
print("python karel_mdo_convert.py mundo.mdo")
sys.exit(1)
f = open(sys.argv[1], "rb")
data = f.read()
f.close()
worldname = sys.argv[1]
if '/' in worldname:
worldname = worldname[worldname.rfind('/')+1:]
if '.' in worldname:
worldname = worldname[:worldname.rfind('.')]
kec = False
for extension in ("kec", "KEC"):
try:
f = open(sys.argv[1][:sys.argv[1].rfind(".")] + "." + extension, "rb")
kec = f.read()
f.close()
break
except Exception:
pass
if not kec:
print("%s.kec not found" % worldname)
sys.exit(1)
(x1, width, height, buzzers, karelx, karely, karelorient, wallcount, heapcount, x10) = struct.unpack("HHHHHHHHHH", data[10:30])
tuples = [struct.unpack("HHH", data[i:i+6]) for i in range(30, len(data), 6)]
kec = [struct.unpack("HHH", kec[i:i+6]) for i in range(0, len(kec), 6)]
maxlines = kec[0][1] if kec[0][0] else 10000000
maxmove = kec[1][1] if kec[1][0] else False
maxturnleft = kec[2][1] if kec[2][0] else False
maxpickbeeper = kec[3][1] if kec[3][0] else False
maxputbeeper = kec[4][1] if kec[4][0] else False
maxkarelbeepers = kec[5][1] if kec[5][0] else False
maxbeepers = kec[6][1] if kec[6][0] else False
endposition = kec[7][1:] if kec[7][0] else False
endorientation = ["NORTE", "ESTE", "SUR", "OESTE"][kec[8][1]] if kec[8][0] else False
dumpcount = kec[9][1] if kec[9][0] else 0
def formatbuzzers(b):
if b == 65535:
return "INFINITO"
else:
return "%d" % b
def isborder(wall, w, h):
if wall[0] == wall[2]:
return wall[0] in (0, w)
if wall[1] == wall[3]:
return wall[1] in (0, h)
def decodewalls(t, w, h):
dx = ((-1, 0, -1, -1), (0, 0, 0, -1))
dy = ((0, -1, -1, -1), (0, 0, -1, 0))
for i in range(4):
if (t[2] & (1 << i)):
wall = (t[0] + dx[0][i], t[1] + dy[0][i], t[0] + dx[1][i], t[1] + dy[1][i])
if not isborder(wall, w, h):
yield wall
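# Example (sketch): a tuple such as (3, 2, 0b0011) has bits 0 and 1 set, so
# decodewalls() yields the two corresponding wall segments around cell (3, 2),
# except those that isborder() identifies as part of the outer frame.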
def encodewall(w):
if w[0] == w[2]:
return 'x1="%d" y1="%d" y2="%d"' % (w[0], min(w[1], w[3]), max(w[1], w[3]))
elif w[1] == w[3]:
return 'x1="%d" x2="%d" y1="%d"' % (min(w[0], w[2]), max(w[0], w[2]), w[1])
else:
sys.exit(1)
def generateIn():
print("<ejecucion>")
if maxmove != False or maxturnleft != False or maxpickbeeper != False or maxputbeeper != False:
print(" <condiciones instruccionesMaximasAEjecutar=\"%d\" longitudStack=\"65000\">" % maxlines)
if maxmove != False:
print(' <comando nombre="AVANZA" maximoNumeroDeEjecuciones="%d" />' % maxmove)
if maxturnleft != False:
print(' <comando nombre="GIRA_IZQUIERDA" maximoNumeroDeEjecuciones="%d" />' % maxturnleft)
if maxpickbeeper != False:
print(' <comando nombre="COGE_ZUMBADOR" maximoNumeroDeEjecuciones="%d" />' % maxpickbeeper)
if maxputbeeper != False:
print(' <comando nombre="DEJA_ZUMBADOR" maximoNumeroDeEjecuciones="%d" />' % maxputbeeper)
print(" </condiciones>")
else:
print(" <condiciones instruccionesMaximasAEjecutar=\"%d\" longitudStack=\"65000\" />" % maxlines)
print(" <mundos>")
print(" <mundo nombre=\"mundo_0\" ancho=\"%d\" alto=\"%d\">" % (width, height))
for i in range(wallcount):
for wall in decodewalls(tuples[i], width, height):
print(" <pared %s/>" % encodewall(wall))
for i in range(wallcount, wallcount + heapcount):
print(" <monton x=\"%d\" y=\"%d\" zumbadores=\"%d\"/>" % tuples[i])
for i in range(10, 10 + dumpcount):
print(" <posicionDump x=\"%d\" y=\"%d\" />" % kec[i][:2])
print(" </mundo>")
print(" </mundos>")
print(" <programas tipoEjecucion=\"CONTINUA\" intruccionesCambioContexto=\"1\" milisegundosParaPasoAutomatico=\"0\">")
print(" <programa nombre=\"p1\" ruta=\"{$2$}\" mundoDeEjecucion=\"mundo_0\" xKarel=\"%d\" yKarel=\"%s\" direccionKarel=\"%s\" mochilaKarel=\"%s\" >" \
% (karelx, karely, ["", "NORTE", "ESTE", "SUR", "OESTE"][karelorient], formatbuzzers(buzzers)))
if dumpcount:
print(" <despliega tipo=\"MUNDO\" />")
if endorientation:
print(" <despliega tipo=\"ORIENTACION\" />")
if endposition:
print(" <despliega tipo=\"POSICION\" />")
print(" </programa>")
print(" </programas>")
print("</ejecucion>")
generateIn()
|
samdsmx/omegaup
|
bin/karel_mdo_convert.py
|
Python
|
bsd-3-clause
| 4,150
| 0.024096
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
# Copyright (C) 2006-2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.gnet.constants import *
from papyon.gnet.proxy.proxyfiable import ProxyfiableClient
from sock import SocketClient
import gobject
__all__ = ['TCPClient']
class TCPClient(SocketClient, ProxyfiableClient):
"""Asynchronous TCP client class.
@sort: __init__, open, send, close
@undocumented: do_*, _watch_*, __io_*, _connect_done_handler
@since: 0.1"""
def __init__(self, host, port):
"""initializer
@param host: the hostname to connect to.
@type host: string
@param port: the port number to connect to.
@type port: integer > 0 and < 65536"""
SocketClient.__init__(self, host, port, AF_INET, SOCK_STREAM)
ProxyfiableClient.__init__(self)
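# Example usage, as a sketch only (open/send/close are the methods listed in the
# class docstring above; the host, port and payload are illustrative):
#
#   client = TCPClient("example.com", 80)
#   client.open()
#   client.send("GET / HTTP/1.0\r\n\r\n")
#   client.close()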
gobject.type_register(TCPClient)
|
Kjir/papyon
|
papyon/gnet/io/tcp.py
|
Python
|
gpl-2.0
| 1,708
| 0.001172
|
import sys
import unittest
import restserver
import json
from flask import jsonify
class RestServerTest(unittest.TestCase):
def setUp(self):
self.app = restserver.create_app(static=True).test_client()
def tearDown(self):
pass
def test_struct_graph(self):
rv = self.app.post('/struct_graph')
# not posting any data should be a 'Bad Request'
# ideally, with an error message
self.assertEqual(rv.data, "Missing a json in the request")
self.assertEqual(rv.status_code, 400)
data_in = json.dumps({'seq':'ACCCGG', 'struct':'((..))'})
rv = self.app.post('/struct_graph',
data=data_in,
content_type='application/json')
self.assertEqual(rv.status_code, 201)
data_in = json.dumps({'seq':'ACxCGG', 'struct':'((..))'})
rv = self.app.post('/struct_graph',
data=data_in,
content_type='application/json')
self.assertEqual(rv.status_code, 400)
|
pkerpedjiev/forna
|
test/restserver_test.py
|
Python
|
apache-2.0
| 1,136
| 0.014085
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cProfile import Profile
import logging
import os.path
import sys
from threading import Thread
import time
from optparse import OptionParser
from greplin import scales
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
sys.path.append(os.path.join(dirname, '..'))
import cassandra
from cassandra.cluster import Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.policies import HostDistance
log = logging.getLogger()
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
logging.getLogger('cassandra').setLevel(logging.WARN)
have_libev = False
supported_reactors = [AsyncoreConnection]
try:
from cassandra.io.libevreactor import LibevConnection
have_libev = True
supported_reactors.append(LibevConnection)
except ImportError as exc:
pass
have_twisted = False
try:
from cassandra.io.twistedreactor import TwistedConnection
have_twisted = True
supported_reactors.append(TwistedConnection)
except ImportError as exc:
log.exception("Error importing twisted")
pass
KEYSPACE = "testkeyspace" + str(int(time.time()))
TABLE = "testtable"
def setup(hosts):
log.info("Using 'cassandra' package from %s", cassandra.__path__)
cluster = Cluster(hosts, protocol_version=1)
cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
try:
session = cluster.connect()
log.debug("Creating keyspace...")
session.execute("""
CREATE KEYSPACE %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""" % KEYSPACE)
log.debug("Setting keyspace...")
session.set_keyspace(KEYSPACE)
log.debug("Creating table...")
session.execute("""
CREATE TABLE %s (
thekey text,
col1 text,
col2 text,
PRIMARY KEY (thekey, col1)
)
""" % TABLE)
finally:
cluster.shutdown()
def teardown(hosts):
cluster = Cluster(hosts, protocol_version=1)
cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
session = cluster.connect()
session.execute("DROP KEYSPACE " + KEYSPACE)
cluster.shutdown()
def benchmark(thread_class):
options, args = parse_options()
for conn_class in options.supported_reactors:
setup(options.hosts)
log.info("==== %s ====" % (conn_class.__name__,))
kwargs = {'metrics_enabled': options.enable_metrics,
'connection_class': conn_class}
if options.protocol_version:
kwargs['protocol_version'] = options.protocol_version
cluster = Cluster(options.hosts, **kwargs)
session = cluster.connect(KEYSPACE)
log.debug("Sleeping for two seconds...")
time.sleep(2.0)
query = session.prepare("""
INSERT INTO {table} (thekey, col1, col2) VALUES (?, ?, ?)
""".format(table=TABLE))
values = ('key', 'a', 'b')
per_thread = options.num_ops // options.threads
threads = []
log.debug("Beginning inserts...")
start = time.time()
try:
for i in range(options.threads):
thread = thread_class(
i, session, query, values, per_thread,
cluster.protocol_version, options.profile)
thread.daemon = True
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
while thread.is_alive():
thread.join(timeout=0.5)
end = time.time()
finally:
cluster.shutdown()
teardown(options.hosts)
total = end - start
log.info("Total time: %0.2fs" % total)
log.info("Average throughput: %0.2f/sec" % (options.num_ops / total))
if options.enable_metrics:
stats = scales.getStats()['cassandra']
log.info("Connection errors: %d", stats['connection_errors'])
log.info("Write timeouts: %d", stats['write_timeouts'])
log.info("Read timeouts: %d", stats['read_timeouts'])
log.info("Unavailables: %d", stats['unavailables'])
log.info("Other errors: %d", stats['other_errors'])
log.info("Retries: %d", stats['retries'])
request_timer = stats['request_timer']
log.info("Request latencies:")
log.info(" min: %0.4fs", request_timer['min'])
log.info(" max: %0.4fs", request_timer['max'])
log.info(" mean: %0.4fs", request_timer['mean'])
log.info(" stddev: %0.4fs", request_timer['stddev'])
log.info(" median: %0.4fs", request_timer['median'])
log.info(" 75th: %0.4fs", request_timer['75percentile'])
log.info(" 95th: %0.4fs", request_timer['95percentile'])
log.info(" 98th: %0.4fs", request_timer['98percentile'])
log.info(" 99th: %0.4fs", request_timer['99percentile'])
log.info(" 99.9th: %0.4fs", request_timer['999percentile'])
def parse_options():
parser = OptionParser()
parser.add_option('-H', '--hosts', default='127.0.0.1',
help='cassandra hosts to connect to (comma-separated list) [default: %default]')
parser.add_option('-t', '--threads', type='int', default=1,
help='number of threads [default: %default]')
parser.add_option('-n', '--num-ops', type='int', default=10000,
help='number of operations [default: %default]')
parser.add_option('--asyncore-only', action='store_true', dest='asyncore_only',
help='only benchmark with asyncore connections')
parser.add_option('--libev-only', action='store_true', dest='libev_only',
help='only benchmark with libev connections')
parser.add_option('--twisted-only', action='store_true', dest='twisted_only',
help='only benchmark with Twisted connections')
parser.add_option('-m', '--metrics', action='store_true', dest='enable_metrics',
help='enable and print metrics for operations')
parser.add_option('-l', '--log-level', default='info',
help='logging level: debug, info, warning, or error')
parser.add_option('-p', '--profile', action='store_true', dest='profile',
help='Profile the run')
parser.add_option('--protocol-version', type='int', dest='protocol_version',
help='Native protocol version to use')
options, args = parser.parse_args()
options.hosts = options.hosts.split(',')
log.setLevel(options.log_level.upper())
if options.asyncore_only:
options.supported_reactors = [AsyncoreConnection]
elif options.libev_only:
if not have_libev:
log.error("libev is not available")
sys.exit(1)
options.supported_reactors = [LibevConnection]
elif options.twisted_only:
if not have_twisted:
log.error("Twisted is not available")
sys.exit(1)
options.supported_reactors = [TwistedConnection]
else:
options.supported_reactors = supported_reactors
if not have_libev:
log.warning("Not benchmarking libev reactor because libev is not available")
return options, args
class BenchmarkThread(Thread):
def __init__(self, thread_num, session, query, values, num_queries, protocol_version, profile):
Thread.__init__(self)
self.thread_num = thread_num
self.session = session
self.query = query
self.values = values
self.num_queries = num_queries
self.protocol_version = protocol_version
self.profiler = Profile() if profile else None
def start_profile(self):
if self.profiler:
self.profiler.enable()
def finish_profile(self):
if self.profiler:
self.profiler.disable()
self.profiler.dump_stats('profile-%d' % self.thread_num)
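# A concrete benchmark would subclass BenchmarkThread and hand it to benchmark().
# The run() body below is only a sketch of what such a subclass might do:
#
#   class SyncBenchmark(BenchmarkThread):
#       def run(self):
#           self.start_profile()
#           for _ in range(self.num_queries):
#               self.session.execute(self.query, self.values)
#           self.finish_profile()
#
#   benchmark(SyncBenchmark)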
|
bbirand/python-driver
|
benchmarks/base.py
|
Python
|
apache-2.0
| 8,795
| 0.001478
|
"""CloseSpider is an extension that forces spiders to be closed after certain
conditions are met.
See documentation in docs/topics/extensions.rst
"""
from collections import defaultdict
from twisted.internet import reactor
from scrapy import signals
from scrapy.exceptions import NotConfigured
class CloseSpider(object):
def __init__(self, crawler):
self.crawler = crawler
self.close_on = {
'timeout': crawler.settings.getfloat('CLOSESPIDER_TIMEOUT'),
'itemcount': crawler.settings.getint('CLOSESPIDER_ITEMCOUNT'),
'pagecount': crawler.settings.getint('CLOSESPIDER_PAGECOUNT'),
'errorcount': crawler.settings.getint('CLOSESPIDER_ERRORCOUNT'),
}
if not any(self.close_on.values()):
raise NotConfigured
self.counter = defaultdict(int)
if self.close_on.get('errorcount'):
crawler.signals.connect(self.error_count, signal=signals.spider_error)
if self.close_on.get('pagecount'):
crawler.signals.connect(self.page_count, signal=signals.response_received)
if self.close_on.get('timeout'):
crawler.signals.connect(self.spider_opened, signal=signals.spider_opened)
if self.close_on.get('itemcount'):
crawler.signals.connect(self.item_scraped, signal=signals.item_scraped)
crawler.signals.connect(self.spider_closed, signal=signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def error_count(self, failure, response, spider):
self.counter['errorcount'] += 1
if self.counter['errorcount'] == self.close_on['errorcount']:
self.crawler.engine.close_spider(spider, 'closespider_errorcount')
def page_count(self, response, request, spider):
self.counter['pagecount'] += 1
if self.counter['pagecount'] == self.close_on['pagecount']:
self.crawler.engine.close_spider(spider, 'closespider_pagecount')
def spider_opened(self, spider):
self.task = reactor.callLater(self.close_on['timeout'],
self.crawler.engine.close_spider, spider,
reason='closespider_timeout')
def item_scraped(self, item, spider):
self.counter['itemcount'] += 1
if self.counter['itemcount'] == self.close_on['itemcount']:
self.crawler.engine.close_spider(spider, 'closespider_itemcount')
def spider_closed(self, spider):
task = getattr(self, 'task', False)
if task and task.active():
task.cancel()
|
eLRuLL/scrapy
|
scrapy/extensions/closespider.py
|
Python
|
bsd-3-clause
| 2,631
| 0.0019
|
from __future__ import absolute_import, division, print_function, with_statement
import pytest
import sys
from tornado import concurrent
import tornado.gen
import tornado.testing
import rw.scope
# def test_low_level_api():
# scope = rw.scope.Scope()
# current_user = object()
#
# def get_current_user():
# get_current_user.runs += 1
# return current_user
# scope.provider('user', get_current_user)
# get_current_user.runs = 0
#
# assert get_current_user.runs == 0
# assert rw.scope.get('user') is current_user
# assert get_current_user.runs == 1
# # make sure provider is not run twice
# rw.scope.get('user')
# assert get_current_user.runs == 1
#
# assert scope.get('unknown value', 'default') == 'default'
# with pytest.raises(IndexError):
# scope.get('unknown value')
def test_basic():
scope = rw.scope.Scope()
scope['some_static_value'] = 42
current_user = object()
def get_current_user():
return current_user
scope.provider('user', get_current_user)
@rw.scope.inject
def foo(user):
return user
@rw.scope.inject
def bar(some_static_value):
return some_static_value
@rw.scope.inject
def some_function_with_a_default_value(my_paramenter='my_default_value'):
return my_paramenter
with scope():
assert foo() is current_user
assert foo(1) == 1
assert foo() is current_user
assert bar() == 42
assert bar(10) == 10
assert bar(some_static_value=11) == 11
# normal calling behaviour must be preserved
assert some_function_with_a_default_value('value') == 'value'
assert some_function_with_a_default_value() == 'my_default_value'
# check nested scope
nested_scope = rw.scope.Scope()
nested_scope['user'] = 2
with nested_scope():
assert foo() == 2
assert bar() == 42
assert foo() is current_user
assert bar() == 42
def test_recursion():
"""Entering the same scope twice should not produce unexpected behaviour"""
scope = rw.scope.Scope()
scope2 = rw.scope.Scope()
with scope():
assert rw.scope.get_current_scope() is scope
with scope2():
assert rw.scope.get_current_scope() is scope2
with scope2():
assert rw.scope.get_current_scope() is scope2
with scope():
assert rw.scope.get_current_scope() is scope
assert rw.scope.get_current_scope() is scope2
assert rw.scope.get_current_scope() is scope2
assert rw.scope.get_current_scope() is scope
assert rw.scope.get_current_scope() is None
def test_sub_scope():
scope1 = rw.scope.Scope()
scope2 = rw.scope.Scope()
scope3 = rw.scope.Scope()
sub1 = scope1.subscope('my_sub_scope')
sub2 = scope2.subscope('my_sub_scope')
sub1['value_1'] = 1
sub1['shared'] = 1
sub2['value_2'] = 2
sub2['shared'] = 2
@rw.scope.inject
def get_sub_scope(my_sub_scope):
return my_sub_scope
@rw.scope.inject
def get_sub_scope_var(var, my_sub_scope):
return my_sub_scope[var]
def checks_inside_scope1():
assert rw.scope.get('my_sub_scope') == get_sub_scope()
assert rw.scope.get('my_sub_scope')['value_1'] == 1
assert get_sub_scope_var('value_1') == 1
assert rw.scope.get('my_sub_scope')['shared'] == 1
assert get_sub_scope_var('shared') == 1
assert 'value_2' not in rw.scope.get('my_sub_scope')
def checks_inside_scope2():
assert rw.scope.get('my_sub_scope') == get_sub_scope()
assert rw.scope.get('my_sub_scope')['value_1'] == 1
assert get_sub_scope_var('value_1') == 1
assert rw.scope.get('my_sub_scope')['value_2'] == 2
assert get_sub_scope_var('value_2') == 2
assert rw.scope.get('my_sub_scope')['shared'] == 2
assert get_sub_scope_var('shared') == 2
with scope1():
checks_inside_scope1()
with scope2():
checks_inside_scope2()
with scope3():
# scope 3 does not have a 'my_sub_scope' subscope
# so we expect the same results as for scope 2
checks_inside_scope2()
checks_inside_scope1()
def test_fail():
@rw.scope.inject
def foo(something_to_inject):
pass
with pytest.raises(rw.scope.OutsideScopeError):
foo()
# if all arguments are provided we are ok to run outside of a scope
foo(something_to_inject=1)
class ScopeLeakingTest(tornado.testing.AsyncTestCase):
def test_scope_leaking(self):
        # if an exception occurs inside a scope, the scope might not
        # get cleaned up correctly.
scope = rw.scope.Scope()
with pytest.raises(NotImplementedError):
with scope():
raise NotImplementedError('Just some random error')
        # now we are outside of the scope
assert rw.scope.get_current_scope() is None
@tornado.gen.coroutine
def check_a():
assert rw.scope.get('name') == 'a'
class ConcurrencyTestWithoutWithStatement(tornado.testing.AsyncTestCase):
@tornado.testing.gen_test
def test_stuff(self):
"""Setup two scopes and two "locks"."""
self.scope_a = rw.scope.Scope()
self.scope_a['name'] = 'a'
yield self.scope_a.run(check_a)
class ConcurrencyTest(tornado.testing.AsyncTestCase):
"""test concurrent ioloop futures inside different scopes
Three tests with different resolution order
"""
def setup(self):
"""Setup two scopes and two "locks"."""
self.scope_a = rw.scope.Scope()
self.scope_a['name'] = 'a'
self.lock_a = concurrent.Future()
self.scope_b = rw.scope.Scope()
self.scope_b['name'] = 'b'
self.lock_b = concurrent.Future()
@rw.scope.inject
def get_name(name):
return name
@tornado.gen.coroutine
def thread_a():
yield self.lock_a
raise tornado.gen.Return(get_name())
@tornado.gen.coroutine
def thread_b():
yield self.lock_b
raise tornado.gen.Return(get_name())
with self.scope_a():
future_a = thread_a()
with self.scope_b():
future_b = thread_b()
return future_a, future_b
@tornado.testing.gen_test
def test_concurrent_scopes_both(self):
"""set both results before yield-ing"""
future_a, future_b = self.setup()
self.lock_a.set_result(None)
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
assert (yield future_a) == 'a'
@tornado.testing.gen_test
def test_concurrent_scopes_ba(self):
"""b then a"""
future_a, future_b = self.setup()
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
self.lock_a.set_result(None)
assert (yield future_a) == 'a'
@tornado.testing.gen_test
def test_concurrent_scopes_ab(self):
"""a then b"""
future_a, future_b = self.setup()
self.lock_a.set_result(None)
assert (yield future_a) == 'a'
self.lock_b.set_result(None)
assert (yield future_b) == 'b'
if sys.version_info >= (3, 0):
def test_scope_with_hint():
from test.scope_py3 import test_python3_typehinted_injection
test_python3_typehinted_injection()
|
FlorianLudwig/rueckenwind
|
test/test_scope.py
|
Python
|
apache-2.0
| 7,840
| 0.000128
|
#!/usr/bin/python
# encoding: utf-8
# Jan 2011 (markus kossner) Cleaned up the code, added some documentation
# somwhere around Aug 2008 (markus kossner) created
#
# This script extracts the molecular framework for a database of molecules.
# You can use two modes (hard coded):
# - Scaff: The molecular frame is extracted
# - RedScaff: All linking chains between rings are deleted. The rings are directly connected.
#
# You can comment in/out the code snippets indicated by the comments
# to force each atom of the frame to be a Carbon.
#
# Usage: Frames.py <database.sdf>
# Output:
# - sd files containing all molecules belonging to one frame (1.sdf, 2.sdf etc)
#  - frames.smi containing the (canonical) smiles and count of occurrence
#
from __future__ import print_function
import os,sys
from Chem import AllChem as Chem
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all nested sub-sequences (iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def GetFrame(mol, mode='Scaff'):
    '''return a generic molecule defining the reduced scaffold of the input mol.
mode can be 'Scaff' or 'RedScaff':
Scaff -> chop off the side chains and return the scaffold
RedScaff -> remove all linking chains and connect the rings
directly at the atoms where the linker was
'''
ring = mol.GetRingInfo()
RingAtoms = flatten(ring.AtomRings())
NonRingAtoms = [ atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in RingAtoms ]
RingNeighbors = []
Paths = []
for NonRingAtom in NonRingAtoms:
for neighbor in mol.GetAtomWithIdx(NonRingAtom).GetNeighbors():
if neighbor.GetIdx() in RingAtoms:
RingNeighbors.append(NonRingAtom)
                Paths.append([neighbor.GetIdx(),NonRingAtom]) #The ring Atoms having a non-ring Neighbor will be the start of a walk
break
PosConnectors = [x for x in NonRingAtoms if x not in RingNeighbors] #Only these Atoms are potential starting points of a Linker chain
#print 'PosConnectors:'
#print PosConnectors
Framework = [ x for x in RingAtoms ]
#Start a list of pathways which we will have to walk
#print 'Path atoms:'
#print Paths
Linkers = []
while len(Paths)>0:
NewPaths = []
for P in Paths:
if P == None:
print('ooh')
else:
for neighbor in mol.GetAtomWithIdx(P[-1]).GetNeighbors():
if neighbor.GetIdx() not in P:
if neighbor.GetIdx() in NonRingAtoms:
n = P[:]
n.append(neighbor.GetIdx())
NewPaths.append(n[:])
elif neighbor.GetIdx() in RingAtoms:
#print 'adding the following path to Framework:'
#print P
n = P[:]
n.append(neighbor.GetIdx())
Linkers.append(n)
Framework=Framework+P[:]
Paths = NewPaths[:]
#print 'Linkers:',Linkers
#print 'RingAtoms:',RingAtoms
#em.AddBond(3,4,Chem.BondType.SINGLE)
if mode == 'RedScaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
em = Chem.EditableMol(mol)
BondsToAdd = [ sorted([i[0],i[-1]]) for i in Linkers ]
mem = []
for i in BondsToAdd:
if i not in mem:
em.AddBond(i[0],i[1],Chem.BondType.SINGLE)
mem.append(i)
for i in NonRingAtoms:
todel.append(i)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
if mode == 'Scaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
for i in NonRingAtoms:
if i != None:
if i not in Framework:
todel.append(i)
em = Chem.EditableMol(mol)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond!! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
if __name__=='__main__':
if len(sys.argv) < 2:
print("No input file provided: Frames.py filetosprocess.ext")
sys.exit(1)
suppl = Chem.SDMolSupplier(sys.argv[1])
FrameDict = {}
for mol in suppl:
m = GetFrame(mol)
cansmiles = Chem.MolToSmiles(m, isomericSmiles=True)
        if cansmiles in FrameDict:
FrameDict[cansmiles].append(mol)
else:
FrameDict[cansmiles]=[mol,]
counter=0
w=open('frames.smi','w')
for key,item in FrameDict.items():
counter+=1
d=Chem.SDWriter(str(counter)+'.sdf')
for i in item:
i.SetProp('Scaffold',key)
i.SetProp('Cluster',str(counter))
d.write(i)
print(key,len(item))
w.write(key+'\t'+str(len(item))+'\n')
    w.close()
print('number of Clusters: %d' %(counter))
|
soerendip42/rdkit
|
Contrib/M_Kossner/Frames.py
|
Python
|
bsd-3-clause
| 6,124
| 0.033801
|
import path
import logging
import shutil
import os
log = logging
START_ID = "automatically created attributes start"
END_ID = "automatically created attributes end"
START_STATIC_ID = "automatically created static attributes start"
END_STATIC_ID = "automatically created static attributes end"
START_NODE_ID = 0x0011EF5E
COMPILER_VERSION = "vs2010"
baseDestPath = None
baseSourcePath = None
CODINGROOT = "H:/userDatenHaggi/documents/coding/"
# an automatic attribute is defined as follows:
# attributeName, type, displayName, defaultValue, options
# e.g.
# samples, int, Shading Samples, 2
# filters, enum, Pixel Filter, 0, Mitchell:Gauss:Triangle
# bgColor, color, Background Color, 0.4:0.5:0.7
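#
# For illustration only (the texture and attribute names below are hypothetical), a block
# in the textures.txt definition file parsed by TextureCreator.parseTextureDefinitions()
# further down would therefore look like:
#
#   #marbleTexture
#   scale, float, Scale, 1.0
#   veinColor, color, Vein Color, 0.2:0.2:0.3
#   useFilter, bool, Use Filtering, 1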
def aeTemplateCreator(attDict, renderer, shortCut):
sourceAEFile = baseSourcePath + "/mt@_devmodule/scripts/@/AETemplate/AE@shaderTemplate.py"
destAEPath = path.path(baseDestPath + "/mt@_devmodule/scripts/@/AETemplate/".replace("mt@_", shortCut + "_").replace("@", renderer.capitalize()))
print "Sourcefile", sourceAEFile
print "Destpath", destAEPath
allContent = []
allContent.append(' self.addSeparator()\n')
for key in attDict.keys():
if key.lower() == "all":
for attKey in attDict[key].keys():
if attKey == "out":
continue
attName = attKey
attDisplayName = attDict[key][attKey][1]
allContent.append(' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName))
for key in attDict.keys():
newContent = []
aeFileName = "AE" + renderer.lower() + key.capitalize() + "Template.py"
destAEFile = path.path(destAEPath + aeFileName)
#print "create AE for", key, destAEFile
if destAEFile.exists():
continue
if key.lower() == "all":
continue
print "Creating AE file", destAEFile
sourceHandle = open(sourceAEFile, "r")
content = sourceHandle.readlines()
sourceHandle.close()
startIndex = 0
endIndex = 0
noColorOut = False
for attKey in attDict[key].keys():
if attKey == "out":
if not "color" in attDict[key][attKey][0]:
noColorOut = True
for index, line in enumerate(content):
if "AE@shaderTemplate" in line:
content[index] = line.replace("AE@shaderTemplate", "AE" + renderer.lower() + key.capitalize() + "Template")
#if noColorOut:
# if "pm.mel.AEswatchDisplay(nodeName)" in line:
# content[index] = "#"+line
if "#autoAddBegin" in line:
print "Start new content"
startIndex = index
if "#autoAddEnd" in line:
print "End new content"
endIndex = index
#print "Creating data for", key
#print attDict[key]
for attKey in attDict[key].keys():
if attKey == "out":
continue
attName = attKey
attDisplayName = attDict[key][attKey][1]
#print ' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName)
newContent.append(' self.addControl("{0}", label="{1}")\n'.format(attName, attDisplayName))
finalContent = []
finalContent.extend(content[:startIndex+1])
finalContent.extend(newContent)
finalContent.extend(allContent)
finalContent.extend(content[endIndex:])
#print finalContent
destHandle = open(destAEFile, "w")
destHandle.writelines(finalContent)
destHandle.close()
class Attribute(object):
def __init__(self, aName, aType, aDisplayName, default, data=None):
self.name = aName
self.type = aType
self.displayName = aDisplayName
self.default = default
self.data = data
print "Attribute", self.name, self.type, self.displayName, self.default, self.data
print "make func", "make{0}".format(self.type.capitalize())
def getDefForDefFile(self):
return "inAtt:{0}:{1}".format(self.name, self.type)
def getDefinition(self):
return "\tstatic MObject {0};".format(self.name)
def getAEDefinition(self):
return ' self.addControl("{0}", label="{1}")'.format(self.name, self.displayName)
def getStaticDefinition(self):
return "MObject\tTextureBase::{0};".format(self.name)
def getImplementation(self):
methodName = "make" + self.type.capitalize()
print "Calling", methodName, "for", self.name
return getattr(Attribute, methodName)(self)
def makeInt(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kInt, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeBool(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kBoolean, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeFloat(self):
string = "\t{0} = nAttr.create(\"{0}\", \"{0}\", MFnNumericData::kFloat, {1});\n".format(self.name, self.default)
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeColor(self):
string = "\t{0} = nAttr.createColor(\"{0}\", \"{0}\");\n".format(self.name)
if len(self.default.split(":")) == 1:
a = self.default.split(":")[0]
self.default = "{0}:{0}:{0}".format(a)
string += "\tnAttr.setDefault({0});\n".format(",".join(self.default.split(":")))
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
def makeVector(self):
string = "\tMObject {0}X = nAttr.create(\"{0}X\", \"{0}x\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\tMObject {0}Y = nAttr.create(\"{0}Y\", \"{0}y\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\tMObject {0}Z = nAttr.create(\"{0}Z\", \"{0}z\", MFnNumericData::kDouble, 0.0);\n".format(self.name)
string += "\t{0} = nAttr.create(\"{0}\", \"{0}\", {0}X, {0}Y, {0}Z);\n".format(self.name)
if len(self.default.split(":")) == 1:
a = self.default.split(":")[0]
self.default = "{0}:{0}:{0}".format(a)
string += "\tMAKE_INPUT(nAttr);\n"
if self.type.endswith("Array"):
string += "\tnAttr.setArray(true);\n"
string += "\tnAttr.setDefault({0});\n".format(",".join(self.default.split(":")))
string += "\tCHECK_MSTATUS(addAttribute( {0} ));\n\n".format(self.name)
return string
class ShaderNode(object):
def __init__(self, name):
self.name = name
self.attributeList = []
self.pluginId = 0
self.path = None
class TextureCreator(object):
def __init__(self, startId, name, shortcut):
self.pluginStartId = startId
self.rendererName = name
self.codingRoot = path.path(__file__).parent.parent.parent.parent
self.rendererCodingRoot = self.codingRoot + "/mayaTo" + self.rendererName.capitalize() + "/src"
self.capitalName = self.rendererName.capitalize()
self.shortCut = shortcut
self.baseDestination = path.path("{0}/mayaTo{1}".format(self.codingRoot , self.capitalName))
self.mayaToBaseTexH = "{0}/mayaToBase/src/shaders/textureBase.h".format(self.codingRoot )
self.mayaToBaseTexCPP = "{0}/mayaToBase/src/shaders/textureBase.cpp".format(self.codingRoot )
self.rendererMatDefs = path.path("{0}/mayaTo{1}/vs2010/sourceCodeDocs/textures.txt".format(self.codingRoot , self.capitalName))
self.destinationDir = path.path("{0}/mayaTo{1}/src/textures".format(self.codingRoot , self.capitalName))
self.nodesToCreate = []
self.texFileContent = None
self.textureFileHandle = None
self.mayaToBaseTexHContent = None
self.mayaToBaseTexCPPContent = None
fh = open(self.mayaToBaseTexH)
self.mayaToBaseTexHContent = fh.readlines()
fh.close()
fh = open(self.mayaToBaseTexCPP)
self.mayaToBaseTexCPPContent = fh.readlines()
fh.close()
self.createTextureFiles()
self.printIdInfo()
self.printPluginLoadInfo()
self.createAETemplates()
self.printShaderDefinitions()
def printShaderDefinitions(self):
for node in self.nodesToCreate:
print "shader_start:{0}".format(node.name)
for att in node.attributeList:
print "\t{0}".format(att.getDefForDefFile())
print "\toutAtt:outColor:color"
print "shader_end"
# outatt:displacement:float
def createAETemplates(self):
sourceAEFile = self.codingRoot + "/mayaToBase/mt@_devmodule/scripts/@/AETemplate/AE@shaderTemplate.py"
sourceContent = []
fh = open(sourceAEFile, "r")
sourceContent = fh.readlines()
fh.close()
for node in self.nodesToCreate:
destAEPath = path.path(self.baseDestination + "/{0}_devmodule/scripts/{1}/AETemplate/AE{1}{2}Template.py".format(self.shortCut, self.rendererName.capitalize(), node.name.capitalize()))
content = sourceContent
newContent = []
if destAEPath.exists():
fh = open(destAEPath, "r")
content = fh.readlines()
fh.close()
replaceInProgress = False
for line in content:
line = line.replace('AE@shaderTemplate', "AE{0}{1}Template".format(self.rendererName.capitalize(), node.name.capitalize()))
if not replaceInProgress:
newContent.append(line)
if "#autoAddBegin" in line:
replaceInProgress = True
for att in node.attributeList:
newContent.append(att.getAEDefinition() + "\n")
if "#autoAddEnd" in line:
replaceInProgress = False
newContent.append(line)
fh = open(destAEPath, "w")
fh.writelines(newContent)
fh.close()
def printPluginLoadInfo(self):
for node in self.nodesToCreate:
includeString = '#include "textures/{0}.h"'.format(node.name)
print includeString
print ""
for node in self.nodesToCreate:
regId = 'static const MString {1}Classification("{0}/texture/{0}{1}");'.format(self.rendererName.capitalize(), node.name.capitalize())
print regId
print ""
for node in self.nodesToCreate:
register = 'CHECK_MSTATUS( plugin.registerNode( "{1}{0}", {0}::id, {0}::creator, {0}::initialize, MPxNode::kDependNode, &{0}Classification ));'.format(node.name.capitalize(), self.rendererName.capitalize())
print register
print ""
for node in self.nodesToCreate:
deregister = 'CHECK_MSTATUS( plugin.deregisterNode({0}::id));'.format(node.name.capitalize())
print deregister
def printIdInfo(self):
#0x0011EF55 0x0011EF55 mayaToLux wrinkledTexture.cpp H:/UserDatenHaggi/Documents/coding/OpenMaya/src/mayaToLux\src\textures\wrinkledTexture.cpp
for node in self.nodesToCreate:
print node.pluginId, "mayaTo"+self.rendererName.capitalize(), node.name+".cpp", node.path
def createTextureFiles(self):
self.parseTextureDefinitions()
self.createCPPFiles()
def createCPPFiles(self):
for node in self.nodesToCreate:
self.createHFile(node)
self.createCPPFile(node)
def createHFile(self, node):
print "Creating header file for node", node.name
destinationHeaderFile = self.destinationDir + "/" + node.name + ".h"
headerFileContent = self.mayaToBaseTexHContent
if destinationHeaderFile.exists():
print "Found existing header file"
fh = open(destinationHeaderFile)
headerFileContent = fh.readlines()
fh.close()
newHFileContent = []
replaceInProgress = False
for index, line in enumerate(headerFileContent):
line = line.strip()
line = line.replace("MAYATO@_TextureBase", "{0}_{1}".format(self.shortCut, node.name))
line = line.replace("TextureBase", "{0}".format(node.name.capitalize()))
if not replaceInProgress:
newHFileContent.append(line)
if START_ID in line:
replaceInProgress = True
for att in node.attributeList:
newHFileContent.append(att.getDefinition())
if replaceInProgress and not END_ID in line:
continue
if END_ID in line:
replaceInProgress = False
newHFileContent.append(line)
print "Writing file", destinationHeaderFile
fh = open(destinationHeaderFile, "w")
for line in newHFileContent:
fh.write(line + "\n")
fh.close()
def createCPPFile(self, node):
print "Creating cpp file for node", node.name
self.pluginStartId += 1
nodeId = "0x%08X" % self.pluginStartId
node.pluginId = nodeId
destinationCppFile = self.destinationDir + "/" + node.name + ".cpp"
node.path = destinationCppFile
cppFileContent = self.mayaToBaseTexCPPContent
if destinationCppFile.exists():
print "Found existing header file"
fh = open(destinationCppFile)
headerFileContent = fh.readlines()
fh.close()
newCppFileContent = []
replaceInProgress = False
for index in range(len(cppFileContent)):
line = cppFileContent[index]
#line = line.strip()
line = line.replace("textureBase", node.name)
line = line.replace("TextureBase", node.name.capitalize())
line = line.replace("0x00000", nodeId)
if not replaceInProgress:
newCppFileContent.append(line)
if START_STATIC_ID in line:
replaceInProgress = True
for att in node.attributeList:
newCppFileContent.append(att.getStaticDefinition().replace("TextureBase", node.name.capitalize()) + "\n")
while not END_STATIC_ID in line:
index += 1
line = cppFileContent[index]
replaceInProgress = False
if START_ID in line:
replaceInProgress = True
for att in node.attributeList:
newCppFileContent.append(att.getImplementation() + "\n")
if replaceInProgress and not END_ID in line:
continue
if END_ID in line:
replaceInProgress = False
newCppFileContent.append(line)
print "Writing file", destinationCppFile
#print newCppFileContent
fh = open(destinationCppFile, "w")
for line in newCppFileContent:
fh.write(line)
fh.close()
def parseTextureDefinitions(self):
print "Load texture def file", self.rendererMatDefs
if not self.rendererMatDefs.exists():
print "File does not exist"
return
self.textureFileHandle = open(self.rendererMatDefs, "r")
self.texFileContent = self.textureFileHandle.readlines()
self.textureFileHandle.close()
currentTexture = None
for line in self.texFileContent:
line = line.strip()
if len(line) == 0:
continue
if line.startswith("//"):
continue
if line.startswith("#"):
if currentTexture is not None:
self.nodesToCreate.append(currentTexture)
currentTexture = ShaderNode(line.replace("#", ""))
print "Found texture", line.replace("#", "")
continue
values = line.split(",")
values = [a.strip() for a in values]
if len(values) < 4:
print "Attribute needs at least 4 values", values
continue
data = None
if len(values) > 4:
data = ":".join(values[4:])
att = Attribute(values[0], values[1], values[2], values[3], data)
currentTexture.attributeList.append(att)
if __name__ == "__main__":
tc = TextureCreator(0x0011EF5E, "thea", "mtth")
print "LastID", "0x%08X" % tc.pluginStartId
#global START_NODE_ID
#print "ID: --> 0x%08X" % START_NODE_ID
|
haggi/OpenMaya
|
src/common/python/Renderer/textureCreator.py
|
Python
|
mit
| 17,166
| 0.005942
|
import glob
import os
import shutil
import sys
import tarfile
import traceback
from model import Model
from subprocess import Popen, PIPE
class Apsim75(Model):
def run(self, latidx, lonidx):
try:
apsim_bin = self.config.get('executable')
# The apsim 'executable' is a gzipped tarball that needs to be extracted into the current working directory
tar = tarfile.open(apsim_bin)
tar.extractall()
tar.close()
model_dir = 'Model'
for xml_file in glob.glob('*.xml'):
if os.path.basename(xml_file) == 'Apsim.xml':
continue
old_xml = '%s/%s' % (model_dir, os.path.basename(xml_file))
if os.path.isfile(old_xml):
os.remove(old_xml)
if os.path.islink(xml_file):
link = os.readlink(xml_file)
shutil.copy(link, model_dir)
else:
shutil.copy(xml_file, model_dir)
# Create sim files
p = Popen('source paths.sh ; mono Model/ApsimToSim.exe Generic.apsim', shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file = open('RESULT.OUT', 'w')
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
# Run apsim for each sim file
for sim in glob.glob('*.sim'):
p = Popen('source paths.sh ; Model/ApsimModel.exe %s' % sim, shell=True, executable='/bin/bash', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout_file.write(stdout)
if p.returncode != 0:
rc = p.returncode
stdout_file.close()
return True
except:
print "[%s]: %s" % (os.path.basename(__file__), traceback.format_exc())
return False
|
RDCEP/psims
|
pysims/models/apsim75.py
|
Python
|
agpl-3.0
| 2,004
| 0.003992
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_client import forms
class Form(forms.BaseForm):
plugin_module = forms.CharField('Plugin module name', initial='Generic')
plugin_name = forms.CharField('Plugin name', initial='Facebook Comments')
plugin_template = forms.CharField('Plugin Template', initial='djangocms_fbcomments/default.html')
app_id = forms.CharField('Facebook App ID', required=False)
def to_settings(self, data, settings):
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE'] = data['plugin_module']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_NAME'] = data['plugin_name']
settings['DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE'] = data['plugin_template']
settings['DJANGOCMS_FBCOMMENTS_APP_ID'] = data['app_id']
return settings
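# A minimal sketch of how this form is used (input values are hypothetical): calling
# Form().to_settings({'plugin_module': 'Generic',
#                     'plugin_name': 'Facebook Comments',
#                     'plugin_template': 'djangocms_fbcomments/default.html',
#                     'app_id': ''}, {})
# returns the settings dict with the four DJANGOCMS_FBCOMMENTS_* keys populated.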
|
mishbahr/djangocms-fbcomments
|
aldryn_config.py
|
Python
|
bsd-3-clause
| 819
| 0.003663
|
"""Support for KNX/IP climate devices."""
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
STATE_DRY, STATE_ECO, STATE_FAN_ONLY, STATE_HEAT, STATE_IDLE, STATE_MANUAL,
SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = 'target_temperature_state_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This list might not be complete.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES):
vol.All(cv.ensure_list, [vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
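# For illustration only (the group addresses below are invented), a minimal
# configuration.yaml entry satisfying the schema above could look like:
#
#   climate:
#     - platform: knx
#       name: "Living room"
#       temperature_address: '5/1/1'
#       target_temperature_state_address: '5/1/2'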
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
group_address_controller_status_state=config.get(
CONF_CONTROLLER_STATUS_STATE_ADDRESS),
group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config[CONF_TEMPERATURE_ADDRESS],
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_target_temperature_state=config[
CONF_TARGET_TEMPERATURE_STATE_ADDRESS],
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature.value
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.device.setpoint_shift_step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.device.target_temperature.value
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.device.target_temperature_min
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.device.target_temperature_max
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self.device.set_target_temperature(temperature)
await self.async_update_ha_state()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode.supports_operation_mode:
return OPERATION_MODES.get(self.device.mode.operation_mode.value)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [OPERATION_MODES.get(operation_mode.value) for
operation_mode in
self.device.mode.operation_modes]
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if self.device.mode.supports_operation_mode:
from xknx.knx import HVACOperationMode
knx_operation_mode = HVACOperationMode(
OPERATION_MODES_INV.get(operation_mode))
await self.device.mode.set_operation_mode(knx_operation_mode)
await self.async_update_ha_state()
@property
def is_on(self):
"""Return true if the device is on."""
if self.device.supports_on_off:
return self.device.is_on
return None
async def async_turn_on(self):
"""Turn on."""
await self.device.turn_on()
async def async_turn_off(self):
"""Turn off."""
await self.device.turn_off()
|
jamespcole/home-assistant
|
homeassistant/components/knx/climate.py
|
Python
|
apache-2.0
| 11,048
| 0
|
'''Splitter
======
.. versionadded:: 1.5.0
.. image:: images/splitter.jpg
:align: right
The :class:`Splitter` is a widget that helps you re-size its child
widget/layout by letting you re-size it via dragging the boundary or
double tapping the boundary. This widget is similar to the
:class:`~kivy.uix.scrollview.ScrollView` in that it allows only one
child widget.
Usage::
splitter = Splitter(sizable_from = 'right')
splitter.add_widget(layout_or_widget_instance)
splitter.min_size = 100
splitter.max_size = 250
To change the size of the strip/border used for resizing::
splitter.strip_size = '10pt'
To change its appearance::
splitter.strip_cls = your_custom_class
You can also change the appearance of the `strip_cls`, which defaults to
:class:`SplitterStrip`, by overriding the `kv` rule in your app:
.. code-block:: kv
<SplitterStrip>:
horizontal: True if self.parent and self.parent.sizable_from[0] \
in ('t', 'b') else False
background_normal: 'path to normal horizontal image' \
if self.horizontal else 'path to vertical normal image'
background_down: 'path to pressed horizontal image' \
if self.horizontal else 'path to vertical pressed image'
'''
__all__ = ('Splitter', )
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.button import Button
from kivy.properties import (OptionProperty, NumericProperty, ObjectProperty,
ListProperty, BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
class SplitterStrip(Button):
    '''Class used for the graphical representation of a
    :class:`kivy.uix.splitter.Splitter` resize strip.
'''
pass
class Splitter(BoxLayout):
'''See module documentation.
:Events:
`on_press`:
Fired when the splitter is pressed.
`on_release`:
Fired when the splitter is released.
.. versionchanged:: 1.6.0
Added `on_press` and `on_release` events.
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for the
:class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction.
This must be a list of four values: (bottom, right, top, left).
Read the BorderImage instructions for more information about how
to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (4, 4, 4, 4).
'''
strip_cls = ObjectProperty(SplitterStrip)
'''Specifies the class of the resize Strip.
:attr:`strip_cls` is an :class:`kivy.properties.ObjectProperty` and
defaults to :class:`~kivy.uix.splitter.SplitterStrip`, which is of type
:class:`~kivy.uix.button.Button`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
sizable_from = OptionProperty('left', options=(
'left', 'right', 'top', 'bottom'))
'''Specifies whether the widget is resizable. Options are:
`left`, `right`, `top` or `bottom`
:attr:`sizable_from` is an :class:`~kivy.properties.OptionProperty`
and defaults to `left`.
'''
strip_size = NumericProperty('10pt')
    '''Specifies the size of the resize strip.
    :attr:`strip_size` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `10pt`.
'''
min_size = NumericProperty('100pt')
'''Specifies the minimum size beyond which the widget is not resizable.
:attr:`min_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `100pt`.
'''
max_size = NumericProperty('500pt')
'''Specifies the maximum size beyond which the widget is not resizable.
:attr:`max_size` is a :class:`~kivy.properties.NumericProperty`
and defaults to `500pt`.
'''
_parent_proportion = NumericProperty(0.)
    '''(internal) Specifies the distance that the splitter has travelled
across its parent, used to automatically maintain a sensible
position if the parent is resized.
:attr:`_parent_proportion` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
.. versionadded:: 1.9.0
'''
_bound_parent = ObjectProperty(None, allownone=True)
'''(internal) References the widget whose size is currently being
tracked by :attr:`_parent_proportion`.
:attr:`_bound_parent` is a
:class:`~kivy.properties.ObjectProperty` and defaults to None.
.. versionadded:: 1.9.0
'''
keep_within_parent = BooleanProperty(False)
'''If True, will limit the splitter to stay within its parent widget.
:attr:`keep_within_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
rescale_with_parent = BooleanProperty(False)
'''If True, will automatically change size to take up the same
proportion of the parent widget when it is resized, while
staying within :attr:`min_size` and :attr:`max_size`. As long as
these attributes can be satisfied, this stops the
:class:`Splitter` from exceeding the parent size during rescaling.
:attr:`rescale_with_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
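    # For illustration (sizes are hypothetical): combining the two flags above, a splitter
    # that stays inside and rescales with its parent could be created as
    #   Splitter(sizable_from='right', keep_within_parent=True,
    #            rescale_with_parent=True, size_hint=(None, 1), width=200)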
__events__ = ('on_press', 'on_release')
def __init__(self, **kwargs):
self._container = None
self._strip = None
super(Splitter, self).__init__(**kwargs)
do_size = self._do_size
fbind = self.fbind
fbind('max_size', do_size)
fbind('min_size', do_size)
fbind('parent', self._rebind_parent)
def on_sizable_from(self, instance, sizable_from):
if not instance._container:
return
sup = super(Splitter, instance)
_strp = instance._strip
if _strp:
# remove any previous binds
_strp.unbind(on_touch_down=instance.strip_down)
_strp.unbind(on_touch_move=instance.strip_move)
_strp.unbind(on_touch_up=instance.strip_up)
self.unbind(disabled=_strp.setter('disabled'))
sup.remove_widget(instance._strip)
else:
cls = instance.strip_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
instance._strip = _strp = cls()
sz_frm = instance.sizable_from[0]
if sz_frm in ('l', 'r'):
_strp.size_hint = None, 1
_strp.width = instance.strip_size
instance.orientation = 'horizontal'
instance.unbind(strip_size=_strp.setter('width'))
instance.bind(strip_size=_strp.setter('width'))
else:
_strp.size_hint = 1, None
_strp.height = instance.strip_size
instance.orientation = 'vertical'
instance.unbind(strip_size=_strp.setter('height'))
instance.bind(strip_size=_strp.setter('height'))
index = 1
if sz_frm in ('r', 'b'):
index = 0
sup.add_widget(_strp, index)
_strp.bind(on_touch_down=instance.strip_down)
_strp.bind(on_touch_move=instance.strip_move)
_strp.bind(on_touch_up=instance.strip_up)
_strp.disabled = self.disabled
self.bind(disabled=_strp.setter('disabled'))
def add_widget(self, widget, index=0):
if self._container or not widget:
            raise Exception('Splitter accepts only one Child')
self._container = widget
sz_frm = self.sizable_from[0]
if sz_frm in ('l', 'r'):
widget.size_hint_x = 1
else:
widget.size_hint_y = 1
index = 0
if sz_frm in ('r', 'b'):
index = 1
super(Splitter, self).add_widget(widget, index)
self.on_sizable_from(self, self.sizable_from)
def remove_widget(self, widget, *largs):
super(Splitter, self).remove_widget(widget)
if widget == self._container:
self._container = None
def clear_widgets(self):
self.remove_widget(self._container)
def strip_down(self, instance, touch):
if not instance.collide_point(*touch.pos):
return False
touch.grab(self)
self.dispatch('on_press')
def on_press(self):
pass
def _rebind_parent(self, instance, new_parent):
if self._bound_parent is not None:
self._bound_parent.unbind(size=self.rescale_parent_proportion)
if self.parent is not None:
new_parent.bind(size=self.rescale_parent_proportion)
self._bound_parent = new_parent
self.rescale_parent_proportion()
def rescale_parent_proportion(self, *args):
if not self.parent:
return
if self.rescale_with_parent:
parent_proportion = self._parent_proportion
if self.sizable_from in ('top', 'bottom'):
new_height = parent_proportion * self.parent.height
self.height = max(self.min_size,
min(new_height, self.max_size))
else:
new_width = parent_proportion * self.parent.width
self.width = max(self.min_size, min(new_width, self.max_size))
def _do_size(self, instance, value):
if self.sizable_from[0] in ('l', 'r'):
self.width = max(self.min_size, min(self.width, self.max_size))
else:
self.height = max(self.min_size, min(self.height, self.max_size))
def strip_move(self, instance, touch):
if touch.grab_current is not instance:
return False
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
if sz_frm in ('t', 'b'):
diff_y = (touch.dy)
if self.keep_within_parent:
if sz_frm == 't' and (self.top + diff_y) > self.parent.top:
diff_y = self.parent.top - self.top
elif sz_frm == 'b' and (self.y + diff_y) < self.parent.y:
diff_y = self.parent.y - self.y
if sz_frm == 'b':
diff_y *= -1
if self.size_hint_y:
self.size_hint_y = None
if self.height > 0:
self.height += diff_y
else:
self.height = 1
height = self.height
self.height = max(min_size, min(height, max_size))
self._parent_proportion = self.height / self.parent.height
else:
diff_x = (touch.dx)
if self.keep_within_parent:
if sz_frm == 'l' and (self.x + diff_x) < self.parent.x:
diff_x = self.parent.x - self.x
elif (sz_frm == 'r' and
(self.right + diff_x) > self.parent.right):
diff_x = self.parent.right - self.right
if sz_frm == 'l':
diff_x *= -1
if self.size_hint_x:
self.size_hint_x = None
if self.width > 0:
self.width += diff_x
else:
self.width = 1
width = self.width
self.width = max(min_size, min(width, max_size))
self._parent_proportion = self.width / self.parent.width
def strip_up(self, instance, touch):
if touch.grab_current is not instance:
return
if touch.is_double_tap:
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
s = self.size
if sz_frm in ('t', 'b'):
if self.size_hint_y:
self.size_hint_y = None
if s[1] - min_size <= max_size - s[1]:
self.height = max_size
else:
self.height = min_size
else:
if self.size_hint_x:
self.size_hint_x = None
if s[0] - min_size <= max_size - s[0]:
self.width = max_size
else:
self.width = min_size
touch.ungrab(instance)
self.dispatch('on_release')
def on_release(self):
pass
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
class SplitterApp(App):
def build(self):
root = FloatLayout()
bx = BoxLayout()
bx.add_widget(Button())
bx.add_widget(Button())
bx2 = BoxLayout()
bx2.add_widget(Button())
bx2.add_widget(Button())
bx2.add_widget(Button())
spl = Splitter(
size_hint=(1, .25),
pos_hint={'top': 1},
sizable_from='bottom')
spl1 = Splitter(
sizable_from='left',
size_hint=(None, 1), width=90)
spl1.add_widget(Button())
bx.add_widget(spl1)
spl.add_widget(bx)
spl2 = Splitter(size_hint=(.25, 1))
spl2.add_widget(bx2)
spl2.sizable_from = 'right'
root.add_widget(spl)
root.add_widget(spl2)
return root
SplitterApp().run()
|
akshayaurora/kivy
|
kivy/uix/splitter.py
|
Python
|
mit
| 13,228
| 0.000076
|
#!/usr/bin/env python
#
# @file createCMakeFiles.py
# @brief create the CMake files
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2014 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
import sys
import os
import fileHeaders
import strFunctions
def writeSrcListsFile(name, nameOfPackage, plugins, classes):
capName = name.upper()
uname = strFunctions.cap(name)
codeName = 'CMakeLists.txt'
fileOut = open(codeName, 'w')
fileOut.write('# This CMake file integrates the binding source with the libsbml source tree\n#\n\n\n'.format(name))
fileOut.write('# include common functions (used for copying / removing files)\n')
fileOut.write('if(NOT EXISTS ${LIBSBML_SOURCE}/common.cmake)\n')
fileOut.write(' message(FATAL_ERROR "Invalid libsbml source directory")\n')
fileOut.write('endif()\n\n')
fileOut.write('include(${LIBSBML_SOURCE}/common.cmake)\n\n')
fileOut.write('# specify the package files\n')
fileOut.write('set(PACKAGE_FILES\n\n')
    fileOut.write('  # forward declarations\n')
fileOut.write(' "common/{0}fwd.h"\n'.format(name))
fileOut.write(' "common/{0}ExtensionTypes.h"\n\n'.format(uname))
fileOut.write(' # extension points\n')
fileOut.write(' "extension/{0}Extension.h"\n'.format(uname))
for i in range (0, len(plugins)):
fileOut.write(' "extension/{0}{1}Plugin.h"\n'.format(nameOfPackage, plugins[i]['sbase']))
fileOut.write(' "extension/{0}Extension.cpp"\n'.format(uname))
for i in range (0, len(plugins)):
fileOut.write(' "extension/{0}{1}Plugin.cpp"\n'.format(nameOfPackage, plugins[i]['sbase']))
fileOut.write('\n #new SBML classes\n')
for i in range (0, len(classes)):
fileOut.write(' "sbml/{0}.h"\n'.format(classes[i]['name']))
for i in range (0, len(classes)):
fileOut.write(' "sbml/{0}.cpp"\n'.format(classes[i]['name']))
fileOut.write('\n #test cases\n')
fileOut.write('\n\n')
fileOut.write(' )\n\n')
fileOut.write('# specify the files for the language bindings\n')
fileOut.write('set(BINDING_FILES\n\n')
fileOut.write(' # C# bindings\n')
fileOut.write(' "bindings/csharp/local-downcast-extension-{0}.i"\n'.format(name))
fileOut.write(' "bindings/csharp/local-downcast-namespaces-{0}.i"\n'.format(name))
fileOut.write(' "bindings/csharp/local-packages-{0}.i"\n\n'.format(name))
fileOut.write(' # java bindings\n')
fileOut.write(' "bindings/java/local-downcast-extension-{0}.i"\n'.format(name))
fileOut.write(' "bindings/java/local-downcast-namespaces-{0}.i"\n'.format(name))
fileOut.write(' "bindings/java/local-packages-{0}.i"\n\n'.format(name))
fileOut.write(' # perl bindings\n')
fileOut.write(' "bindings/perl/local-downcast-extension-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/perl/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/perl/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/perl/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # python bindings\n')
fileOut.write(' "bindings/python/local-downcast-extension-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/python/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # ruby bindings\n')
fileOut.write(' "bindings/ruby/local-downcast-extension-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-packages-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-namespaces-{0}.cpp"\n'.format(name))
fileOut.write(' "bindings/ruby/local-downcast-plugins-{0}.cpp"\n\n'.format(name))
fileOut.write(' # generic swig bindings\n')
fileOut.write(' "bindings/swig/{0}-package.h"\n'.format(name))
fileOut.write(' "bindings/swig/{0}-package.i"\n\n'.format(name))
fileOut.write(' )\n\n')
fileOut.write('if(MODE STREQUAL "integrate")\n')
fileOut.write(' # integrate the package with the specified libsbml source directory\n\n')
fileOut.write(' # copy the CMake script that integrates the source files with libsbml-5\n')
fileOut.write(' copy_file("../{0}-package.cmake" '.format(name))
fileOut.write('${LIBSBML_SOURCE})\n')
fileOut.write(' copy_file("{0}-package.cmake" '.format(name))
fileOut.write('${LIBSBML_SOURCE}/src)\n\n')
fileOut.write(' # copy language binding files\n')
fileOut.write(' foreach(bindingFile ${BINDING_FILES})\n')
fileOut.write(' copy_file_to_subdir( ${bindingFile} ${LIBSBML_SOURCE}/src)\n')
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy package files\n')
fileOut.write(' foreach(packageFile ${PACKAGE_FILES})\n')
fileOut.write(' copy_file_to_subdir( ${packageFile} ${LIBSBML_SOURCE}')
fileOut.write('/src/packages/{0})\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy header files to include directory just in case\n')
fileOut.write(' foreach(dir common extension sbml)\n\n')
fileOut.write(' # copy files\n')
fileOut.write(' copy_files( ${CMAKE_CURRENT_SOURCE_DIR}/${dir}/\n')
fileOut.write(' ${LIBSBML_SOURCE}')
fileOut.write('/include/sbml/{0} *.h )\n\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' add_custom_target(integrate ALL)\n\n')
fileOut.write(' message(STATUS "Finished integrating the SBML {0} package with the libsbml source tree in:")\n'.format(name))
fileOut.write(' message(STATUS "${LIBSBML_SOURCE}")\n\n')
fileOut.write('elseif(MODE STREQUAL "remove")\n')
fileOut.write(' # remove all package files from the specified libsbml source directory\n\n')
fileOut.write(' remove_file(${LIBSBML_SOURCE}')
fileOut.write('/{0}-package.cmake)\n'.format(name))
fileOut.write(' remove_file(${LIBSBML_SOURCE}')
fileOut.write('/src/{0}-package.cmake)\n\n'.format(name))
fileOut.write(' # copy language binding files\n')
fileOut.write(' foreach(bindingFile ${BINDING_FILES})\n')
fileOut.write(' remove_file_in_subdir( ${bindingFile} ${LIBSBML_SOURCE}/src)\n')
fileOut.write(' endforeach()\n\n')
fileOut.write(' # copy package files\n')
fileOut.write(' foreach(packageFile ${PACKAGE_FILES})\n')
fileOut.write(' remove_file_in_subdir( ${packageFile} ${LIBSBML_SOURCE}')
fileOut.write('/src/packages/{0})\n'.format(name))
fileOut.write(' endforeach()\n\n')
fileOut.write(' # delete package directory\n')
fileOut.write(' file(REMOVE ${LIBSBML_SOURCE}')
fileOut.write('/src/packages/{0})\n'.format(name))
fileOut.write(' file(REMOVE_RECURSE ${LIBSBML_SOURCE}')
fileOut.write('/include/sbml/{0})\n\n'.format(name))
fileOut.write(' add_custom_target(remove ALL)\n\n')
fileOut.write(' message(STATUS "Finished removing the SBML {0} package from the libsbml source tree in:")\n'.format(name))
fileOut.write(' message(STATUS "${LIBSBML_SOURCE}")\n\n')
fileOut.write('endif()\n\n')
def writeTopLevelListsFile(name):
capName = name.upper()
codeName = 'CMakeLists.txt'
fileOut = open(codeName, 'w')
fileOut.write('# This CMake Package integrates the SBML {0} package with libsbml 5\n#\n\n\n'.format(name))
fileOut.write('cmake_minimum_required(VERSION 2.8)\n\n')
fileOut.write('# the project name should be the same name as the SBML package\n')
fileOut.write('project({0})\n\n'.format(name))
fileOut.write('set(MODE "integrate" CACHE STRING "The operation to perform, valid options are integrate|compile|remove.")\n')
fileOut.write('set(LIBSBML_SOURCE "$ENV{HOME}/Development/libsbml-5/" CACHE PATH "Path to the libsbml source distribution")\n')
fileOut.write('set(EXTRA_LIBS "xml2;bz2;z" CACHE STRING "List of Libraries to link against" )\n\n')
fileOut.write('if(MODE STREQUAL "compile")\n')
fileOut.write(' # compile the package and link it against an existing libsbml-5 version\n')
fileOut.write(' # file sources\n')
fileOut.write(' file(GLOB sources\n')
fileOut.write(' src/extension/*.cpp src/extension/*.h\n')
fileOut.write(' src/sbml/*.cpp src/sbml/*.h \n')
fileOut.write(' src/common/*.h )\n\n')
fileOut.write(' # add sources \n')
fileOut.write(' set(SOURCE_FILES ${sources} )\n\n')
fileOut.write(' include_directories(${LIBSBML_SOURCE}/include)\n\n')
fileOut.write(' find_library(LIBSBML_LIBS \n')
fileOut.write(' NAMES libsbml.lib sbml\n')
fileOut.write(' PATHS ${LIBSBML_SOURCE} \n')
fileOut.write(' ${LIBSBML_SOURCE/lib}\n')
fileOut.write(' ${LIBSBML_SOURCE/src/.libs}\n')
fileOut.write(' /usr/lib /usr/local/lib \n')
fileOut.write(' ${LIBSBML_ROOT_SOURCE_DIR} \n')
fileOut.write(' ${LIBSBML_ROOT_SOURCE_DIR}/dependencies/lib\n')
fileOut.write(' )\n\n')
fileOut.write(' make_directory(${CMAKE_CURRENT_BINARY_DIR}/include/sbml/')
fileOut.write('{0})\n\n'.format(name))
fileOut.write(' # copy header files to facilitate build\n')
fileOut.write(' foreach(dir common extension sbml validator validator/constraints)\n\n')
fileOut.write(' # copy files\n')
fileOut.write(' file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/src/${dir}/\n')
fileOut.write(' DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/sbml/')
fileOut.write('{0}\n'.format(name))
fileOut.write(' PATTERN ${CMAKE_CURRENT_SOURCE_DIR}/src/${dir}/*.h)\n\n')
fileOut.write(' endforeach()\n\n')
fileOut.write(' if (NOT UNIX)\n')
fileOut.write(' add_definitions(-DWIN32 -DLIBSBML_EXPORTS -DLIBLAX_EXPORTS)\n')
fileOut.write(' endif()\n\n')
fileOut.write(' include_directories(${CMAKE_CURRENT_BINARY_DIR}/include)\n')
fileOut.write(' include_directories("src/common")\n')
fileOut.write(' include_directories("src/extension")\n')
fileOut.write(' include_directories("src/sbml")\n\n')
fileOut.write(' add_library({0} STATIC '.format(name))
fileOut.write('${SOURCE_FILES} )\n')
fileOut.write(' target_link_libraries({0} '.format(name))
fileOut.write('${LIBSBML_LIBS})\n\n')
fileOut.write(' option(WITH_EXAMPLE "Compile Example File" ON)\n\n')
fileOut.write(' if(WITH_EXAMPLE)\n\n')
fileOut.write(' set(EXAMPLE_SOURCE examples/c++/example1.cpp)\n')
fileOut.write(' add_executable({0}_example '.format(name))
fileOut.write('${EXAMPLE_SOURCE})\n')
fileOut.write(' target_link_libraries({0}_example {0} '.format(name))
fileOut.write('${EXTRA_LIBS})\n\n')
fileOut.write(' endif()\n\n')
fileOut.write('else()\n')
fileOut.write(' add_subdirectory(src)\n')
fileOut.write('endif()\n')
def writeSrcFile(name):
capName = name.upper()
codeName = name + '-package.cmake'
fileOut = open(codeName, 'w')
fileHeaders.addCMakeFilename(fileOut, codeName, name)
fileHeaders.addCMakeLicence(fileOut)
fileOut.write('\n')
fileOut.write('if (ENABLE_{0} )\n\n'.format(capName))
fileOut.write('include(${LIBSBML_ROOT_SOURCE_DIR}/')
fileOut.write('{0}-package.cmake)\n\n'.format(name))
fileOut.write('#build up sources\n')
fileOut.write('set({0}_SOURCES)\n\n'.format(capName))
fileOut.write('# go through all directories: common, extension and sbml\n')
fileOut.write('foreach(dir common extension sbml validator validator/constraints)\n\n')
fileOut.write(' # add to include directory\n')
fileOut.write(' include_directories(${CMAKE_CURRENT_SOURCE_DIR}/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir})\n\n')
fileOut.write(' # file sources\n')
fileOut.write(' file(GLOB current ${CMAKE_CURRENT_SOURCE_DIR}/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir}/*.cpp\n')
fileOut.write(' ${CMAKE_CURRENT_SOURCE_DIR}/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir}/*.c\n')
fileOut.write(' ${CMAKE_CURRENT_SOURCE_DIR}/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir}/*.h)\n\n')
fileOut.write(' # add sources\n')
fileOut.write(' set({0}_SOURCES $'.format(capName))
fileOut.write('{')
fileOut.write('{0}_SOURCES'.format(capName))
fileOut.write('} ${current})\n\n')
fileOut.write(' # mark header files for installation\n')
fileOut.write(' file(GLOB {0}_headers\n'.format(name))
fileOut.write(' ${CMAKE_CURRENT_SOURCE_DIR}/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir}/*.h)\n\n')
fileOut.write(' install(FILES ${')
fileOut.write('{0}_headers'.format(name))
fileOut.write('}\n DESTINATION include/sbml/packages/')
fileOut.write('{0}/$'.format(name))
fileOut.write('{dir} )\n\n')
fileOut.write('endforeach()\n\n')
fileOut.write('# create source group for IDEs\n')
fileOut.write('source_group({0}_package FILES $'.format(name))
fileOut.write('{')
fileOut.write('{0}_SOURCES'.format(capName))
fileOut.write('})\n\n')
fileOut.write('# add {0} sources to SBML sources\n'.format(name))
fileOut.write('SET(LIBSBML_SOURCES ${LIBSBML_SOURCES} ${')
fileOut.write('{0}_SOURCES'.format(capName))
fileOut.write('})\n\n')
fileOut.write('######################################################\n')
fileOut.write('#\n# add test scripts\n#\n')
fileOut.write('if(WITH_CHECK)\n\n\n')
fileOut.write('endif()\n\n')
fileOut.write('endif()\n\n')
def writeTopLevelFile(name):
capName = name.upper()
codeName = name + '-package.cmake'
fileOut = open(codeName, 'w')
fileHeaders.addCMakeFilename(fileOut, codeName, name)
fileHeaders.addCMakeLicence(fileOut)
fileOut.write('\n')
fileOut.write('option(ENABLE_{0} "Enable libSBML support for the \'{1}\' package" OFF)\n\n\n'.format(capName, name))
fileOut.write('\n')
fileOut.write('# provide summary status =\n')
fileOut.write('list(APPEND LIBSBML_PACKAGE_SUMMARY "SBML \'{0}\' package = $'.format(name))
fileOut.write('{')
fileOut.write('ENABLE_{0}'.format(capName))
fileOut.write('}")\n')
fileOut.write('\n')
fileOut.write('if (ENABLE_{0} )\n'.format(capName))
fileOut.write(' add_definitions(-DUSE_{0})\n'.format(capName))
fileOut.write(' set(LIBSBML_PACKAGE_INCLUDES ${LIBSBML_PACKAGE_INCLUDES} ')
fileOut.write('"LIBSBML_HAS_PACKAGE_{0}")\n'.format(capName))
fileOut.write(' list(APPEND SWIG_EXTRA_ARGS -DUSE_{0})\n'.format(capName))
fileOut.write(' list(APPEND SWIG_SWIGDOCDEFINES --define USE_{0})\n'.format(capName))
fileOut.write('endif()\n\n')
def main(package):
nameOfPackage = package['name']
name = nameOfPackage.lower()
plugins = package['plugins']
classes = package['sbmlElements']
os.chdir('./' + name)
writeTopLevelFile(name)
# writeTopLevelListsFile(name)
os.chdir('./src')
writeSrcFile(name)
# writeSrcListsFile(name, nameOfPackage, plugins, classes)
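# Illustrative only: a minimal, hypothetical driver for the generator above. The
# package dictionary mirrors the keys that main() reads, and the './groups' and
# './groups/src' directories (plus the module's usual imports) are assumed to
# already exist before the call.
def _example_generate_cmake_files():
    package = {'name': 'Groups', 'plugins': [], 'sbmlElements': []}
    main(package)  # writes groups-package.cmake into ./groups and ./groups/src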
|
hovo1990/deviser
|
generator/legacy/createCMakeFiles.py
|
Python
|
lgpl-2.1
| 16,329
| 0.020454
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import inspect
from datetime import datetime
import freezegun
import pytest
from sqlalchemy import DateTime, cast
from sqlalchemy.sql.functions import _FunctionGenerator
@pytest.fixture
def monkeypatch_methods(monkeypatch):
"""Monkeypatches all methods from `cls` onto `target`
This utility lets you easily mock multiple methods in an existing class.
In case of classmethods the binding will not be changed, i.e. `cls` will
keep pointing to the source class and not the target class.
"""
def _monkeypatch_methods(target, cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
if method.__self__ is None:
# For unbound methods we need to copy the underlying function
method = method.__func__
monkeypatch.setattr('{}.{}'.format(target, name), method)
return _monkeypatch_methods
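# Illustrative only: a minimal sketch of how the fixture above might be used.
# `FakeClient` and the dotted target path are hypothetical and exist purely for
# this example; in a real test pytest injects `monkeypatch_methods` when the
# test function requests it by name.
def _example_monkeypatch_methods_usage(monkeypatch_methods):
    class FakeClient(object):
        def fetch(self, key):
            return 'stubbed-{}'.format(key)

    # Every method of FakeClient is patched onto the class at the given import
    # path, so code holding a reference to that class sees the stubbed methods.
    monkeypatch_methods('some_package.some_module.RealClient', FakeClient)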
@pytest.fixture
def freeze_time(monkeypatch):
"""Returns a function that freezes the current time
It affects datetime.now, date.today, etc. and also SQLAlchemy's `func.now()`
which simply returns the current time from `datetime.now()` instead of
retrieving it using the actual `now()` function of PostgreSQL.
"""
freezers = []
orig_call = _FunctionGenerator.__call__
def FunctionGenerator_call(self, *args, **kwargs):
if self._FunctionGenerator__names == ['now']:
return cast(datetime.now().isoformat(), DateTime)
return orig_call(self, *args, **kwargs)
monkeypatch.setattr(_FunctionGenerator, '__call__', FunctionGenerator_call)
def _freeze_time(time_to_freeze):
freezer = freezegun.freeze_time(time_to_freeze)
freezer.start()
freezers.append(freezer)
yield _freeze_time
for freezer in reversed(freezers):
freezer.stop()
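# Illustrative only: a minimal sketch of using the fixture above in a test.
# Once the returned callable has been invoked, `datetime.now()` (and SQLAlchemy's
# `func.now()`) report the frozen timestamp until the fixture is torn down.
def _example_freeze_time_usage(freeze_time):
    frozen = datetime(2020, 1, 1, 12, 0, 0)
    freeze_time(frozen)
    assert datetime.now() == frozen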
|
mic4ael/indico
|
indico/testing/fixtures/util.py
|
Python
|
mit
| 2,040
| 0.00049
|
from tests.subscriptions import Subscription_tests
from tests.messaging import Event_publisher_tests
from tests.callbacks import Callback_tests
from tests.shortcut_subscriptions import Decorator_tests
import unittest
import logging
def configure_logger():
handler = logging.FileHandler('log.txt')
formatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
if __name__ == '__main__':
configure_logger()
unittest.main()
|
nevtum/Echopoint
|
tests/testrunner.py
|
Python
|
apache-2.0
| 569
| 0.005272
|
#!/usr/bin/env python3
import sys
import text_parsers as tp
import os
infilenames = sys.argv[1:len(sys.argv)-1]
nameprefix = sys.argv[len(sys.argv)-1]
for infilename in infilenames:
outfilename = f'{os.environ["HOME"]}/{os.path.basename(infilename)}.h5'
data = tp.parse_txt_xy(infilename)
data['metadata']['name'] = f'{nameprefix}: {os.path.basename(infilename)}'
tp.save_sample_file(outfilename, data['data'], data['metadata'])
sys.exit(0)
|
BiRG/Omics-Dashboard
|
modules/sbin/batch_parse_txtxy.py
|
Python
|
mit
| 457
| 0
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll
import sys
from os.path import abspath, dirname, join
sys.path.insert(0, join(dirname(dirname(dirname(abspath(__file__)))), 'py-bindings'))
from functools import partial
from time import clock
from math import fabs
import unittest
import copy
import ompl.util as ou
import ompl.base as ob
import ompl.control as oc
from ompl.util import setLogLevel, LogLevel
SOLUTION_TIME = 10.0
MAX_VELOCITY = 3.0
class Environment(object):
def __init__(self, fname):
fp = open(fname, 'r')
lines = fp.readlines()
fp.close()
self.width, self.height = [int(i) for i in lines[0].split(' ')[1:3]]
self.grid = []
self.start = [int(i) for i in lines[1].split(' ')[1:3]]
self.goal = [int(i) for i in lines[2].split(' ')[1:3]]
for i in range(self.width):
self.grid.append(
[int(j) for j in lines[4+i].split(' ')[0:self.height]])
self.char_mapping = ['__', '##', 'oo', 'XX']
def __str__(self):
result = ''
for line in self.grid:
result = result + ''.join([self.char_mapping[c] for c in line]) + '\n'
return result
def isValid(grid, state):
# planning is done in a continuous space, but our collision space
# representation is discrete
x = int(state[0])
y = int(state[1])
if x < 0 or y < 0 or x >= len(grid) or y >= len(grid[0]):
return False
return grid[x][y] == 0 # 0 means valid state
class MyStateSpace(ob.RealVectorStateSpace):
def __init__(self):
super(MyStateSpace, self).__init__(4)
def distance(self, state1, state2):
x1 = int(state1[0])
y1 = int(state1[1])
x2 = int(state2[0])
y2 = int(state2[1])
return fabs(x1-x2) + fabs(y1-y2)
class MyProjectionEvaluator(ob.ProjectionEvaluator):
def __init__(self, space, cellSizes):
super(MyProjectionEvaluator, self).__init__(space)
self.setCellSizes(cellSizes)
def getDimension(self):
return 2
def project(self, state, projection):
projection[0] = state[0]
projection[1] = state[1]
class MyStatePropagator(oc.StatePropagator):
def propagate(self, state, control, duration, result):
result[0] = state[0] + duration*control[0]
result[1] = state[1] + duration*control[1]
result[2] = control[0]
result[3] = control[1]
class TestPlanner(object):
def execute(self, env, time, pathLength, show=False):
result = True
sSpace = MyStateSpace()
sbounds = ob.RealVectorBounds(4)
# dimension 0 (x) spans between [0, width)
# dimension 1 (y) spans between [0, height)
# since sampling is continuous and we round down, we allow values until
# just under the max limit
# the resolution is 1.0 since we check cells only
sbounds.low = ou.vectorDouble()
sbounds.low.extend([0.0, 0.0, -MAX_VELOCITY, -MAX_VELOCITY])
sbounds.high = ou.vectorDouble()
sbounds.high.extend([float(env.width) - 0.000000001, \
float(env.height) - 0.000000001, \
MAX_VELOCITY, MAX_VELOCITY])
sSpace.setBounds(sbounds)
cSpace = oc.RealVectorControlSpace(sSpace, 2)
cbounds = ob.RealVectorBounds(2)
cbounds.low[0] = -MAX_VELOCITY
cbounds.high[0] = MAX_VELOCITY
cbounds.low[1] = -MAX_VELOCITY
cbounds.high[1] = MAX_VELOCITY
cSpace.setBounds(cbounds)
ss = oc.SimpleSetup(cSpace)
isValidFn = ob.StateValidityCheckerFn(partial(isValid, env.grid))
ss.setStateValidityChecker(isValidFn)
propagator = MyStatePropagator(ss.getSpaceInformation())
ss.setStatePropagator(propagator)
planner = self.newplanner(ss.getSpaceInformation())
ss.setPlanner(planner)
# the initial state
start = ob.State(sSpace)
start()[0] = env.start[0]
start()[1] = env.start[1]
start()[2] = 0.0
start()[3] = 0.0
goal = ob.State(sSpace)
goal()[0] = env.goal[0]
goal()[1] = env.goal[1]
goal()[2] = 0.0
goal()[3] = 0.0
ss.setStartAndGoalStates(start, goal, 0.05)
startTime = clock()
if ss.solve(SOLUTION_TIME):
elapsed = clock() - startTime
time = time + elapsed
if show:
print('Found solution in %f seconds!' % elapsed)
path = ss.getSolutionPath()
path.interpolate()
if not path.check():
return (False, time, pathLength)
pathLength = pathLength + path.length()
if show:
print(env, '\n')
temp = copy.deepcopy(env)
for i in range(len(path.states)):
x = int(path.states[i][0])
y = int(path.states[i][1])
if temp.grid[x][y] in [0, 2]:
temp.grid[x][y] = 2
else:
temp.grid[x][y] = 3
print(temp, '\n')
else:
result = False
return (result, time, pathLength)
def newplanner(self, si):
raise NotImplementedError('pure virtual method')
class RRTTest(TestPlanner):
def newplanner(self, si):
planner = oc.RRT(si)
return planner
class ESTTest(TestPlanner):
def newplanner(self, si):
planner = oc.EST(si)
cdim = ou.vectorDouble()
cdim.extend([1, 1])
ope = MyProjectionEvaluator(si.getStateSpace(), cdim)
planner.setProjectionEvaluator(ope)
return planner
class SyclopDecomposition(oc.GridDecomposition):
def __init__(self, length, bounds):
super(SyclopDecomposition, self).__init__(length, 2, bounds)
def project(self, state, coord):
coord[0] = state[0]
coord[1] = state[1]
def sampleFullState(self, sampler, coord, s):
sampler.sampleUniform(s)
s[0] = coord[0]
s[1] = coord[1]
class SyclopRRTTest(TestPlanner):
def newplanner(self, si):
spacebounds = si.getStateSpace().getBounds()
bounds = ob.RealVectorBounds(2)
bounds.setLow(0, spacebounds.low[0])
bounds.setLow(1, spacebounds.low[1])
bounds.setHigh(0, spacebounds.high[0])
bounds.setHigh(1, spacebounds.high[1])
# Create a 10x10 grid decomposition for Syclop
decomp = SyclopDecomposition(10, bounds)
planner = oc.SyclopRRT(si, decomp)
# Set syclop parameters conducive to a tiny workspace
planner.setNumFreeVolumeSamples(1000)
planner.setNumRegionExpansions(10)
planner.setNumTreeExpansions(5)
return planner
class SyclopESTTest(TestPlanner):
def newplanner(self, si):
spacebounds = si.getStateSpace().getBounds()
bounds = ob.RealVectorBounds(2)
bounds.setLow(0, spacebounds.low[0])
bounds.setLow(1, spacebounds.low[1])
bounds.setHigh(0, spacebounds.high[0])
bounds.setHigh(1, spacebounds.high[1])
# Create a 10x10 grid decomposition for Syclop
decomp = SyclopDecomposition(10, bounds)
planner = oc.SyclopEST(si, decomp)
# Set syclop parameters conducive to a tiny workspace
planner.setNumFreeVolumeSamples(1000)
planner.setNumRegionExpansions(10)
planner.setNumTreeExpansions(5)
return planner
class KPIECE1Test(TestPlanner):
def newplanner(self, si):
planner = oc.KPIECE1(si)
cdim = ou.vectorDouble()
cdim.extend([1, 1])
ope = MyProjectionEvaluator(si.getStateSpace(), cdim)
planner.setProjectionEvaluator(ope)
return planner
class PlanTest(unittest.TestCase):
def setUp(self):
self.env = Environment(dirname(abspath(__file__))+'/../../tests/resources/env1.txt')
if self.env.width * self.env.height == 0:
self.fail('The environment has a 0 dimension. Cannot continue')
self.verbose = True
def runPlanTest(self, planner):
time = 0.0
length = 0.0
good = 0
N = 25
for _ in range(N):
(result, time, length) = planner.execute(self.env, time, length, False)
if result:
good = good + 1
success = 100.0 * float(good) / float(N)
avgruntime = time / float(N)
avglength = length / float(N)
if self.verbose:
print(' Success rate: %f%%' % success)
print(' Average runtime: %f' % avgruntime)
print(' Average path length: %f' % avglength)
return (success, avgruntime, avglength)
def testControl_RRT(self):
planner = RRTTest()
(success, avgruntime, avglength) = self.runPlanTest(planner)
self.assertTrue(success >= 99.0)
self.assertTrue(avgruntime < 5)
self.assertTrue(avglength < 100.0)
def testControl_EST(self):
planner = ESTTest()
(success, avgruntime, avglength) = self.runPlanTest(planner)
self.assertTrue(success >= 99.0)
self.assertTrue(avgruntime < 5)
self.assertTrue(avglength < 100.0)
def testControl_KPIECE1(self):
planner = KPIECE1Test()
(success, avgruntime, avglength) = self.runPlanTest(planner)
self.assertTrue(success >= 99.0)
self.assertTrue(avgruntime < 2.5)
self.assertTrue(avglength < 100.0)
def testControl_SyclopRRT(self):
planner = SyclopRRTTest()
(success, avgruntime, avglength) = self.runPlanTest(planner)
self.assertTrue(success >= 99.0)
self.assertTrue(avgruntime < 2.5)
self.assertTrue(avglength < 100.0)
def testControl_SyclopEST(self):
planner = SyclopESTTest()
(success, avgruntime, avglength) = self.runPlanTest(planner)
self.assertTrue(success >= 99.0)
self.assertTrue(avgruntime < 2.5)
self.assertTrue(avglength < 100.0)
def suite():
suites = (unittest.makeSuite(PlanTest))
return unittest.TestSuite(suites)
if __name__ == '__main__':
setLogLevel(LogLevel.LOG_ERROR)
unittest.main()
|
utiasASRL/batch-informed-trees
|
tests/control/test_control.py
|
Python
|
bsd-3-clause
| 11,947
| 0.002595
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import traceback
from fuel_health.common.ssh import Client as SSHClient
from fuel_health.common.utils import data_utils
from fuel_health.tests.ha.test_mysql_status import BaseMysqlTest
LOG = logging.getLogger(__name__)
class TestMysqlReplication(BaseMysqlTest):
@classmethod
def setUpClass(cls):
super(TestMysqlReplication, cls).setUpClass()
cls.database = 'ost1' + str(data_utils.rand_int_id(100, 999))
cls.master_ip = None
def setUp(self):
super(TestMysqlReplication, self).setUp()
if 'ha' not in self.config.compute.deployment_mode:
self.skipTest('Cluster is not HA mode, skipping tests')
@classmethod
def tearDownClass(cls):
if cls.master_ip:
try:
cmd = "mysql -h localhost -e 'DROP DATABASE %s'" % cls.database
SSHClient(cls.master_ip, cls.node_user,
key_filename=cls.node_key).exec_command(cmd)
except Exception:
LOG.debug(traceback.format_exc())
def test_mysql_replication(self):
"""Check data replication over mysql
Target Service: HA mysql
Scenario:
            1. Check that mysql is running on all controller or database nodes.
            2. Create a database on one node.
            3. Create a table in the created database.
            4. Insert data into the created table.
            5. Get the replicated data from each database node.
            6. Verify that the replicated data is the same on each database node.
            7. Drop the created database.
Duration: 10 s.
"""
LOG.info("'Test MySQL replication' started")
databases = self.verify(20, self.get_database_nodes,
1, "Can not get database hostnames. Check that"
" at least one controller is operable",
"get database nodes",
self.controller_ip,
self.node_user,
key=self.node_key)
self.verify_response_body_not_equal(0, len(databases),
self.no_db_msg, 1)
if len(databases) == 1:
self.skipTest(self.one_db_msg)
LOG.info("Database nodes are " + ", ".join(databases))
self.master_ip = databases[0]
# check that mysql is running on all hosts
cmd = 'mysql -h localhost -e "" '
for db_node in databases:
ssh_client = SSHClient(
db_node, self.node_user,
key_filename=self.node_key, timeout=100)
self.verify(
20, ssh_client.exec_command, 1,
'Can not connect to mysql. '
'Please check that mysql is running and there '
'is connectivity by management network',
'detect mysql node', cmd)
database_name = self.database
table_name = 'ost' + str(data_utils.rand_int_id(100, 999))
record_data = str(data_utils.rand_int_id(1000000000, 9999999999))
create_database = (
'mysql -h localhost -e "CREATE DATABASE IF NOT EXISTS '
'{database}" '.format(database=database_name)
)
create_table = (
'mysql -h localhost -e'
' "CREATE TABLE IF NOT EXISTS {database}.{table}'
' (data VARCHAR(100))" '.format(database=database_name,
table=table_name)
)
create_record = (
'mysql -h localhost -e "INSERT INTO {database}.{table} (data) '
'VALUES({data})" '.format(database=database_name,
table=table_name,
data=record_data)
)
get_record = (
'mysql -h localhost -e "SELECT * FROM {database}.{table} '
'WHERE data = \"{data}\"" '.format(database=database_name,
table=table_name,
data=record_data)
)
drop_db = "mysql -h localhost -e 'DROP DATABASE {database}'".format(
database=database_name
)
# create db, table, insert data on one node
LOG.info('target node ip/hostname: "{0}" '.format(self.master_ip))
master_ssh_client = SSHClient(self.master_ip, self.node_user,
key_filename=self.node_key,
timeout=100)
self.verify(20, master_ssh_client.exec_command, 2,
'Database creation failed', 'create database',
create_database)
LOG.info('create database')
self.verify(20, master_ssh_client.exec_command, 3,
'Table creation failed', 'create table', create_table)
LOG.info('create table')
self.verify(20, master_ssh_client.exec_command, 4,
'Can not insert data in created table', 'data insertion',
create_record)
LOG.info('create data')
# Verify that data is replicated on other databases
for db_node in databases:
if db_node != self.master_ip:
client = SSHClient(db_node,
self.node_user,
key_filename=self.node_key)
output = self.verify(
20, client.exec_command, 5,
'Can not get data from database node %s' % db_node,
'get_record', get_record)
self.verify_response_body(output, record_data,
msg='Expected data missing',
failed_step='6')
# Drop created db
ssh_client = SSHClient(self.master_ip, self.node_user,
key_filename=self.node_key)
self.verify(20, ssh_client.exec_command, 7,
'Can not delete created database',
'database deletion', drop_db)
self.master_ip = None
|
eayunstack/fuel-ostf
|
fuel_health/tests/ha/test_mysql_replication.py
|
Python
|
apache-2.0
| 6,751
| 0
|
import logging
class BaseDebugInterface(object):
def __init__(self, debuger):
self.robotDebuger = debuger
self.debugCtx = debuger.debugCtx
self.logger = logging.getLogger("rbt.int")
self.bp_id = 0
def start(self, settings):
"""start debug interface."""
pass
def close(self):
pass
def go_steps(self, count): self.debugCtx.go_steps(int(count))
def go_into(self): self.debugCtx.go_into()
def go_over(self): self.debugCtx.go_over()
def go_on(self): self.debugCtx.go_on()
def go_return(self): self.debugCtx.go_return()
def go_pause(self): return self.debugCtx.go_pause()
def add_breakpoint(self, bp): self.robotDebuger.add_breakpoint(bp)
def watch_variable(self, name): return self.robotDebuger.watch_variable(name)
def remove_variable(self, name): return self.robotDebuger.remove_variable(name)
def run_keyword(self, name, *args): return self.robotDebuger.run_keyword(name, *args)
def update_variable(self, name, value):
from robot.running import NAMESPACES
if NAMESPACES.current is not None:
NAMESPACES.current.variables[name] = value
def variable_value(self, var_list):
from robot.running import NAMESPACES
if NAMESPACES.current is None:
return [(e, None) for e in var_list]
robot_vars = NAMESPACES.current.variables
val_list = []
for e in var_list:
try:
v = robot_vars.replace_scalar(e)
except Exception, et:
if "Non-existing" in str(et):
v = None
else: raise
val_list.append((e, v))
return val_list
@property
def watching_variable(self):return self.robotDebuger.watching_variable
@property
def callstack(self):
"""Return a runtime list"""
return list(self.debugCtx.call_stack)
@property
def breakpoints(self):
"""Return list of breakpoint"""
return list(self.debugCtx.break_points)
@property
def active_breakpoint(self):return self.debugCtx.active_break_point
def disable_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = False
def enable_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = True
def update_breakpoint(self, name, match_kw=False):
bp = self._get_breakpoint(name, match_kw)
if bp: bp.active = not bp.active
def _get_breakpoint(self, name, match_kw):
for e in self.debugCtx.break_points:
if match_kw and hasattr(e, 'kw_name') and e.kw_name == name:
return e
elif not match_kw and e.name == name:
return e
return None
def add_telnet_monitor(self, monitor):
"""this is IPAMml special feature."""
self.robotDebuger.add_telnet_monitor(monitor)
def add_debug_listener(self, l):
self.debugCtx.add_listener(l)
def remove_debug_listener(self, l):
self.debugCtx.remove_listener(l)
class Listener:
def __init__(self):
pass
def pause(self, breakpoint):
pass
def go_on(self):
pass
def start_keyword(self, keyword):
pass
def end_keyword(self, keyword):
pass
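# Illustrative only: a minimal Listener subclass showing how the hooks above can
# be filled in; it simply logs keyword boundaries and would be registered via
# BaseDebugInterface.add_debug_listener().
class LoggingListener(Listener):
    def start_keyword(self, keyword):
        logging.getLogger("rbt.int").debug("start keyword: %s", keyword)

    def end_keyword(self, keyword):
        logging.getLogger("rbt.int").debug("end keyword: %s", keyword)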
|
deonwu/robotframework-debuger
|
src/rdb/interface/base.py
|
Python
|
gpl-2.0
| 3,484
| 0.01062
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consent', '0013_auto_20170217_1606'),
]
operations = [
migrations.AlterField(
model_name='educationdetail',
name='college_passout_year',
field=models.CharField(default=2017, max_length=4),
preserve_default=False,
),
]
|
aakashrana1995/svnit-tnp
|
tnp/consent/migrations/0014_auto_20170325_1723.py
|
Python
|
mit
| 523
| 0
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.doc.table
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Table document elements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from collections import defaultdict
from ..model import Compound, ModelList
from ..parse.table import CompoundHeadingParser, CompoundCellParser, UvvisAbsHeadingParser, UvvisAbsCellParser, \
QuantumYieldHeadingParser, QuantumYieldCellParser, UvvisEmiHeadingParser, UvvisEmiCellParser, ExtinctionCellParser, \
ExtinctionHeadingParser, FluorescenceLifetimeHeadingParser, FluorescenceLifetimeCellParser, \
ElectrochemicalPotentialHeadingParser, ElectrochemicalPotentialCellParser, IrHeadingParser, IrCellParser, \
SolventCellParser, SolventHeadingParser, SolventInHeadingParser, UvvisAbsEmiQuantumYieldHeadingParser, \
UvvisAbsEmiQuantumYieldCellParser, MeltingPointHeadingParser, MeltingPointCellParser, GlassTransitionHeadingParser, GlassTransitionCellParser, TempInHeadingParser, \
UvvisAbsDisallowedHeadingParser, UvvisEmiQuantumYieldHeadingParser, UvvisEmiQuantumYieldCellParser
# TODO: Sort out the above import... import module instead
from ..nlp.tag import NoneTagger
from ..nlp.tokenize import FineWordTokenizer
from ..utils import memoized_property
from .element import CaptionedElement
from .text import Sentence
log = logging.getLogger(__name__)
class Table(CaptionedElement):
#: Table cell parsers
parsers = [
(CompoundHeadingParser(), CompoundCellParser()),
(UvvisAbsEmiQuantumYieldHeadingParser(), UvvisAbsEmiQuantumYieldCellParser()),
(UvvisEmiQuantumYieldHeadingParser(), UvvisEmiQuantumYieldCellParser()),
(UvvisEmiHeadingParser(), UvvisEmiCellParser()),
(UvvisAbsHeadingParser(), UvvisAbsCellParser(), UvvisAbsDisallowedHeadingParser()),
(IrHeadingParser(), IrCellParser()),
(ExtinctionHeadingParser(), ExtinctionCellParser()),
(QuantumYieldHeadingParser(), QuantumYieldCellParser()),
(FluorescenceLifetimeHeadingParser(), FluorescenceLifetimeCellParser()),
(ElectrochemicalPotentialHeadingParser(), ElectrochemicalPotentialCellParser()),
(MeltingPointHeadingParser(), MeltingPointCellParser()),
(GlassTransitionHeadingParser(), GlassTransitionCellParser()),
(SolventHeadingParser(), SolventCellParser()),
(SolventInHeadingParser(),),
(TempInHeadingParser(),)
]
def __init__(self, caption, label=None, headings=None, rows=None, footnotes=None, **kwargs):
super(Table, self).__init__(caption=caption, label=label, **kwargs)
self.headings = headings if headings is not None else [] # list(list(Cell))
self.rows = rows if rows is not None else [] # list(list(Cell))
self.footnotes = footnotes if footnotes is not None else []
@property
def document(self):
return self._document
@document.setter
def document(self, document):
self._document = document
self.caption.document = document
for row in self.headings:
for cell in row:
cell.document = document
for row in self.rows:
for cell in row:
cell.document = document
def serialize(self):
"""Convert Table element to python dictionary."""
data = {
'type': self.__class__.__name__,
'caption': self.caption.serialize(),
'headings': [[cell.serialize() for cell in hrow] for hrow in self.headings],
'rows': [[cell.serialize() for cell in row] for row in self.rows],
}
return data
def _repr_html_(self):
        html_lines = ['<table class="table">']
        html_lines.append(self.caption._repr_html_())
        html_lines.append('<thead>')
        for hrow in self.headings:
            html_lines.append('<tr>')
            for cell in hrow:
                html_lines.append('<th>' + cell.text + '</th>')
            html_lines.append('</tr>')
        html_lines.append('</thead>')
        html_lines.append('<tbody>')
        for row in self.rows:
            html_lines.append('<tr>')
            for cell in row:
                html_lines.append('<td>' + cell.text + '</td>')
            html_lines.append('</tr>')
        html_lines.append('</tbody>')
        html_lines.append('</table>')
        return '\n'.join(html_lines)
@property
def records(self):
"""Chemical records that have been parsed from the table."""
caption_records = self.caption.records
# Parse headers to extract contextual data and determine value parser for the column
value_parsers = {}
header_compounds = defaultdict(list)
table_records = ModelList()
seen_compound_col = False
log.debug('Parsing table headers')
for i, col_headings in enumerate(zip(*self.headings)):
# log.info('Considering column %s' % i)
for parsers in self.parsers:
log.debug(parsers)
heading_parser = parsers[0]
value_parser = parsers[1] if len(parsers) > 1 else None
disallowed_parser = parsers[2] if len(parsers) > 2 else None
allowed = False
disallowed = False
for cell in col_headings:
log.debug(cell.tagged_tokens)
results = list(heading_parser.parse(cell.tagged_tokens))
if results:
allowed = True
log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.serialize() for c in results]))
# Results from every parser are stored as header compounds
header_compounds[i].extend(results)
# Referenced footnote records are also stored
for footnote in self.footnotes:
# print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references))
if footnote.id in cell.references:
log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.serialize() for c in footnote.records]))
# print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
header_compounds[i].extend(footnote.records)
# Check if the disallowed parser matches this cell
if disallowed_parser and list(disallowed_parser.parse(cell.tagged_tokens)):
log.debug('Column %s: Disallowed %s' % (i, heading_parser.__class__.__name__))
disallowed = True
# If heading parser matches and disallowed parser doesn't, store the value parser
if allowed and not disallowed and value_parser and i not in value_parsers:
if isinstance(value_parser, CompoundCellParser):
# Only take the first compound col
if seen_compound_col:
continue
seen_compound_col = True
log.debug('Column %s: Value parser: %s' % (i, value_parser.__class__.__name__))
value_parsers[i] = value_parser
# Stop after value parser is assigned?
# for hrow in self.headings:
# for i, cell in enumerate(hrow):
# log.debug(cell.tagged_tokens)
# for heading_parser, value_parser in self.parsers:
# results = list(heading_parser.parse(cell.tagged_tokens))
# if results:
# log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.to_primitive() for c in results]))
# # Results from every parser are stored as header compounds
# header_compounds[i].extend(results)
# if results and value_parser and i not in value_parsers:
# if isinstance(value_parser, CompoundCellParser):
# # Only take the first compound col
# if seen_compound_col:
# continue
# seen_compound_col = True
# value_parsers[i] = value_parser
# break # Stop after first heading parser matches
# # Referenced footnote records are also stored
# for footnote in self.footnotes:
# # print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references))
# if footnote.id in cell.references:
# log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.to_primitive() for c in footnote.records]))
# # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
# header_compounds[i].extend(footnote.records)
# If no parsers, skip processing table
if value_parsers:
# If no CompoundCellParser() in value_parsers and value_parsers[0] == [] then set CompoundCellParser()
if not seen_compound_col and 0 not in value_parsers:
log.debug('No compound column found in table, assuming first column')
value_parsers[0] = CompoundCellParser()
for row in self.rows:
row_compound = Compound()
# Keep cell records that are contextual to merge at the end
contextual_cell_compounds = []
for i, cell in enumerate(row):
log.debug(cell.tagged_tokens)
if i in value_parsers:
results = list(value_parsers[i].parse(cell.tagged_tokens))
if results:
log.debug('Cell column %s: Match %s: %s' % (i, value_parsers[i].__class__.__name__, [c.serialize() for c in results]))
# For each result, merge in values from elsewhere
for result in results:
# Merge each header_compounds[i]
for header_compound in header_compounds[i]:
if header_compound.is_contextual:
result.merge_contextual(header_compound)
# Merge footnote compounds
for footnote in self.footnotes:
if footnote.id in cell.references:
for footnote_compound in footnote.records:
result.merge_contextual(footnote_compound)
if result.is_contextual:
# Don't merge cell as a value compound if there are no values
contextual_cell_compounds.append(result)
else:
row_compound.merge(result)
# Merge contextual information from cells
for contextual_cell_compound in contextual_cell_compounds:
row_compound.merge_contextual(contextual_cell_compound)
                # If no compound name/label, try to take them from the previous row
if not row_compound.names and not row_compound.labels and table_records:
prev = table_records[-1]
row_compound.names = prev.names
row_compound.labels = prev.labels
# Merge contextual information from caption into the full row
for caption_compound in caption_records:
if caption_compound.is_contextual:
row_compound.merge_contextual(caption_compound)
# And also merge from any footnotes that are referenced from the caption
for footnote in self.footnotes:
if footnote.id in self.caption.references:
# print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
for fn_compound in footnote.records:
row_compound.merge_contextual(fn_compound)
log.debug(row_compound.serialize())
if row_compound.serialize():
table_records.append(row_compound)
# TODO: If no rows have name or label, see if one is in the caption
# Include non-contextual caption records in the final output
caption_records = [c for c in caption_records if not c.is_contextual]
table_records += caption_records
return table_records
# TODO: extend abbreviations property to include footnotes
# TODO: Resolve footnote records into headers
class Cell(Sentence):
word_tokenizer = FineWordTokenizer()
# pos_tagger = NoneTagger()
ner_tagger = NoneTagger()
@memoized_property
def abbreviation_definitions(self):
"""Empty list. Abbreviation detection is disabled within table cells."""
return []
@property
def records(self):
"""Empty list. Individual cells don't provide records, this is handled by the parent Table."""
return []
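# Illustrative only: a rough, hypothetical sketch of reading records out of a
# hand-built Table. In normal use tables come from a parsed Document, and whether
# any values are extracted depends on the heading parsers matching the invented
# cell text below.
def _example_table_records():
    caption = Sentence('Optical properties of the reported compounds.')
    headings = [[Cell('Compound'), Cell('Quantum Yield')]]
    rows = [[Cell('4a'), Cell('0.49')]]
    table = Table(caption, headings=headings, rows=rows)
    return [record.serialize() for record in table.records]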
|
mcs07/ChemDataExtractor
|
chemdataextractor/doc/table.py
|
Python
|
mit
| 13,370
| 0.003141
|
from bongo.apps.bongo import models
from rest_framework import serializers
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = models.Event
fields = ('id', )
|
BowdoinOrient/bongo
|
bongo/apps/api/serializers/event.py
|
Python
|
mit
| 200
| 0
|
from __future__ import print_function
import numpy as np
import datetime
import csv
import pickle
import sys
species_map = {'CULEX RESTUANS' : "100000",
'CULEX TERRITANS' : "010000",
'CULEX PIPIENS' : "001000",
'CULEX PIPIENS/RESTUANS' : "101000",
'CULEX ERRATICUS' : "000100",
'CULEX SALINARIUS': "000010",
'CULEX TARSALIS' : "000001",
'UNSPECIFIED CULEX': "001000"} # Treating unspecified as PIPIENS (http://www.ajtmh.org/content/80/2/268.full)
def date(text):
return datetime.datetime.strptime(text, "%Y-%m-%d").date()
def precip(text):
TRACE = 1e-3
text = text.strip()
if text == "M":
return None
if text == "-":
return None
if text == "T":
return TRACE
return float(text)
def impute_missing_weather_station_values(weather):
# Stupid simple
for k, v in weather.items():
if v[0] is None:
v[0] = v[1]
elif v[1] is None:
v[1] = v[0]
for k1 in v[0]:
if v[0][k1] is None:
v[0][k1] = v[1][k1]
for k1 in v[1]:
if v[1][k1] is None:
v[1][k1] = v[0][k1]
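# Illustrative only: a tiny sanity check of the imputation above. Station 0 is
# missing Tmax, so after the call it is copied from station 1's reading.
def _example_impute_missing_weather():
    day = datetime.date(2013, 6, 1)
    weather = {day: [{"Tmax": None, "Tmin": 60.0},
                     {"Tmax": 85.0, "Tmin": 62.0}]}
    impute_missing_weather_station_values(weather)
    assert weather[day][0]["Tmax"] == 85.0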
def load_weather(weatherfile):
weather = {}
for line in csv.DictReader(open(weatherfile)):
for name, converter in {"Date" : date,
"Tmax" : float,"Tmin" : float,"Tavg" : float,
"DewPoint" : float, "WetBulb" : float,
"PrecipTotal" : precip,"Sunrise" : precip,"Sunset" : precip,
"Depart" : float, "Heat" : precip,"Cool" : precip,
"ResultSpeed" : float,"ResultDir" : float,"AvgSpeed" : float,
"StnPressure" : float, "SeaLevel" : float}.items():
x = line[name].strip()
line[name] = converter(x) if (x != "M") else None
station = int(line["Station"]) - 1
assert station in [0,1]
dt = line["Date"]
if dt not in weather:
weather[dt] = [None, None]
assert weather[dt][station] is None, "duplicate weather reading {0}:{1}".format(dt, station)
weather[dt][station] = line
impute_missing_weather_station_values(weather)
return weather
def load_testing(testfile):
training = []
for line in csv.DictReader(open(testfile)):
for name, converter in {"Date" : date,
"Latitude" : float, "Longitude" : float}.items():
line[name] = converter(line[name])
training.append(line)
return training
def closest_station(lat, longi):
# Chicago is small enough that we can treat coordinates as rectangular.
stations = np.array([[41.995, -87.933],
[41.786, -87.752]])
loc = np.array([lat, longi])
deltas = stations - loc[None, :]
dist2 = (deltas**2).sum(1)
return np.argmin(dist2)
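# Illustrative only: a quick check of closest_station. A point on the south-east
# side of the city is nearer (in squared-degree terms) to the second station, so
# the returned index is 1.
def _example_closest_station():
    assert closest_station(41.80, -87.60) == 1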
def normalize(X, mean=None, std=None):
count = X.shape[1]
if mean is None:
mean = np.nanmean(X, axis=0)
for i in range(count):
X[np.isnan(X[:,i]), i] = mean[i]
if std is None:
std = np.std(X, axis=0)
for i in range(count):
X[:,i] = (X[:,i] - mean[i]) / std[i]
return mean, std
def scaled_count(record):
SCALE = 9.0
if "NumMosquitos" not in record:
# This is test data
return 1
return int(np.ceil(record["NumMosquitos"] / SCALE))
def assemble_X(base, weather):
X = []
for b in base:
date = b["Date"]
lat, longi = b["Latitude"], b["Longitude"]
case = [date.year, date.month, date.day, date.weekday(), lat, longi]
# Look at a selection of past weather values
for days_ago in [0,1,3,5,8,12]:
day = date - datetime.timedelta(days=days_ago)
for obs in ["Tmax","Tmin","Tavg","DewPoint","WetBulb","PrecipTotal","Depart","Sunrise","Sunset","Cool","ResultSpeed","ResultDir"]:
station = closest_station(lat, longi)
case.append(weather[day][station][obs])
# Specify which mosquitos are present
species_vector = [float(x) for x in species_map[b["Species"]]]
case.extend(species_vector)
        # Weight each observation by the number of mosquitos seen. Test data
        # doesn't have this column, so in that case use 1. This accidentally
        # takes into account multiple entries that result from >50 mosquitos
        # on one day.
for repeat in range(scaled_count(b)):
X.append(case)
X = np.asarray(X, dtype=np.float32)
return X
class AdjustVariable(object):
def __init__(self, variable, target, half_life=20):
self.variable = variable
self.target = target
self.half_life = half_life
def __call__(self, nn, train_history):
delta = self.variable.get_value() - self.target
delta /= 2**(1.0/self.half_life)
self.variable.set_value(np.float32(self.target + delta))
def submit(net, mean, std, testfile, weatherfile):
weather = load_weather(weatherfile)
testing = load_testing(testfile)
X = assemble_X(testing, weather)
normalize(X, mean, std)
predictions = net.predict_proba(X)[:,0]
out = csv.writer(open("submissionlasagna.tmp", "w"))
out.writerow(["Id","WnvPresent"])
for row, p in zip(testing, predictions):
out.writerow([row["Id"], p])
if __name__ == "__main__":
if len(sys.argv) == 3:
fileObject = open("modellasagne.dat",'r')
dict = pickle.load(fileObject)
fileObject.close()
submit(dict['net'], dict['mean'], dict['std'], sys.argv[1], sys.argv[2])
else:
print("The script needs 2 arguments : \n1: Test file \n2: Weather csv file \n"
"Example: python predict.py ./input/test.csv ./input/weather.csv")
|
nhlx5haze/Kaggle_WestNileVirus
|
src/predict.py
|
Python
|
bsd-3-clause
| 5,983
| 0.01755
|
# -*- coding: utf-8 -*-
"""Startup utilities"""
# pylint:skip-file
import os
import sys
from functools import partial
import paste.script.command
import werkzeug.script
etc = partial(os.path.join, 'parts', 'etc')
DEPLOY_INI = etc('deploy.ini')
DEPLOY_CFG = etc('deploy.cfg')
DEBUG_INI = etc('debug.ini')
DEBUG_CFG = etc('debug.cfg')
_buildout_path = __file__
for i in range(2 + __name__.count('.')):
_buildout_path = os.path.dirname(_buildout_path)
abspath = partial(os.path.join, _buildout_path)
del _buildout_path
# bin/paster serve parts/etc/deploy.ini
def make_app(global_conf={}, config=DEPLOY_CFG, debug=False):
from presence_analyzer import app
app.config.from_pyfile(abspath(config))
app.debug = debug
return app
# bin/paster serve parts/etc/debug.ini
def make_debug(global_conf={}, **conf):
from werkzeug.debug import DebuggedApplication
app = make_app(global_conf, config=DEBUG_CFG, debug=True)
return DebuggedApplication(app, evalex=True)
# bin/flask-ctl shell
def make_shell():
"""
Interactive Flask Shell.
"""
from flask import request
app = make_app()
http = app.test_client()
reqctx = app.test_request_context
return locals()
def _serve(action, debug=False, dry_run=False):
"""
Build paster command from 'action' and 'debug' flag.
"""
if debug:
config = DEBUG_INI
else:
config = DEPLOY_INI
argv = ['bin/paster', 'serve', config]
if action in ('start', 'restart'):
argv += [action, '--daemon']
elif action in ('', 'fg', 'foreground'):
argv += ['--reload']
else:
argv += [action]
# Print the 'paster' command
print ' '.join(argv)
if dry_run:
return
# Configure logging and lock file
if action in ('start', 'stop', 'restart', 'status'):
argv += [
'--log-file', abspath('var', 'log', 'paster.log'),
'--pid-file', abspath('var', 'log', '.paster.pid'),
]
sys.argv = argv[:2] + [abspath(config)] + argv[3:]
# Run the 'paster' command
paste.script.command.run()
# bin/flask-ctl ...
def run():
action_shell = werkzeug.script.make_shell(make_shell, make_shell.__doc__)
# bin/flask-ctl serve [fg|start|stop|restart|status]
def action_serve(action=('a', 'start'), dry_run=False):
"""Serve the application.
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
Options:
- 'action' is one of [fg|start|stop|restart|status]
- '--dry-run' print the paster command and exit
"""
_serve(action, debug=False, dry_run=dry_run)
# bin/flask-ctl debug [fg|start|stop|restart|status]
def action_debug(action=('a', 'start'), dry_run=False):
"""
Serve the debugging application.
"""
_serve(action, debug=True, dry_run=dry_run)
# bin/flask-ctl status
def action_status(dry_run=False):
"""
Status of the application.
"""
_serve('status', dry_run=dry_run)
# bin/flask-ctl stop
def action_stop(dry_run=False):
"""
Stop the application.
"""
_serve('stop', dry_run=dry_run)
werkzeug.script.run()
def download_xml():
"""
Download xml files from stx website.
"""
import urllib
url = 'http://sargo.bolt.stxnext.pl/users.xml'
urllib.urlretrieve(url, 'runtime/data/users.xml')
|
stxnext-kindergarten/presence-analyzer-kjagodzinski
|
src/presence_analyzer/script.py
|
Python
|
mit
| 3,487
| 0
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
import os
import pexpect
import re
import tempfile
from trove.guestagent.strategies.restore import base
from trove.openstack.common import log as logging
from trove.common import exception
from trove.common import utils
import trove.guestagent.datastore.mysql.service as dbaas
from trove.common.i18n import _ # noqa
LOG = logging.getLogger(__name__)
class MySQLRestoreMixin(object):
"""Common utils for restoring MySQL databases."""
RESET_ROOT_RETRY_TIMEOUT = 100
RESET_ROOT_SLEEP_INTERVAL = 10
# Reset the root password in a single transaction with 'FLUSH PRIVILEGES'
# to ensure we never leave database wide open without 'grant tables'.
RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;",
"UPDATE `mysql`.`user` SET"
" `password`=PASSWORD('')"
" WHERE `user`='root';",
"FLUSH PRIVILEGES;",
"COMMIT;")
# This is a suffix MySQL appends to the file name given in
# the '--log-error' startup parameter.
_ERROR_LOG_SUFFIX = '.err'
_ERROR_MESSAGE_PATTERN = re.compile("^ERROR:\s+.+$")
def mysql_is_running(self):
try:
utils.execute_with_timeout("/usr/bin/mysqladmin", "ping")
LOG.debug("MySQL is up and running.")
return True
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return False
def mysql_is_not_running(self):
try:
utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
LOG.info("MySQL is still running.")
return False
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return True
def poll_until_then_raise(self, event, exc):
try:
utils.poll_until(event,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
except exception.PollTimeOut:
raise exc
def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
child = pexpect.spawn("sudo mysqld_safe"
" --skip-grant-tables"
" --skip-networking"
" --init-file='%s'"
" --log-error='%s'" %
(init_file.name, err_log_file.name)
)
try:
i = child.expect(['Starting mysqld daemon'])
if i == 0:
LOG.info(_("Starting MySQL"))
except pexpect.TIMEOUT:
LOG.exception(_("Got a timeout launching mysqld_safe"))
finally:
# There is a race condition here where we kill mysqld before
            # the init file has been executed. We need to ensure mysqld is up.
#
# mysqld_safe will start even if init-file statement(s) fail.
# We therefore also check for errors in the log file.
self.poll_until_then_raise(
self.mysql_is_running,
base.RestoreError("Reset root password failed:"
" mysqld did not start!"))
first_err_message = self._find_first_error_message(err_log_file)
if first_err_message:
raise base.RestoreError("Reset root password failed: %s"
% first_err_message)
LOG.info(_("Root password reset successfully."))
LOG.debug("Cleaning up the temp mysqld process.")
utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown")
LOG.debug("Polling for shutdown to complete.")
try:
utils.poll_until(self.mysql_is_not_running,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
LOG.debug("Database successfully shutdown")
except exception.PollTimeOut:
LOG.debug("Timeout shutting down database "
"- performing killall on mysqld_safe.")
utils.execute_with_timeout("killall", "mysqld_safe",
root_helper="sudo",
run_as_root=True)
self.poll_until_then_raise(
self.mysql_is_not_running,
base.RestoreError("Reset root password failed: "
"mysqld did not stop!"))
def reset_root_password(self):
with tempfile.NamedTemporaryFile() as init_file:
utils.execute_with_timeout("sudo", "chmod", "a+r", init_file.name)
self._writelines_one_per_line(init_file,
self.RESET_ROOT_MYSQL_COMMANDS)
# Do not attempt to delete the file as the 'trove' user.
# The process writing into it may have assumed its ownership.
# Only owners can delete temporary
# files (restricted deletion).
err_log_file = tempfile.NamedTemporaryFile(
suffix=self._ERROR_LOG_SUFFIX,
delete=False)
try:
self._start_mysqld_safe_with_init_file(init_file, err_log_file)
finally:
err_log_file.close()
MySQLRestoreMixin._delete_file(err_log_file.name)
def _writelines_one_per_line(self, fp, lines):
fp.write(os.linesep.join(lines))
fp.flush()
def _find_first_error_message(self, fp):
if MySQLRestoreMixin._is_non_zero_file(fp):
return MySQLRestoreMixin._find_first_pattern_match(
fp,
self._ERROR_MESSAGE_PATTERN
)
return None
@classmethod
    def _delete_file(cls, file_path):
"""Force-remove a given file as root.
Do not raise an exception on failure.
"""
if os.path.isfile(file_path):
try:
utils.execute_with_timeout("rm", "-f", file_path,
run_as_root=True,
root_helper="sudo")
except Exception:
LOG.exception("Could not remove file: '%s'" % file_path)
@classmethod
    def _is_non_zero_file(cls, fp):
file_path = fp.name
return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0)
@classmethod
    def _find_first_pattern_match(cls, fp, pattern):
for line in fp:
if pattern.match(line):
return line
return None
class MySQLDump(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for MySQLDump."""
__strategy_name__ = 'mysqldump'
base_restore_cmd = 'sudo mysql'
class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for InnoBackupEx."""
__strategy_name__ = 'innobackupex'
base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
base_prepare_cmd = ('sudo innobackupex --apply-log %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupEx, self).__init__(*args, **kwargs)
self.prepare_cmd = self.base_prepare_cmd % kwargs
self.prep_retcode = None
def pre_restore(self):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.stop_db()
LOG.info(_("Cleaning out restore location: %s."),
self.restore_location)
utils.execute_with_timeout("chmod", "-R", "0777",
self.restore_location,
root_helper="sudo",
run_as_root=True)
utils.clean_out(self.restore_location)
def _run_prepare(self):
LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def post_restore(self):
self._run_prepare()
utils.execute_with_timeout("chown", "-R", "-f", "mysql",
self.restore_location,
root_helper="sudo",
run_as_root=True)
self._delete_old_binlogs()
self.reset_root_password()
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.start_mysql()
def _delete_old_binlogs(self):
files = glob.glob(os.path.join(self.restore_location, "ib_logfile*"))
for f in files:
os.unlink(f)
class InnoBackupExIncremental(InnoBackupEx):
__strategy_name__ = 'innobackupexincremental'
incremental_prep = ('sudo innobackupex'
' --apply-log'
' --redo-only'
' %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup'
' %(incremental_args)s'
' 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
self.restore_location = kwargs.get('restore_location')
self.content_length = 0
def _incremental_restore_cmd(self, incremental_dir):
"""Return a command for a restore with a incremental location."""
args = {'restore_location': incremental_dir}
return (self.decrypt_cmd +
self.unzip_cmd +
(self.base_restore_cmd % args))
def _incremental_prepare_cmd(self, incremental_dir):
if incremental_dir is not None:
incremental_arg = '--incremental-dir=%s' % incremental_dir
else:
incremental_arg = ''
args = {
'restore_location': self.restore_location,
'incremental_args': incremental_arg,
}
return self.incremental_prep % args
def _incremental_prepare(self, incremental_dir):
prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
utils.execute(prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def _incremental_restore(self, location, checksum):
"""Recursively apply backups from all parents.
If we are the parent then we restore to the restore_location and
we apply the logs to the restore_location only.
Otherwise if we are an incremental we restore to a subfolder to
prevent stomping on the full restore data. Then we run apply log
with the '--incremental-dir' flag
"""
metadata = self.storage.load_metadata(location, checksum)
incremental_dir = None
if 'parent_location' in metadata:
LOG.info(_("Restoring parent: %(parent_location)s"
" checksum: %(parent_checksum)s.") % metadata)
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
# Restore parents recursively so backup are applied sequentially
self._incremental_restore(parent_location, parent_checksum)
# for *this* backup set the incremental_dir
# just use the checksum for the incremental path as it is
# sufficiently unique /var/lib/mysql/<checksum>
incremental_dir = os.path.join(self.restore_location, checksum)
utils.execute("mkdir", "-p", incremental_dir,
root_helper="sudo",
run_as_root=True)
command = self._incremental_restore_cmd(incremental_dir)
else:
# The parent (full backup) use the same command from InnobackupEx
# super class and do not set an incremental_dir.
command = self.restore_cmd
self.content_length += self._unpack(location, checksum, command)
self._incremental_prepare(incremental_dir)
# Delete unpacked incremental backup metadata
if incremental_dir:
utils.execute("rm", "-fr", incremental_dir, root_helper="sudo",
run_as_root=True)
def _run_restore(self):
"""Run incremental restore.
First grab all parents and prepare them with '--redo-only'. After
all backups are restored the super class InnoBackupEx post_restore
method is called to do the final prepare with '--apply-log'
"""
self._incremental_restore(self.location, self.checksum)
return self.content_length
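# Illustrative only: a rough sketch of the prepare command this strategy builds
# for a single incremental step. The paths below are hypothetical, and __init__
# is bypassed on purpose because it expects a full runner/storage context.
def _example_incremental_prepare_cmd():
    runner = object.__new__(InnoBackupExIncremental)
    runner.restore_location = '/var/lib/mysql'
    return runner._incremental_prepare_cmd('/var/lib/mysql/abc123')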
|
CMSS-BCRDB/RDS
|
trove/guestagent/strategies/restore/mysql_impl.py
|
Python
|
apache-2.0
| 13,702
| 0.000146
|
__author__ = 'dracks'
import unittest
import models
from Api.Manager import DataManager
import main
import dateutil.parser
import datetime
import models_tests
import mocks
class DayWeekTests(unittest.TestCase):
def test_sunday(self):
day = dateutil.parser.parse("2015-09-27")
self.assertEqual(main.day_week(day), 'sunday')
def test_monday(self):
day = dateutil.parser.parse("2015-09-21")
self.assertEqual(main.day_week(day), 'monday')
class WeekStartDateTests(unittest.TestCase):
def test_monday(self):
day = "2015-09-28"
monday = dateutil.parser.parse(day)
self.assertEqual(monday, main.week_start_date(monday))
def test_sunday(self):
monday = dateutil.parser.parse("2015-09-21")
day = dateutil.parser.parse("2015-09-27")
self.assertEqual(monday, main.week_start_date(day))
class GetFilteredTimesTests(unittest.TestCase):
def test_server_response_copy(self):
activity = models.Activity()
activity.sort = "100"
activity.times = "125,167,140,140,128,128,141,128,128,143,127,129,129,129,142,116,127,269"
session = models_tests.generate_session(activities=[activity])
deleted, words_minute = main.get_filtered_times(session)
self.assertEqual(deleted, 1)
self.assertEqual(int(words_minute*1000), 449933)
def test_activities_sort(self):
a = models.Activity()
a.sort = "20"
a.times = "1,2,3,2,1,2,3"
a2 = models.Activity()
a2.sort = "100"
a2.times = "1,1,1,1,2,2,2,2,8,10"
deleted, words_minute = main.get_filtered_times(models_tests.generate_session(activities=[a, a2]))
self.assertEqual(deleted, 2)
self.assertEqual(words_minute, 60*1000/1.5)
class GetPercentileTest(unittest.TestCase):
def setUp(self):
self.percentiles = []
this = self
self.get_filtered_times_value = (0,0)
self.get_filtered_times_last_session = None
self.mock_get_filtered_times = main.get_filtered_times
def mock_retrieve_all(model):
return this.percentiles
def mock_get_filtered_times(session):
self.get_filtered_times_last_session = session
return self.get_filtered_times_value
main.get_filtered_times = mock_get_filtered_times
DataManager.sharedManager().retrieve_all = mock_retrieve_all
self.mock_older = models.Older()
self.mock_older.group = models.Group()
mock_course = models.Course()
mock_course.id = 2
self.mock_older.group.course = mock_course
self.model_session1 = models.ModelSession()
self.model_session1.type_percentile = 1
self.model_session2 = models.ModelSession()
self.model_session2.type_percentile = 4
def tearDown(self):
main.get_filtered_times = self.mock_get_filtered_times
def test_get_not_valid(self):
session1 = models.Session()
session1.completed_time = dateutil.parser.parse('2015-08-01')
session1.model_based = self.model_session1
self.percentiles = []
self.assertIsNone(main.get_percentile(self.mock_older, session1))
def test_get_multiple_percentiles(self):
date = dateutil.parser.parse('2015-08-01')
trimester = main.get_trimester(date)
session = models.Session()
session.completed_time = date
session.model_based = self.model_session1
list_percentiles = []
for i in range(0, 10):
list_percentiles.append(models_tests.generate_percentile(
pk=i, seed=5, type=i * 2, course=i * 3, trimester=trimester
))
self.percentiles = list_percentiles
activity1 = models.Activity()
activity1.sort = 10
activity1.words_minute = 10
activity2 = models.Activity()
activity2.sort = 20
activity2.words_minute = 19
session.list_activities = [
activity2, activity1
]
self.assertIsNone(main.get_percentile(self.mock_older, session))
session.model_based = self.model_session2
self.assertIsNone(main.get_percentile(self.mock_older, session))
self.get_filtered_times_value = (0,19)
self.mock_older.group.course.id = 6
r = main.get_percentile(self.mock_older, session)
self.assertEqual(self.get_filtered_times_last_session, session)
self.assertIsNotNone(r)
self.assertEqual(r, 20)
class GetAverageDataTest(unittest.TestCase):
def setUp(self):
self.get_percentile = main.get_percentile
def tearDown(self):
main.get_percentile = self.get_percentile
def test_no_data(self):
p, m = main.get_average_data(None, [], [])
self.assertIsNone(p)
self.assertIsNone(m)
def test_with_data(self):
self.tmp = 0
def mock_get_percentile(older, session):
self.tmp += 1
return self.tmp
main.get_percentile = mock_get_percentile
ms1 = models_tests.generate_model_session(1)
ms2 = models_tests.generate_model_session(2)
ms3 = models_tests.generate_model_session(3)
sessions = [
models_tests.generate_session(model=ms1),
models_tests.generate_session(model=ms1),
models_tests.generate_session(model=ms1)
]
p, m = main.get_average_data(None, sessions, [ms2, ms3])
self.assertIsNone(p)
self.assertIsNone(m)
sessions = [
models_tests.generate_session(model=ms1, motivation=1),
models_tests.generate_session(model=ms2),
models_tests.generate_session(model=ms3)
]
p, m = main.get_average_data(None, sessions, [ms1])
self.assertEqual(p, 1)
self.assertEqual(m, 1)
sessions = [
models_tests.generate_session(model=ms1, motivation=1),
models_tests.generate_session(model=ms2, motivation=5),
models_tests.generate_session(model=ms3)
]
p, m = main.get_average_data(None, sessions, [ms1, ms2])
self.assertEqual(p, 2.5)
self.assertEqual(m, 3)
class JumpTests(unittest.TestCase):
def setUp(self):
def generate_sessions():
list_mock_sessions = []
for level in range(1, 5):
list_mock_sessions.extend([
models_tests.generate_block_session(level * 5, level=level, session=models.ModelSession(),
order=10),
models_tests.generate_block_session(level * 5 + 1, level=level, session=models.ModelSession(),
order=20),
models_tests.generate_block_session(level * 5 + 2, level=level, session=models.ModelSession(),
order=30),
])
return list_mock_sessions
self.configuration = models.OlderConfig()
self.b1 = models_tests.generate_block(order=10, sessions=generate_sessions())
self.b2 = models_tests.generate_block(order=20, sessions=generate_sessions())
self.b3 = models_tests.generate_block(order=30, sessions=generate_sessions())
self.configuration.pattern = models_tests.generate_pattern(blocks=[self.b1, self.b3, self.b2])
def test_not_repeat(self):
self.configuration.block = self.b1
bj = models.BlockJumpDefault()
bj.repeatBlock = False
bj.nextLevel = 2
main.jump(self.configuration, bj)
self.assertEqual(self.configuration.block, self.b2)
self.assertEqual(self.configuration.level, 2)
sessions = filter(lambda e: e.level == 2, self.b2.sessions)
self.assertEqual(self.configuration.session, sessions[0])
def test_repeat(self):
bj = models.BlockJumpDefault()
bj.repeatBlock = True
bj.nextLevel = 3
self.configuration.block = self.b1
main.jump(self.configuration, bj)
self.assertEqual(self.configuration.block, self.b1)
self.assertEqual(self.configuration.level, 3)
sessions = filter(lambda e: e.level == 3, self.b1.sessions)
self.assertEqual(self.configuration.session, sessions[0])
def test_save_old(self):
self.configuration.block = self.b1
self.configuration.level = 1
bj = models.BlockJumpCondition()
bj.repeatBlock = False
bj.nextLevel = 2
main.jump(self.configuration, bj)
self.assertEqual(self.configuration.lastBlock, self.b1)
self.assertEqual(self.configuration.lastLevel, 1)
class UpdateConfigTests(unittest.TestCase):
def setUp(self):
self.last_jump_configuration = None
self.last_jump_condition = None
self.mock_avg_percentile = None
self.mock_avg_motivation = None
def mock_jump(configuration, condition):
self.last_jump_configuration = configuration
self.last_jump_condition = condition
def mock_get_average_data(older, s1, s2):
return self.mock_avg_percentile, self.mock_avg_motivation
self.get_average_data = main.get_average_data
main.get_average_data = mock_get_average_data
self.jump = main.jump
main.jump = mock_jump
self.warnings = models.Warnings
models.Warnings = mocks.MockWarning
self.bjc = models_tests.generate_block_jump_condition(level="1")
bjc2 = models_tests.generate_block_jump_condition(level="1")
self.bjd = models_tests.generate_block_jump_default(level="2")
bj = models_tests.generate_block_jump(conditions=[self.bjc, bjc2], defaults=[self.bjd])
self.configuration = models.OlderConfig()
self.configuration.warnings = []
self.configuration.level = "1"
self.list_sessions = [
models_tests.generate_block_session(order=10, level="1"),
models_tests.generate_block_session(order=20, level="1"),
models_tests.generate_block_session(order=15, level="1"),
models_tests.generate_block_session(order=10, level="2"),
models_tests.generate_block_session(order=20, level="3"),
models_tests.generate_block_session(order=10, level="3")
]
self.block = models_tests.generate_block(block_jump=bj, sessions=self.list_sessions)
self.configuration.block = self.block
def tearDown(self):
main.get_average_data = self.get_average_data
main.jump = self.jump
def test_next_session(self):
self.configuration.session = self.list_sessions[2]
self.configuration.level = "1"
main.update_config(self.configuration, [], [])
self.assertEqual(self.configuration.session, self.list_sessions[1])
def test_with_found_average(self):
self.last_jump_configuration = None
self.last_jump_condition = None
self.mock_avg_percentile = 0
self.configuration.session = self.list_sessions[1]
self.configuration.level = 1
main.update_config(self.configuration, [], [])
self.assertEqual(self.last_jump_configuration, self.configuration)
self.assertEqual(self.last_jump_condition, self.bjc)
def test_with_not_found_average(self):
self.last_jump_configuration = None
self.last_jump_condition = None
self.mock_avg_percentile = None
self.configuration.session = self.list_sessions[3]
self.configuration.level = 2
self.configuration.block = self.block
main.update_config(self.configuration, [], [])
self.assertEqual(self.last_jump_configuration, self.configuration)
self.assertEqual(self.last_jump_condition, self.bjd)
def test_with_not_found_anything(self):
self.last_jump_configuration = None
self.last_jump_condition = None
self.mock_avg_percentile = None
self.configuration.session = self.list_sessions[4]
self.configuration.level = 3
self.configuration.block = self.block
main.update_config(self.configuration, [], [])
self.assertEqual(self.last_jump_configuration, None)
self.assertEqual(self.last_jump_condition, None)
self.assertEqual(self.configuration.session, self.list_sessions[5])
def test_with_not_block_jump(self):
mock_warning = mocks.MockWarning()
mocks.MockWarning.retrieve_value = mock_warning
self.configuration.block = models_tests.generate_block(sessions=self.list_sessions)
self.configuration.session = self.list_sessions[1]
main.update_config(self.configuration, [], [])
self.assertEqual(self.configuration.session, self.list_sessions[0])
self.assertEqual(len(self.configuration.warnings), 1)
self.assertEqual(self.configuration.warnings[0], mock_warning)
class PautaTests(unittest.TestCase):
def test(self):
mock_session = mocks.MockSession()
configuration = mocks.MockOlderConfig()
configuration.session = models_tests.generate_block_session(session=mock_session)
session = main.pauta(configuration)
self.assertEqual(session.student, configuration.older)
self.assertEqual(session.publish_date.date(), datetime.date.today())
self.assertEqual(session.model_based, mock_session)
class GetCountersTests(unittest.TestCase):
def setUp(self):
self.days = map(lambda e: dateutil.parser.parse("2015/09/" + str(e)), range(21, 28))
self.model_sessions = [
models_tests.generate_model_session(),
models_tests.generate_model_session(),
models_tests.generate_model_session()
]
def test(self):
list = [
mocks.MockSession(self.model_sessions[0], publish_date=None, completed_time=self.days[0]),
mocks.MockSession(self.model_sessions[0], publish_date=self.days[1], completed_time=self.days[2]),
mocks.MockSession(models_tests.generate_model_session(), publish_date=self.days[0],
completed_time=self.days[0]),
mocks.MockSession(self.model_sessions[0], publish_date=self.days[2], completed_time=self.days[2]),
mocks.MockSession(models_tests.generate_model_session(), publish_date=self.days[2],
completed_time=self.days[2]),
mocks.MockSession(self.model_sessions[1], publish_date=self.days[3], completed_time=self.days[3]),
mocks.MockSession(models_tests.generate_model_session(), publish_date=self.days[4], completed_time=None),
mocks.MockSession(self.model_sessions[2], publish_date=self.days[5], completed_time=None),
]
not_done, not_done_pattern, s_week = main.get_counters(list, self.model_sessions, self.days[2])
self.assertEqual(not_done, 2)
self.assertEqual(not_done_pattern, 1)
self.assertEqual(s_week, 3)
class GenerateLists(unittest.TestCase):
def setUp(self):
self.older_config_get_list_block_session = mocks.MockOlderConfig.get_list_block_session
self.older_config_get_current_block_session = mocks.MockOlderConfig.get_current_block_session
self.older_config_get_list_block_session_value = []
self.older_config_get_current_block_Session_value = []
this = self
def mock_get_list_block_session(self):
return this.older_config_get_list_block_session_value
def mock_get_current_block_session(self):
return this.older_config_get_current_block_Session_value
mocks.MockOlderConfig.get_list_block_session = mock_get_list_block_session
mocks.MockOlderConfig.get_current_block_session = mock_get_current_block_session
def tearDown(self):
mocks.MockOlderConfig.get_list_block_session = self.older_config_get_list_block_session
mocks.MockOlderConfig.get_current_block_session = self.older_config_get_current_block_session
def test_global(self):
models_session = [
models_tests.generate_model_session(),
models_tests.generate_model_session(),
models_tests.generate_model_session()
]
self.older_config_get_list_block_session_value = [
models_tests.generate_block_session(session=models_session[0], use_data=False),
models_tests.generate_block_session(session=models_session[1], use_data=False),
models_tests.generate_block_session(session=models_session[2], use_data=True)
]
self.older_config_get_current_block_Session_value = self.older_config_get_list_block_session_value
configuration = mocks.MockOlderConfig()
sessions = [
mocks.MockSession(model=models_session[0], status_begin=5,
completed_time=dateutil.parser.parse("2015-09-21")),
mocks.MockSession(model=models_session[1], status_begin=5,
completed_time=dateutil.parser.parse("2015-09-21")),
mocks.MockSession(model=models_tests.generate_model_session(),
status_begin=5, completed_time=dateutil.parser.parse("2015-09-21")),
mocks.MockSession(model=models_session[2], status_begin=5, completed_time=None),
mocks.MockSession(model=models_tests.generate_model_session(), completed_time=None),
]
list_sessions, sessions_made, sessions_use_data = main.generate_lists(configuration,
sessions)
self.assertEqual(list_sessions, models_session)
self.assertEqual(sessions_made, [sessions[0], sessions[1]])
self.assertEqual(sessions_use_data, [models_session[2]])
class CheckWarningsTests(unittest.TestCase):
def setUp(self):
mocks.MockWarning.load()
self.append_warning = main.append_warning
self.append_warning_code_list = []
self.get_filtered_times = main.get_filtered_times
self.get_filtered_times_last_session = None
self.get_filtered_times_value = (0, 0)
self.get_percentile = main.get_percentile
self.get_percentile_value = None
def mock_append_warning(configuration, code):
self.assertTrue(code in mocks.MockWarning.code_list)
self.append_warning_code_list.append(code)
def mock_get_filtered_times(session):
self.get_filtered_times_last_session = session
return self.get_filtered_times_value
def mock_get_percentile(older, session):
return self.get_percentile_value
main.append_warning = mock_append_warning
main.get_filtered_times = mock_get_filtered_times
main.get_percentile = mock_get_percentile
self.sessions = [
mocks.MockSession(status_begin="7", status_end=7, difficulty=8,
completed_time=dateutil.parser.parse("2015-09-21 20:00+02:00")),
mocks.MockSession(status_begin="7", status_end=7, difficulty=8,
completed_time=dateutil.parser.parse("2015-09-22 20:00+02:00")),
mocks.MockSession(status_begin="7", status_end=7, difficulty=8,
completed_time=dateutil.parser.parse("2015-09-23 20:00+02:00")),
mocks.MockSession(status_begin="7", status_end=7, difficulty=8,
completed_time=dateutil.parser.parse("2015-09-24 20:00+02:00")),
mocks.MockSession(status_begin="7", status_end=7, difficulty=8,
completed_time=dateutil.parser.parse("2015-09-25 20:00+02:00")),
]
self.configuration = mocks.MockOlderConfig()
self.configuration.numberSessions = 2
def tearDown(self):
main.append_warning = self.append_warning
main.get_filtered_times = self.get_filtered_times
main.get_percentile = self.get_percentile
def test_avg_low_mot(self):
# self.get_filtered_times_value = (0, 0)
self.get_percentile_value = 10
self.sessions[-1].status_begin = 0
self.sessions[-2].status_begin = 0
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-1.1")
self.sessions[-1].status_begin = 4
self.sessions[-2].status_begin = 4
self.append_warning_code_list = []
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-1.4")
self.sessions[-1].status_begin = 4
self.sessions[-2].status_begin = 5
self.append_warning_code_list = []
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-1.3")
self.sessions[-1].status_begin = 6
self.sessions[-2].status_begin = 6
self.sessions[-3].status_begin = 5
self.sessions[-4].status_begin = 6
self.append_warning_code_list = []
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-1.2")
def test_mot_difference(self):
# self.get_filtered_times_value = (0, 0)
self.sessions[-1].status_end = int(self.sessions[-1].status_begin) - 1
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-3.3")
self.append_warning_code_list = []
self.sessions[-1].status_end = int(self.sessions[-1].status_begin) - 4
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-3.2")
self.append_warning_code_list = []
self.sessions[-1].status_end = int(self.sessions[-1].status_begin) - 4
self.sessions[-2].status_end = int(self.sessions[-2].status_begin) - 4
self.sessions[-3].status_end = int(self.sessions[-3].status_begin) - 5
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "MOT-3.1")
def test_filtered_times(self):
self.get_filtered_times_value = (1, 0)
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "CL-1.3")
self.append_warning_code_list = []
self.get_filtered_times_value = (3, 0)
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "CL-1.2")
self.append_warning_code_list = []
self.get_filtered_times_value = (3, 0)
self.sessions[-1].status_end = int(self.sessions[-1].status_begin) - 4
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 2)
self.assertEqual(self.append_warning_code_list[1], "CL-1.1")
def test_percentile(self):
self.get_percentile_value = 3
self.sessions[-1].difficulty = 3
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "CL-2.1")
def test_time(self):
self.sessions[-1].completed_time = dateutil.parser.parse("2015-09-30 20:59:59+02:00")
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 0)
self.sessions[-1].completed_time = dateutil.parser.parse("2015-09-30 21:00:01+02:00")
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "H-1.1")
self.append_warning_code_list = []
self.sessions[-1].completed_time = dateutil.parser.parse("2015-09-30 06:59:59+02:00")
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "H-1.1")
self.append_warning_code_list = []
self.sessions[-1].completed_time = dateutil.parser.parse("2015-09-30 07:00:00+02:00")
main.check_warnings(self.configuration, [], self.sessions)
self.assertEqual(len(self.append_warning_code_list), 0)
def test_not_done(self):
sessions = []
for session in self.sessions:
session.publish_date = dateutil.parser.parse("2015-09-21")
sessions.append(session)
sessions.append(mocks.MockSession(publish_date=dateutil.parser.parse("2015-09-22")))
main.check_warnings(self.configuration, sessions, self.sessions)
self.assertEqual(len(self.append_warning_code_list), 0)
sessions.append(mocks.MockSession(publish_date=dateutil.parser.parse("2015-09-22")))
main.check_warnings(self.configuration, sessions, self.sessions)
self.assertEqual(len(self.append_warning_code_list), 1)
self.assertEqual(self.append_warning_code_list[0], "S-1.1")
class RunTests(unittest.TestCase):
def setUp(self):
self.pauta = main.pauta
self.update_config = main.update_config
self.session = models.Session
self.pattern_history = models.PatternHistory
self.get_counters = main.get_counters
self.configuration_save = models.OlderConfig.save
self.check_warnings = main.check_warnings
self.last_check_warnings_sessions = None
self.last_check_warnings_all_sessions = None
self.count = {
"pauta": 0,
"update_config": 0,
"get_counters": 0,
}
self.list_sessions = []
self.get_counters_value = (0, 0, 0)
def mock_pauta(configuration):
self.count["pauta"] += 1
return mocks.MockSession()
def mock_update_config(configuration, sessions_made, sessions_use_data):
self.count["update_config"] += 1
def mock_get_counters(sessions, list_sessions, monday):
self.count["get_counters"] += 1
return self.get_counters_value
def mock_check_warnings(configuration, all_sessions, sessions):
self.last_check_warnings_sessions = sessions
self.last_check_warnings_all_sessions = all_sessions
models.Session = mocks.MockSession
models.PatternHistory = mocks.MockPatternHistory
main.pauta = mock_pauta
main.update_config = mock_update_config
main.get_counters = mock_get_counters
main.check_warnings = mock_check_warnings
self.configuration = mocks.MockOlderConfig()
self.configuration.block = models_tests.generate_block(sessions=[])
self.configuration.older = models.Older()
self.configuration.older.id = 1
self.configuration.level = "1"
self.configuration.numberSessions = 2
self.configuration.maxSessionsWeek = 5
def tearDown(self):
main.pauta = self.pauta
main.update_config = self.update_config
models.Session = self.session
main.get_counters = self.get_counters
models.OlderConfig.save = self.configuration_save
main.check_warnings = self.check_warnings
models.PatternHistory = self.pattern_history
def test_normal_state(self):
main.run(self.configuration, "monday")
self.assertEqual(self.count['pauta'], 2)
self.assertEqual(self.count['update_config'], 2)
self.assertEqual(self.count['get_counters'], 1)
self.assertEqual(mocks.MockSession.get_args['query'], 'student=1&count=20')
def test_max_sessions_week(self):
self.get_counters_value = (0, 0, 4)
self.configuration.older.id = 3
main.run(self.configuration, "monday")
self.assertEqual(self.count['pauta'], 1)
self.assertEqual(mocks.MockSession.get_args['query'], 'student=3&count=20')
def test_max_sessions(self):
self.get_counters_value = (9, 0, 0)
main.run(self.configuration, "monday")
self.assertEqual(self.count['pauta'], 1)
self.assertEqual(mocks.MockSession.get_args['query'], 'student=1&count=20')
def test_sessions(self):
self.get_counters_value = (0, 3, 0)
main.run(self.configuration, "monday")
self.assertEqual(self.count['pauta'], 1)
self.assertEqual(mocks.MockSession.get_args['query'], 'student=1&count=20')
def test_no_max_sessions_week(self):
self.configuration.maxSessionsWeek = None
self.get_counters_value = (0, 0, 4)
main.run(self.configuration, "monday")
self.assertEqual(self.count['pauta'], 2)
self.assertEqual(self.count['update_config'], 2)
self.assertEqual(self.count['get_counters'], 1)
def test_history(self):
self.get_counters_value = (0, 0, 0)
mocks.MockPatternHistory.list_mocks = []
self.configuration.pattern = models_tests.generate_pattern()
self.configuration.block = models_tests.generate_block()
self.configuration.level = 2
self.configuration.older = models.Older()
self.configuration.warnings = [mocks.MockWarning(), mocks.MockWarning()]
main.run(self.configuration, "monday")
self.assertEqual(len(mocks.MockPatternHistory.list_mocks), 1)
historic = mocks.MockPatternHistory.list_mocks[0]
self.assertEqual(self.configuration.pattern, historic.pattern)
self.assertEqual(self.configuration.block, historic.block)
self.assertEqual(self.configuration.level, historic.level)
self.assertEqual(self.configuration.older, historic.older)
self.assertEqual(self.configuration.warnings, historic.warnings)
self.assertEqual(len(historic.sessions), 2)
class MainTest(unittest.TestCase):
def setUp(self):
self.older_config = models.OlderConfig
self.run = main.run
self.run_count = 0
self.run_configurations = []
self.run_mondais = []
def mock_run(configuration, day):
self.run_configurations.append(configuration)
self.run_mondais.append(day)
models.OlderConfig = mocks.MockOlderConfig
main.run = mock_run
monday = models_tests.generate_working_days(monday=True)
wednesday = models_tests.generate_working_days(wednesday=True)
thursday = models_tests.generate_working_days(thursday=True)
multiple_days = models_tests.generate_working_days(wednesday=True, friday=True)
mocks.MockOlderConfig.get_value = [
mocks.MockOlderConfig(working_days=monday),
mocks.MockOlderConfig(working_days=thursday),
mocks.MockOlderConfig(working_days=monday),
mocks.MockOlderConfig(working_days=wednesday),
mocks.MockOlderConfig(working_days=multiple_days)
]
def tearDown(self):
models.OlderConfig = self.older_config
main.run = self.run
def test_sunday_and_monday(self):
day = dateutil.parser.parse("2015-09-27")
main.main(day)
self.assertEqual(len(self.run_configurations), 0)
day = dateutil.parser.parse("2015-09-21")
main.main(day)
self.assertEqual(len(self.run_configurations), 2)
self.assertEqual(len(self.run_mondais), 2)
self.assertEqual(self.run_mondais[0], dateutil.parser.parse("2015-09-21"))
def test_multiple_days(self):
day = dateutil.parser.parse("2015-09-23")
main.main(day)
self.assertEqual(len(self.run_configurations), 2)
self.assertEqual(self.run_mondais[0], dateutil.parser.parse("2015-09-21"))
self.run_configurations = []
self.run_mondais = []
day = dateutil.parser.parse("2015-09-25")
main.main(day)
self.assertEqual(len(self.run_configurations), 1)
self.assertEqual(len(self.run_mondais), 1)
self.assertEqual(self.run_mondais[0], dateutil.parser.parse("2015-09-21"))
|
ecorreig/automatic_pattern
|
tests/main_tests.py
|
Python
|
gpl-2.0
| 32,788
| 0.001921
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
|
elego/tkobr-addons
|
unported/tko_purchase_show_only_supplier_products/__init__.py
|
Python
|
agpl-3.0
| 1,094
| 0
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ActivityLike'
#db.delete_table(u'spa_activitylike')
db.create_table(u'spa_activitylike', (
(u'activity_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['spa.Activity'], unique=True, primary_key=True)),
('mix', self.gf('django.db.models.fields.related.ForeignKey')(related_name='likes', to=orm['spa.Mix'])),
))
db.send_create_signal('spa', ['ActivityLike'])
def backwards(self, orm):
# Deleting model 'ActivityLike'
db.delete_table(u'spa_activitylike')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.activity': {
'Meta': {'object_name': 'Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']", 'null': 'True', 'blank': 'True'})
},
'spa.activityfavourite': {
'Meta': {'object_name': 'ActivityFavourite', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"})
},
'spa.activitylike': {
'Meta': {'object_name': 'ActivityLike', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.activitymix': {
'Meta': {'object_name': 'ActivityMix', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.activityplay': {
'Meta': {'object_name': 'ActivityPlay', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'stream_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 6, 8, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers_rel_+'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'following_rel_+'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
|
fergalmoran/dss
|
spa/migrations/0021_auto__add_activitylike.py
|
Python
|
bsd-2-clause
| 16,207
| 0.007836
|
from datetime import datetime
import fixtures
import constants
from authomatic.providers import oauth2
conf = fixtures.get_configuration('yammer')
LINK = 'https://www.yammer.com/peterhudec.com/users/{0}'\
.format(conf.user_username)
# Yammer allows users to set only the month and day of their birthday.
# The year is always 1900.
BD = datetime.strptime(conf.user_birth_date, '%x')
BIRTH_DATE = datetime(1900, BD.month, BD.day).strftime('%x')
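# For illustration, assuming a US-style '%x' locale ('%m/%d/%y'): a stored
# user_birth_date of '09/27/86' parses to 1986-09-27 above, and BIRTH_DATE
# becomes '09/27/00', i.e. the same month and day pinned to the year 1900.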
CONFIG = {
'login_xpath': '//*[@id="login"]',
'password_xpath': '//*[@id="password"]',
'consent_xpaths': [
'//*[@id="login-form"]/fieldset[2]/p[2]/button',
'//*[@id="oauth2-authorize"]/div[3]/div[3]/form/input[1]',
],
'class_': oauth2.Yammer,
'scope': oauth2.Yammer.user_info_scope,
'user': {
'birth_date': BIRTH_DATE,
'city': conf.user_city,
'country': conf.user_country,
'email': conf.user_email,
'first_name': conf.user_first_name,
'gender': None,
'id': conf.user_id,
'last_name': conf.user_last_name,
'link': LINK,
'locale': conf.user_locale,
'name': conf.user_name,
'nickname': None,
'phone': conf.user_phone,
'picture': conf.user_picture,
'postal_code': None,
'timezone': conf.user_timezone,
'username': conf.user_username,
},
'content_should_contain': [
conf.user_city,
conf.user_country,
conf.user_email,
conf.user_first_name,
conf.user_id,
conf.user_last_name,
LINK,
conf.user_locale,
conf.user_name,
conf.user_phone,
conf.user_picture,
conf.user_timezone.replace('&', '\\u0026'),
conf.user_username,
# User info JSON keys
'type', 'id', 'network_id', 'state', 'guid', 'job_title', 'location',
'significant_other', 'kids_names', 'interests', 'summary', 'expertise',
'full_name', 'activated_at', 'show_ask_for_photo', 'first_name',
'last_name', 'network_name', 'network_domains', 'url', 'web_url',
'name', 'mugshot_url', 'mugshot_url_template', 'birth_date', 'timezone',
'external_urls', 'admin', 'verified_admin', 'can_broadcast',
'department', 'email', 'can_create_new_network',
'can_browse_external_networks', 'previous_companies', 'schools',
'contact', 'im', 'provider', 'username', 'phone_numbers', 'number',
'email_addresses', 'address', 'has_fake_email', 'stats', 'following',
'followers', 'updates', 'settings', 'xdr_proxy', 'web_preferences',
'absolute_timestamps', 'threaded_mode', 'network_settings',
'message_prompt', 'allow_attachments', 'show_communities_directory',
'enable_groups', 'allow_yammer_apps', 'admin_can_delete_messages',
'allow_inline_document_view', 'allow_inline_video',
'enable_private_messages', 'allow_external_sharing', 'enable_chat',
'home_tabs', 'select_name', 'feed_description', 'ordering_index',
'enter_does_not_submit_message', 'preferred_my_feed',
'prescribed_my_feed', 'sticky_my_feed', 'dismissed_feed_tooltip',
'dismissed_group_tooltip', 'dismissed_profile_prompt',
'dismissed_invite_tooltip', 'dismissed_apps_tooltip',
'dismissed_invite_tooltip_at', 'dismissed_browser_lifecycle_banner',
'make_yammer_homepage', 'locale', 'yammer_now_app_id', 'has_yammer_now',
'has_mobile_client', 'follow_general_messages'
],
# Case insensitive
'content_should_not_contain':
conf.no_gender +
conf.no_nickname +
conf.no_postal_code,
    # True means that any truthy value is expected
'credentials': {
'token_type': 'Bearer',
'provider_type_id': '2-15',
'_expiration_time': None,
'consumer_key': None,
'provider_id': None,
'consumer_secret': None,
'token': True,
'token_secret': None,
'_expire_in': True,
'provider_name': 'yammer',
'refresh_token': None,
'provider_type': 'authomatic.providers.oauth2.OAuth2',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
},
}
|
scorphus/authomatic
|
tests/functional_tests/expected_values/yammer.py
|
Python
|
mit
| 4,202
| 0.000714
|
from __future__ import absolute_import
input_name = '../examples/dg/advection_2D.py'
output_name = 'advection_sol.msh'
from tests_basic import TestInput
class Test( TestInput ):
pass
|
vlukes/sfepy
|
tests/test_dg_input_advection.py
|
Python
|
bsd-3-clause
| 187
| 0.026738
|
#!/usr/bin/python2.7
from app import app
if __name__ == '__main__':
app.run(debug=True)
|
mrcl/HakketyYaks
|
run.py
|
Python
|
mit
| 94
| 0
|
import os
DEBUG = False
try:
from local_settings import * # noqa
except ImportError:
pass
|
pjdufour/slackbot-osm
|
slackbotosm/settings.py
|
Python
|
bsd-3-clause
| 101
| 0
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import numba
import numpy as np
import os
import pandas as pd
import pyarrow.parquet as pq
import random
import string
import unittest
from numba import types
import sdc
from sdc import hiframes
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
count_array_REPs,
count_parfor_OneDs,
count_parfor_REPs,
dist_IR_contains,
get_start_end,
skip_numba_jit,
skip_sdc_jit)
class TestHiFrames(TestCase):
@skip_numba_jit
def test_column_list_select2(self):
# make sure SDC copies the columns like Pandas does
def test_impl(df):
df2 = df[['A']]
df2['A'] += 10
return df2.A, df.A
hpat_func = self.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
np.testing.assert_array_equal(hpat_func(df.copy())[1], test_impl(df)[1])
@skip_numba_jit
def test_pd_DataFrame_from_series_par(self):
def test_impl(n):
S1 = pd.Series(np.ones(n))
S2 = pd.Series(np.random.ranf(n))
df = pd.DataFrame({'A': S1, 'B': S2})
return df.A.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
@skip_numba_jit
def test_getitem_bool_series(self):
def test_impl(df):
return df['A'][df['B']].values
hpat_func = self.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
np.testing.assert_array_equal(test_impl(df), hpat_func(df))
@skip_numba_jit
def test_fillna(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
B = df.A.fillna(5.0)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_fillna_inplace(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
df.A.fillna(5.0, inplace=True)
return df.A.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_mean(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
return df.A.mean()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_var(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.var()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_std(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.std()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_map(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df['B'] = df.A.map(lambda a: 2 * a)
return df.B.sum()
n = 121
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_column_map_arg(self):
def test_impl(df):
df['B'] = df.A.map(lambda a: 2 * a)
return
n = 121
df1 = pd.DataFrame({'A': np.arange(n)})
df2 = pd.DataFrame({'A': np.arange(n)})
hpat_func = self.jit(test_impl)
hpat_func(df1)
self.assertTrue(hasattr(df1, 'B'))
test_impl(df2)
np.testing.assert_equal(df1.B.values, df2.B.values)
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_cumsum(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.cumsum()
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_array_OneDs(), 2)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 2)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_column_distribution(self):
# make sure all column calls are distributed
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df.A.fillna(5.0, inplace=True)
DF = df.A.fillna(5.0)
s = DF.sum()
m = df.A.mean()
v = df.A.var()
t = df.A.std()
Ac = df.A.cumsum()
return Ac.sum() + s + m + v + t
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_quantile_parallel(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_float_nan(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32)})
df.A[0:100] = np.nan
df.A[200:331] = np.nan
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_int(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.int32)})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_sequential(self):
def test_impl(A):
df = pd.DataFrame({'A': A})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
A = np.arange(0, n, 1, np.float64)
np.testing.assert_almost_equal(hpat_func(A), test_impl(A))
@skip_numba_jit
def test_nunique(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df.A[2] = 0
return df.A.nunique()
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_nunique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.four.nunique()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
def test_nunique_str(self):
def test_impl(n):
df = pd.DataFrame({'A': ['aa', 'bb', 'aa', 'cc', 'cc']})
return df.A.nunique()
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@unittest.skip('AssertionError - fix needed\n'
'5 != 3\n')
def test_nunique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.two.nunique()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
def test_unique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.four.unique() == 3.0).sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@unittest.skip('AssertionError - fix needed\n'
'2 != 1\n')
def test_unique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.two.unique() == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.describe()
hpat_func = self.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_contains_regex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('AB*', regex=True)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), 2)
@skip_numba_jit
def test_str_contains_noregex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('BB', regex=False)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), 1)
@skip_numba_jit
def test_str_replace_regex(self):
def test_impl(df):
return df.A.str.replace('AB*', 'EE', regex=True)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_replace_noregex(self):
def test_impl(df):
return df.A.str.replace('AB', 'EE', regex=False)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_replace_regex_parallel(self):
def test_impl(df):
B = df.A.str.replace('AB*', 'EE', regex=True)
return B
n = 5
A = ['ABCC', 'CABBD', 'CCD', 'CCDAABB', 'ED']
start, end = get_start_end(n)
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_split(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_default(self):
def test_impl(df):
return df.A.str.split()
df = pd.DataFrame({'A': ['AB CC', 'C ABB D', 'G ', ' ', 'g\t f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_filter(self):
def test_impl(df):
B = df.A.str.split(',')
df2 = pd.DataFrame({'B': B})
return df2[df2.B.str.len() > 1]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(
hpat_func(df), test_impl(df).reset_index(drop=True))
@skip_numba_jit
def test_str_split_box_df(self):
def test_impl(df):
return pd.DataFrame({'B': df.A.str.split(',')})
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df).B, test_impl(df).B, check_names=False)
@skip_numba_jit
def test_str_split_unbox_df(self):
def test_impl(df):
return df.A.iloc[0]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
df2 = pd.DataFrame({'A': df.A.str.split(',')})
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(df2), test_impl(df2))
@unittest.skip('Getitem Series with list values not implemented')
def test_str_split_bool_index(self):
def test_impl(df):
C = df.A.str.split(',')
return C[df.B == 'aa']
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D'], 'B': ['aa', 'bb']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_parallel(self):
def test_impl(df):
B = df.A.str.split(',')
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_get(self):
def test_impl(df):
B = df.A.str.split(',')
return B.str.get(1)
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split2(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_get_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = A.str.get(1)
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD,F', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_get_to_numeric(self):
def test_impl(df):
B = df.A.str.split(',')
C = pd.to_numeric(B.str.get(1), errors='coerce')
return C
df = pd.DataFrame({'A': ['AB,12', 'C,321,D']})
hpat_func = self.jit(locals={'C': types.int64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_flatten(self):
def test_impl(df):
A = df.A.str.split(',')
return pd.Series(list(itertools.chain(*A)))
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_flatten_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = pd.Series(list(itertools.chain(*A)))
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_to_numeric(self):
def test_impl(df):
B = pd.to_numeric(df.A, errors='coerce')
return B
df = pd.DataFrame({'A': ['123.1', '331.2']})
hpat_func = self.jit(locals={'B': types.float64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_1D_Var_len(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n) + 1.0})
df1 = df[df.A > 5]
return len(df1.B)
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling1(self):
# size 3 without unroll
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3).sum()
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
# size 7 with unroll
def test_impl_2(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.rolling(7).sum()
return Ac.sum()
hpat_func = self.jit(test_impl_2)
n = 121
self.assertEqual(hpat_func(n), test_impl_2(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df['moving average'] = df.A.rolling(window=5, center=True).mean()
return df['moving average'].sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3, center=True).apply(lambda a: a[0] + 2 * a[1] + a[2])
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.shift(1)
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.pct_change(1)
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_df_input(self):
def test_impl(df):
return df.B.sum()
n = 121
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_df_input2(self):
def test_impl(df):
C = df.B == 'two'
return C.sum()
n = 11
df = pd.DataFrame({'A': np.random.ranf(3 * n), 'B': ['one', 'two', 'three'] * n})
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_df_input_dist1(self):
def test_impl(df):
return df.B.sum()
n = 121
A = [3, 4, 5, 6, 1]
B = [5, 6, 2, 1, 3]
n = 5
start, end = get_start_end(n)
df = pd.DataFrame({'A': A, 'B': B})
df_h = pd.DataFrame({'A': A[start:end], 'B': B[start:end]})
hpat_func = self.jit(distributed={'df'})(test_impl)
np.testing.assert_almost_equal(hpat_func(df_h), test_impl(df))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_concat(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
df3 = pd.concat([df1, df2])
return df3.A.sum() + df3.key2.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_concat_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1, df2])
return (A3.two == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_concat_series(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
A3 = pd.concat([df1.A, df2.A])
return A3.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_concat_series_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1.two, df2.two])
return (A3 == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(int(os.getenv('SDC_NP_MPI', '0')) > 1, 'Test hangs on NP=2 and NP=3 on all platforms')
def test_intraday(self):
def test_impl(nsyms):
max_num_days = 100
all_res = 0.0
for i in sdc.prange(nsyms):
s_open = 20 * np.ones(max_num_days)
s_low = 28 * np.ones(max_num_days)
s_close = 19 * np.ones(max_num_days)
df = pd.DataFrame({'Open': s_open, 'Low': s_low, 'Close': s_close})
df['Stdev'] = df['Close'].rolling(window=90).std()
df['Moving Average'] = df['Close'].rolling(window=20).mean()
df['Criteria1'] = (df['Open'] - df['Low'].shift(1)) < -df['Stdev']
df['Criteria2'] = df['Open'] > df['Moving Average']
df['BUY'] = df['Criteria1'] & df['Criteria2']
df['Pct Change'] = (df['Close'] - df['Open']) / df['Open']
df['Rets'] = df['Pct Change'][df['BUY']]
all_res += df['Rets'].mean()
return all_res
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_OneDs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
@skip_numba_jit
def test_var_dist1(self):
def test_impl(A, B):
df = pd.DataFrame({'A': A, 'B': B})
df2 = df.groupby('A', as_index=False)['B'].sum()
# TODO: fix handling of df setitem to force match of array dists
# probably with a new node that is appended to the end of basic block
# df2['C'] = np.full(len(df2.B), 3, np.int8)
# TODO: full_like for Series
df2['C'] = np.full_like(df2.B.values, 3, np.int8)
return df2
A = np.array([1, 1, 2, 3])
B = np.array([3, 4, 5, 6])
hpat_func = self.jit(locals={'A:input': 'distributed',
'B:input': 'distributed', 'df2:return': 'distributed'})(test_impl)
start, end = get_start_end(len(A))
df2 = hpat_func(A[start:end], B[start:end])
# TODO:
# pd.testing.assert_frame_equal(
# hpat_func(A[start:end], B[start:end]), test_impl(A, B))
if __name__ == "__main__":
unittest.main()
|
IntelLabs/hpat
|
sdc/tests/test_hiframes.py
|
Python
|
bsd-2-clause
| 29,355
| 0.000477
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/EquippedBadgeSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/EquippedBadgeSettings.proto',
package='POGOProtos.Settings.Master',
syntax='proto3',
serialized_pb=_b('\n6POGOProtos/Settings/Master/EquippedBadgeSettings.proto\x12\x1aPOGOProtos.Settings.Master\"y\n\x15\x45quippedBadgeSettings\x12\x1f\n\x17\x65quip_badge_cooldown_ms\x18\x01 \x01(\x03\x12\x1f\n\x17\x63\x61tch_probability_bonus\x18\x02 \x03(\x02\x12\x1e\n\x16\x66lee_probability_bonus\x18\x03 \x03(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EQUIPPEDBADGESETTINGS = _descriptor.Descriptor(
name='EquippedBadgeSettings',
full_name='POGOProtos.Settings.Master.EquippedBadgeSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='equip_badge_cooldown_ms', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.equip_badge_cooldown_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='catch_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.catch_probability_bonus', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='flee_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.flee_probability_bonus', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=207,
)
DESCRIPTOR.message_types_by_name['EquippedBadgeSettings'] = _EQUIPPEDBADGESETTINGS
EquippedBadgeSettings = _reflection.GeneratedProtocolMessageType('EquippedBadgeSettings', (_message.Message,), dict(
DESCRIPTOR = _EQUIPPEDBADGESETTINGS,
__module__ = 'POGOProtos.Settings.Master.EquippedBadgeSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.EquippedBadgeSettings)
))
_sym_db.RegisterMessage(EquippedBadgeSettings)
# @@protoc_insertion_point(module_scope)
|
favll/pogom
|
pogom/pgoapi/protos/POGOProtos/Settings/Master/EquippedBadgeSettings_pb2.py
|
Python
|
mit
| 3,197
| 0.006569
|
"""
Different implementations of the ``./setup.py test`` command depending on
what's locally available.
If Astropy v1.1 or later is available it should be possible to import
AstropyTest from ``astropy.tests.command``. Otherwise there is a skeleton
implementation that allows users to at least discover the ``./setup.py test``
command and learn that they need Astropy to run it.
"""
import os
from ..utils import import_file
# Previously these except statements caught only ImportErrors, but there are
# some other obscure exceptional conditions that can occur when importing
# astropy.tests (at least on older versions) that can cause these imports to
# fail
try:
# If we are testing astropy itself, we need to use import_file to avoid
# actually importing astropy (just the file we need).
command_file = os.path.join('astropy', 'tests', 'command.py')
if os.path.exists(command_file):
AstropyTest = import_file(command_file, 'astropy_tests_command').AstropyTest
else:
import astropy # noqa
from astropy.tests.command import AstropyTest
except Exception:
# No astropy at all--provide the dummy implementation
from ._dummy import _DummyCommand
class AstropyTest(_DummyCommand):
command_name = 'test'
description = 'Run the tests for this package'
error_msg = (
"The 'test' command requires the astropy package to be "
"installed and importable.")
|
astropy/astropy-helpers
|
astropy_helpers/commands/test.py
|
Python
|
bsd-3-clause
| 1,466
| 0.000682
|
from .analyzer import Pep8Analyzer
from .issues_data import issues_data
analyzers = {
'pep8' :
{
'title' : 'Pep-8',
'class' : Pep8Analyzer,
'language' : 'python',
'issues_data' : issues_data,
},
}
|
quantifiedcode/checkmate
|
checkmate/contrib/plugins/python/pep8/setup.py
|
Python
|
mit
| 266
| 0.018797
|
#!env/bin/python
"""Migrate the database"""
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO,
tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
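# Illustrative workflow sketch (everything outside this file is an
# assumption): after editing the SQLAlchemy models imported via app, running
#
#   python db_migrate.py
#
# generates versions/NNN_migration.py inside SQLALCHEMY_MIGRATE_REPO, applies
# it, and prints the new database version.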
|
dianvaltodorov/happy-commas
|
db_migrate.py
|
Python
|
mit
| 963
| 0
|
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
def test_generators():
import os
import tempfile
from fusesoc.config import Config
from fusesoc.coremanager import CoreManager
from fusesoc.edalizer import Edalizer
from fusesoc.librarymanager import Library
from fusesoc.vlnv import Vlnv
tests_dir = os.path.dirname(__file__)
cores_dir = os.path.join(tests_dir, "capi2_cores", "misc", "generate")
lib = Library("edalizer", cores_dir)
cm = CoreManager(Config())
cm.add_library(lib)
core = cm.get_core(Vlnv("::generate"))
build_root = tempfile.mkdtemp(prefix="export_")
cache_root = tempfile.mkdtemp(prefix="export_cache_")
export_root = os.path.join(build_root, "exported_files")
edalizer = Edalizer(
toplevel=core.name,
flags={"tool": "icarus"},
core_manager=cm,
cache_root=cache_root,
work_root=os.path.join(build_root, "work"),
export_root=export_root,
system_name=None,
)
edalizer.run()
gendir = os.path.join(
cache_root, "generated", "generate-testgenerate_without_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_without_params_input.yml"))
gendir = os.path.join(
cache_root, "generated", "generate-testgenerate_with_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_with_params_input.yml"))
|
lowRISC/fusesoc
|
tests/test_edalizer.py
|
Python
|
gpl-3.0
| 1,641
| 0.001219
|
from ConfigParser import SafeConfigParser
import requests
class Client(object):
def __init__(self, configFileName):
self.config = SafeConfigParser()
self.config.read(configFileName)
def send(self, recepient, message):
if message.platform == "facebook":
return self.sendFacebookMessage(recepient, message)
else:
raise Exception('Unknown message platform')
def sendFacebookMessage(self,recepient, message):
token = self.config.get('onechat','FACEBOOK_MESSENGER_TOKEN')
sendUrl = "https://graph.facebook.com/v2.6/me/messages?access_token=" + token
try:
response = requests.post(sendUrl, json=message.toPayload(recepient))
return response
except Exception:
print "Failed"
return "Failed sending request"
|
yaohongkok/py-one-chat
|
onechat/client.py
|
Python
|
mit
| 747
| 0.034806
|
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from account.models import RegistrationForm
from account.utils import *
from account.forms import EditProfileForm
from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.forms.util import ErrorList
# Create your views here.
def register(request):
if request.method == "POST":
form = RegistrationForm(request.POST)
if form.is_valid():
# Enforce ACCOUNT_REQUIRE_REG_CODE
if settings.ACCOUNT_REQUIRE_REG_CODE:
if len(get_groups_for_code(form.cleaned_data['regcode'])) != 0:
newUser = form.save()
newUser.email = form.cleaned_data['email']
newUser.save()
register_groups(newUser, form.cleaned_data['regcode'])
return HttpResponseRedirect(reverse('login'))
else:
form._errors['regcode'] = ErrorList([u'Invalid Registration Code.'])
else:
newUser = form.save()
newUser.email = form.cleaned_data['email']
newUser.save()
register_groups(newUser, form.cleaned_data['regcode'])
return HttpResponseRedirect(reverse('login'))
else:
form = RegistrationForm()
context = {'form': form}
return TemplateResponse(request, "register.html", context)
def edit_profile(request):
if request.method == "POST":
form = EditProfileForm(request.POST)
if form.is_valid():
if not request.user.check_password(form.cleaned_data['password']):
form._errors['password'] = ErrorList([u'The password you entered is incorrect.'])
else:
request.user.email = form.cleaned_data['email']
if form.cleaned_data['password1']:
request.user.set_password(form.cleaned_data['password1'])
request.user.save()
return HttpResponseRedirect('/settings/')
else:
form = EditProfileForm()
form.fields['email'].initial = request.user.email
return TemplateResponse(request, "edit_profile_form.html",
{'form': form})
def password_reset_confirm(*args, **kwargs):
from django.contrib.auth import views
return views.password_reset_confirm(*args, post_reset_redirect=reverse('login'),
template_name='password_reset_confirm.html',
**kwargs)
|
dagoaty/eve-wspace
|
evewspace/account/views.py
|
Python
|
gpl-3.0
| 3,492
| 0.002864
|
#!/usr/bin/python
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import logging; log = logging.getLogger(__name__)
import collections
import itertools
import pprint
from django import forms
from django.forms import widgets
from django.core.exceptions import ValidationError
from amcat.scripts.script import Script
from amcat.models import ArticleSet
try:
import Levenshtein
except ImportError:
Levenshtein = None
log.error("Levenshtein module not installed. Deduplicate cannot be used.")
class Deduplicate(Script):
"""
Deduplicate articles using two articlesets. For all duplicated articles
the articles in set 2 will be removed.
"""
def __init__(self, *args, **kwargs):
super(Deduplicate, self).__init__(*args, **kwargs)
self._articles_cache_contains = None
self._articles_cache = None
class options_form(forms.Form):
articleset_1 = forms.ModelChoiceField(queryset=ArticleSet.objects.all())
articleset_2 = forms.ModelChoiceField(queryset=ArticleSet.objects.all())
dry_run = forms.BooleanField(initial=False, required=False)
text_ratio = forms.IntegerField(initial=99, help_text="Match articles whose text matches ..%%")
headline_ratio = forms.IntegerField(initial=80, help_text="Compare articles whose headlines match ..%%")
delete_same = forms.BooleanField(initial=False, required=False, help_text="Remove articles with the same ids")
skip_simple = forms.BooleanField(initial=False, required=False, help_text="Do not use an approximation of the Levenshtein ratio")
def clean_ratio(self, ratio):
if not (0 <= self.cleaned_data[ratio] <= 100):
raise ValidationError("{}: give a percentage. For example: 20.".format(ratio))
return self.cleaned_data[ratio] / 100.0
def clean_text_ratio(self):
return self.clean_ratio("text_ratio")
def clean_headline_ratio(self):
return self.clean_ratio("headline_ratio")
def get_matching(self, compare_with, article, ratio, prop):
return (ca for ca in compare_with if Levenshtein.ratio(
getattr(article, prop), getattr(ca, prop)) >= ratio)
def get_simple_levenhstein(self, articles, article, text_ratio):
text_length = len(article.text)
min_length = text_ratio * text_length
max_length = ((1 - text_ratio) + 1) * text_length
for comp_article in articles:
if min_length <= len(comp_article.text) <= max_length:
yield comp_article
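# Illustrative numbers for the length pre-filter above: with text_ratio=0.99
# and a 1000-character article, only candidates whose text length falls
# between 990 (0.99 * 1000) and 1010 ((2 - 0.99) * 1000) characters survive
# this cheap check before the expensive Levenshtein.ratio comparison in
# get_matching runs.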
def get_articles(self, articleset, article, text_ratio):
medium_id, date = article.medium_id, article.date
# Same medium / date since previous call?
if not self._articles_cache_contains == (medium_id, date):
# Fill cache
self._articles_cache_contains = (medium_id, date)
self._articles_cache = articleset.articles.filter(date=date, medium__id=medium_id)
self._articles_cache = self._articles_cache.only("id", "text", "headline")
return self._articles_cache
def _get_deduplicates(self, articleset_1, articleset_2, text_ratio, headline_ratio, skip_simple, delete_same):
log.info("Start deduplicating ({articleset_1}, {articleset_2})..".format(**locals()))
all_articles = articleset_1.articles.only("id", "date", "medium", "text", "headline")
n_articles = all_articles.count()
articles = all_articles.order_by("medium", "date")
for i, article in enumerate(articles.iterator(), start=1):
if not i % 100 or i == n_articles:
log.info("Checking article {i} of {n_articles}".format(**locals()))
compare_with = self.get_articles(articleset_2, article, text_ratio)
if not skip_simple:
compare_with = self.get_simple_levenhstein(compare_with, article, text_ratio)
compare_with = self.get_matching(compare_with, article, headline_ratio, "headline")
compare_with = set(self.get_matching(compare_with, article, text_ratio, "text"))
if not delete_same:
discard = None
for a in compare_with:
if a.id == article.id:
discard = a
compare_with.discard(discard)
if compare_with:
yield (article, compare_with)
def _run(self, dry_run, articleset_2, **kwargs):
duplicates = collections.defaultdict(list)
for art, dupes in self._get_deduplicates(articleset_2=articleset_2, **kwargs):
for dupe in dupes:
duplicates[art].append(dupe)
if not dry_run:
articleset_2.articles.through.objects.filter(articleset=articleset_2,
article__in=itertools.chain.from_iterable(duplicates.values())
).delete()
else:
pprint.pprint(dict(duplicates))
return duplicates
if __name__ == '__main__':
from amcat.scripts.tools import cli
cli.run_cli()
|
tschmorleiz/amcat
|
amcat/scripts/actions/deduplicate.py
|
Python
|
agpl-3.0
| 6,399
| 0.003907
|
'''
Test change cpu and memory configuration when VM is running
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
#import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Test update instance offering')
vm = test_stub.create_basic_vm()
instance_offering = test_lib.lib_get_instance_offering_by_uuid(vm.get_vm().instanceOfferingUuid)
test_obj_dict.add_vm(vm)
vm_ops.update_vm(vm.get_vm().uuid, instance_offering.cpuNum * 2, None)
vm_ops.update_vm(vm.get_vm().uuid, None, instance_offering.memorySize * 2)
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize is expected to change")
vm.stop()
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize is expected to change")
vm.start()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to take effect after Vm restart")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to take effect after Vm restart")
vm.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Test update instance cpu memory Pass')
# Will be called only if an exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
zstackio/zstack-woodpecker
|
integrationtest/vm/virtualrouter/test_update_vm_cpu_memory.py
|
Python
|
apache-2.0
| 2,076
| 0.003372
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="streamtube.starts", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/streamtube/starts/_x.py
|
Python
|
mit
| 391
| 0.002558
|
import os
import tempfile
import unittest
from nose.tools import raises
from nose.tools import timed
from ansible import errors
from ansible.module_common import ModuleReplacer
from ansible.utils import checksum as utils_checksum
TEST_MODULE_DATA = """
from ansible.module_utils.basic import *
def get_module():
return AnsibleModule(
argument_spec = dict(),
supports_check_mode = True,
no_log = True,
)
get_module()
"""
class TestModuleUtilsBasic(unittest.TestCase):
def cleanup_temp_file(self, fd, path):
try:
os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def setUp(self):
# create a temporary file for the test module
# we're about to generate
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
os.write(self.tmp_fd, TEST_MODULE_DATA)
# template the module code and eval it
module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
d = {}
exec(module_data, d, d)
self.module = d['get_module']()
# module_utils/basic.py screws with CWD, let's save it and reset
self.cwd = os.getcwd()
def tearDown(self):
self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
# Reset CWD back to what it was before basic.py changed it
os.chdir(self.cwd)
#################################################################################
# run_command() tests
# test run_command with a string command
def test_run_command_string(self):
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'")
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with an array of args (with both use_unsafe_shell=True|False)
def test_run_command_args(self):
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"])
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with leading environment variables
@raises(SystemExit)
def test_run_command_string_with_env_variables(self):
self.module.run_command('FOO=bar /bin/echo -n "foo bar"')
@raises(SystemExit)
def test_run_command_args_with_env_variables(self):
self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar'])
def test_run_command_string_unsafe_with_env_variables(self):
(rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with a command pipe (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_pipe(self):
(rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect in (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_in(self):
(rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect out (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_double_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with data
def test_run_command_string_with_data(self):
(rc, out, err) = self.module.run_command('cat', data='foo bar')
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with binary data
def test_run_command_string_with_binary_data(self):
(rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'ABCD')
# test run_command with a cwd set
def test_run_command_string_with_cwd(self):
tmp_path = tempfile.mkdtemp()
try:
(rc, out, err) = self.module.run_command('pwd', cwd=tmp_path)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
self.assertEqual(out.strip(), os.path.realpath(tmp_path))
except:
raise
finally:
self.cleanup_temp_dir(tmp_path)
class TestModuleUtilsBasicHelpers(unittest.TestCase):
''' Test some implementation details of AnsibleModule
Some pieces of AnsibleModule are implementation details but they have
potential corner cases that we need to check. Go ahead and test at
this level that the functions are behaving even though their API may
change and we'd have to rewrite these tests so that we know that we
need to check for those problems in any rewrite.
In the future we might want to restructure higher level code to be
friendlier to unittests so that we can test at the level that the public
is interacting with the APIs.
'''
MANY_RECORDS = 7000
URL_SECRET = 'http://username:pas:word@foo.com/data'
SSH_SECRET = 'username:pas:word@foo.com/data'
def cleanup_temp_file(self, fd, path):
try:
os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {'host%s' % i:
{'pstack':
{'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']},
}}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def setUp(self):
self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.URL_SECRET))
self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.SSH_SECRET))
self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.URL_SECRET))
self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.SSH_SECRET))
self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False,
False, ''))
self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET))
self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET))
# create a temporary file for the test module
# we're about to generate
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
os.write(self.tmp_fd, TEST_MODULE_DATA)
# template the module code and eval it
module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
d = {}
exec(module_data, d, d)
self.module = d['get_module']()
# module_utils/basic.py screws with CWD, let's save it and reset
self.cwd = os.getcwd()
def tearDown(self):
self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
# Reset CWD back to what it was before basic.py changed it
os.chdir(self.cwd)
#################################################################################
#
# Speed tests
#
# Previously, we used regexes which had some pathologically slow cases for
# parameters with large amounts of data with many ':' but no '@'. The
# present function gets slower when there are many replacements so we may
# want to explore regexes in the future (for the speed when substituting
# or flexibility). These speed tests will hopefully tell us if we're
# introducing code that has cases that are simply too slow.
#
# Some regex notes:
# * re.sub() is faster than re.match() + str.join().
# * We may be able to detect a large number of '@' symbols and then use
# a regex else use the present function.
@timed(5)
def test_log_sanitize_speed_many_url(self):
self.module._heuristic_log_sanitize(self.many_url)
@timed(5)
def test_log_sanitize_speed_many_ssh(self):
self.module._heuristic_log_sanitize(self.many_ssh)
@timed(5)
def test_log_sanitize_speed_one_url(self):
self.module._heuristic_log_sanitize(self.one_url)
@timed(5)
def test_log_sanitize_speed_one_ssh(self):
self.module._heuristic_log_sanitize(self.one_ssh)
@timed(5)
def test_log_sanitize_speed_zero_secrets(self):
self.module._heuristic_log_sanitize(self.zero_secrets)
#
# Test that the password obfuscation sanitizes somewhat cleanly.
#
def test_log_sanitize_correctness(self):
url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
url_output = self.module._heuristic_log_sanitize(url_data)
ssh_output = self.module._heuristic_log_sanitize(ssh_data)
# Basic functionality: Successfully hid the password
try:
self.assertNotIn('pas:word', url_output)
self.assertNotIn('pas:word', ssh_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', url_output)
self.assertNotIn('pas', ssh_output)
except AttributeError:
# python2.6 or less's unittest
self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output))
self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output))
self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output))
self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output))
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
self.assertEqual(len(url_output), len(url_data))
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("'}}}}"))
try:
self.assertIn(":********@foo.com/data',", ssh_output)
except AttributeError:
# python2.6 or less's unittest
self.assertTrue(":********@foo.com/data'," in ssh_output, '%s is not present in %s' % (":********@foo.com/data',", ssh_output))
# The overzealous-ness here may lead to us changing the algorithm in
# the future. We could make it consume less of the data (with the
# possibility of leaving partial passwords exposed) and encourage
# people to use no_log instead of relying on this obfuscation.
|
jody-frankowski/ansible
|
test/units/TestModuleUtilsBasic.py
|
Python
|
gpl-3.0
| 13,044
| 0.003527
|
from urllib.parse import urlparse
from urllib.request import url2pathname
from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.http import Http404
from .http import AsgiHandler
class StaticFilesWrapper:
"""
ASGI application which wraps another and intercepts requests for static
files, passing them off to Django's static file serving.
"""
def __init__(self, application, staticfiles_handler=None):
self.application = application
self.staticfiles_handler_class = staticfiles_handler or StaticFilesHandler
self.base_url = urlparse(self.get_base_url())
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the static files path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
async def __call__(self, scope, receive, send):
# Only even look at HTTP requests
if scope["type"] == "http" and self._should_handle(scope["path"]):
# Serve static content
return await self.staticfiles_handler_class()(
dict(scope, static_base_url=self.base_url), receive, send
)
# Hand off to the main app
return await self.application(scope, receive, send)
class StaticFilesHandler(AsgiHandler):
"""
Subclass of AsgiHandler that serves directly from its get_response.
"""
# TODO: Review hierarchy here. Do we NEED to inherit BaseHandler, AsgiHandler?
async def __call__(self, scope, receive, send):
self.static_base_url = scope["static_base_url"][2]
return await super().__call__(scope, receive, send)
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.static_base_url) :]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
"""
Always tries to serve a static file as you don't even get into this
handler subclass without the wrapper directing you here.
"""
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
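# Illustrative usage sketch (the project layout and the get_asgi_application
# import are assumptions, not part of this module): wrapping a project's ASGI
# application so requests under STATIC_URL are served by StaticFilesHandler
# while everything else passes through to the wrapped application.
#
#   # asgi.py (hypothetical)
#   from django.core.asgi import get_asgi_application
#   from channels.staticfiles import StaticFilesWrapper
#
#   application = StaticFilesWrapper(get_asgi_application())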
|
andrewgodwin/django-channels
|
channels/staticfiles.py
|
Python
|
bsd-3-clause
| 2,778
| 0.00108
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from astrobin.models import SolarSystem_Acquisition
class SolarSystem_AcquisitionForm(forms.ModelForm):
error_css_class = 'error'
date = forms.DateField(
required=False,
input_formats=['%Y-%m-%d'],
widget=forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
help_text=_("Please use the following format: yyyy-mm-dd"),
label=_("Date"),
)
def clean_seeing(self):
data = self.cleaned_data['seeing']
if data and data not in list(range(1, 6)):
raise forms.ValidationError(_("Please enter a value between 1 and 5."))
return data
def clean_transparency(self):
data = self.cleaned_data['transparency']
if data and data not in list(range(1, 11)):
raise forms.ValidationError(_("Please enter a value between 1 and 10."))
return data
class Meta:
model = SolarSystem_Acquisition
fields = (
'date',
'time',
'frames',
'fps',
'exposure_per_frame',
'focal_length',
'cmi',
'cmii',
'cmiii',
'seeing',
'transparency',
)
widgets = {
'date': forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
'time': forms.TextInput(attrs={'class': 'timepickerclass', 'autocomplete': 'off'}),
}
|
astrobin/astrobin
|
astrobin/forms/solar_system_acquisition_form.py
|
Python
|
agpl-3.0
| 1,528
| 0.003272
|
# Generated by Django 2.0.3 on 2018-12-21 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketing', '0005_auto_20180123_1157'),
]
operations = [
migrations.AlterField(
model_name='uploadedcell',
name='content',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='uploadedrow',
name='error_message',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
asterix135/infonex_crm
|
marketing/migrations/0006_auto_20181221_1059.py
|
Python
|
mit
| 594
| 0
|
"""
escape/unescape routines available for backends which need
alphanumeric usernames, services, or other values
"""
import re
import string
import sys
# True if we are running on Python 3.
# taken from six.py
PY3 = sys.version_info[0] == 3
# allow use of unicode literals
if PY3:
def u(s):
return s
def _unichr(c):
return chr(c)
else:
def u(s):
return unicode(s, "unicode_escape")
def _unichr(c):
return unichr(c)
LEGAL_CHARS = (
getattr(string, 'letters', None) # Python 2
or getattr(string, 'ascii_letters') # Python 3
) + string.digits
ESCAPE_FMT = "_%02X"
def _escape_char(c):
"Single char escape. Return the char, escaped if not already legal"
if isinstance(c, int):
c = _unichr(c)
return c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c)
def escape(value):
"""
Escapes given string so the result consists of alphanumeric chars and
underscore only.
"""
return "".join(_escape_char(c) for c in value.encode('utf-8'))
def _unescape_code(regex_match):
ordinal = int(regex_match.group('code'), 16)
if sys.version_info >= (3,):
return bytes([ordinal])
return chr(ordinal)
def unescape(value):
"""
Inverse of escape.
"""
re_esc = re.compile(
# the pattern must be bytes to operate on bytes
ESCAPE_FMT.replace('%02X', '(?P<code>[0-9A-F]{2})').encode('ascii')
)
return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8')
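# Illustrative round trip (the values follow directly from LEGAL_CHARS and
# ESCAPE_FMT above; '_' is not in LEGAL_CHARS, so it is escaped as well):
#
#   >>> escape(u('a b_c'))
#   'a_20b_5Fc'
#   >>> unescape('a_20b_5Fc') == u('a b_c')
#   True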
|
b-jesch/service.fritzbox.callmonitor
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/keyring/util/escape.py
|
Python
|
gpl-2.0
| 1,497
| 0.004008
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
password commands plugin
"""
import base64
import binascii
import logging
import os
import subprocess
from Crypto.Cipher import AES
import crypt
import commands
# This is to support older python versions that don't have hashlib
try:
import hashlib
except ImportError:
import md5
class hashlib(object):
"""Fake hashlib module as a class"""
@staticmethod
def md5():
return md5.new()
class PasswordError(Exception):
"""
Class for password command exceptions
"""
def __init__(self, response):
# Should be a (ResponseCode, ResponseMessage) tuple
self.response = response
def __str__(self):
return "%s: %s" % self.response
def get_response(self):
return self.response
def _make_salt(length):
"""Create a salt of appropriate length"""
salt_chars = 'abcdefghijklmnopqrstuvwxyz'
salt_chars += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
salt_chars += '0123456789./'
rand_data = os.urandom(length)
salt = ''
for c in rand_data:
salt += salt_chars[ord(c) % len(salt_chars)]
return salt
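# Illustrative sketch of how the salt above feeds crypt() further down in
# _create_temp_password_file (the salt value shown is hypothetical; real
# salts are random):
#
#   salt = '$1$%s$' % _make_salt(8)     # e.g. '$1$ab3DEf9h$'
#   crypt.crypt('secret', salt)         # -> '$1$ab3DEf9h$<md5-crypt hash>'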
def _create_temp_password_file(user, password, filename):
"""Read original passwd file, generating a new temporary file.
Returns: The temporary filename
"""
with open(filename) as f:
file_data = f.readlines()
stat_info = os.stat(filename)
tmpfile = '%s.tmp.%d' % (filename, os.getpid())
# We have to use os.open() so that we can create the file with
# the appropriate modes. If we create it and set modes later,
# there's a small point of time where a non-root user could
# potentially open the file and wait for data to be written.
fd = os.open(tmpfile,
os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
stat_info.st_mode)
f = None
success = False
try:
os.chown(tmpfile, stat_info.st_uid, stat_info.st_gid)
f = os.fdopen(fd, 'w')
for line in file_data:
if line.startswith('#'):
f.write(line)
continue
try:
(s_user, s_password, s_rest) = line.split(':', 2)
except ValueError:
f.write(line)
continue
if s_user != user:
f.write(line)
continue
if s_password.startswith('$'):
# Format is '$ID$SALT$HASH' where ID defines the
# encryption type. We'll re-use that, and make a salt
# that's the same size as the old
salt_data = s_password[1:].split('$')
salt = '$%s$%s$' % (salt_data[0],
_make_salt(len(salt_data[1])))
else:
# Default to MD5 as a minimum level of compatibility
salt = '$1$%s$' % _make_salt(8)
enc_pass = crypt.crypt(password, salt)
f.write("%s:%s:%s" % (s_user, enc_pass, s_rest))
f.close()
f = None
success = True
except Exception, e:
logging.error("Couldn't create temporary password file: %s" % str(e))
raise
finally:
if not success:
# Close the file if it's open
if f:
try:
f.close()
except Exception:
pass
# Make sure to unlink the tmpfile
try:
os.unlink(tmpfile)
except Exception:
pass
return tmpfile
def set_password(user, password):
"""Set the password for a particular user"""
INVALID = 0
PWD_MKDB = 1
RENAME = 2
files_to_try = {'/etc/shadow': RENAME,
'/etc/master.passwd': PWD_MKDB}
for filename, ftype in files_to_try.iteritems():
if not os.path.exists(filename):
continue
tmpfile = _create_temp_password_file(user, password, filename)
if ftype == RENAME:
bakfile = '/etc/shadow.bak.%d' % os.getpid()
os.rename(filename, bakfile)
os.rename(tmpfile, filename)
os.remove(bakfile)
return
if ftype == PWD_MKDB:
pipe = subprocess.PIPE
p = subprocess.Popen(['/usr/sbin/pwd_mkdb', tmpfile],
stdin=pipe, stdout=pipe, stderr=pipe)
(stdoutdata, stderrdata) = p.communicate()
if p.returncode != 0:
if stderrdata:
stderrdata.strip('\n')
else:
stderrdata = '<None>'
logging.error("pwd_mkdb failed: %s" % stderrdata)
try:
os.unlink(tmpfile)
except Exception:
pass
raise PasswordError(
(500, "Rebuilding the passwd database failed"))
return
raise PasswordError((500, "Unknown password file format"))
@commands.command_add('password', 'password')
def password_cmd(data_values):
""" change password """
try:
set_password('root', data_values)
except PasswordError, e:
return e.get_response()
return True
|
abhishekkr/nix-bootstrapper
|
commands/command_plugins/password/__init__.py
|
Python
|
apache-2.0
| 5,851
| 0.000855
|
#!/usr/bin/env python
from time import sleep
import os
import RPi.GPIO as GPIO
import subprocess
import datetime
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.IN)
count = 0
up = False
down = False
command = ""
filename = ""
index = 0
camera_pause = "500"
def takepic(imageName):
print("picture")
command = "sudo raspistill -o " + imageName + " -q 100 -t " + camera_pause
print(command)
os.system(command)
while(True):
if(up==True):
if(GPIO.input(24)==False):
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d_%H%M%S")
filename = "photo-"+timeString+".jpg"
takepic(filename)
subprocess.call(['./processImage.sh', filename, '&'])
up = GPIO.input(24)
count = count+1
sleep(.1)
print "done"
|
sgonzalez/wsn-parking-project
|
sensor-pi/testimages.py
|
Python
|
gpl-2.0
| 764
| 0.027487
|
# This file starts the WSGI web application.
# - Heroku starts gunicorn, which loads Procfile, which starts manage.py
# - Developers can run it from the command line: python runserver.py
import logging
from logging.handlers import RotatingFileHandler
from app import create_app
app = create_app()
# Start a development web server if executed from the command line
if __name__ == "__main__":
# Manage the command line parameters such as:
# - python manage.py runserver
# - python manage.py db
from app import manager
manager.run()
|
dleicht/planx
|
manage.py
|
Python
|
mit
| 559
| 0.003578
|
import httplib
import functools
from modularodm.exceptions import NoResultsFound
from modularodm.storage.base import KeyExistsException
from framework.auth.decorators import must_be_signed
from framework.exceptions import HTTPError
from addons.osfstorage.models import OsfStorageFileNode, OsfStorageFolder
from osf.models import OSFUser, AbstractNode
from website.files import exceptions
from website.project.decorators import (
must_not_be_registration, must_have_addon,
)
def handle_odm_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except NoResultsFound:
raise HTTPError(httplib.NOT_FOUND)
except KeyExistsException:
raise HTTPError(httplib.CONFLICT)
except exceptions.VersionNotFoundError:
raise HTTPError(httplib.NOT_FOUND)
return wrapped
def autoload_filenode(must_be=None, default_root=False):
"""Implies both must_have_addon osfstorage node and
handle_odm_errors
Attempts to load fid as a OsfStorageFileNode with viable constraints
"""
def _autoload_filenode(func):
@handle_odm_errors
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(*args, **kwargs):
node = kwargs['node']
if 'fid' not in kwargs and default_root:
file_node = kwargs['node_addon'].get_root()
else:
file_node = OsfStorageFileNode.get(kwargs.get('fid'), node)
if must_be and file_node.kind != must_be:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'incorrect type',
'message_long': 'FileNode must be of type {} not {}'.format(must_be, file_node.kind)
})
kwargs['file_node'] = file_node
return func(*args, **kwargs)
return wrapped
return _autoload_filenode
def waterbutler_opt_hook(func):
@must_be_signed
@handle_odm_errors
@must_not_be_registration
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(payload, *args, **kwargs):
try:
user = OSFUser.load(payload['user'])
dest_node = AbstractNode.load(payload['destination']['node'])
source = OsfStorageFileNode.get(payload['source'], kwargs['node'])
dest_parent = OsfStorageFolder.get(payload['destination']['parent'], dest_node)
kwargs.update({
'user': user,
'source': source,
'destination': dest_parent,
'name': payload['destination']['name'],
})
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
return func(*args, **kwargs)
return wrapped
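# Illustrative sketch of a view using autoload_filenode (the view name and
# body are assumptions, not part of this module). The decorator resolves
# 'fid' against the node's osfstorage addon and injects it as 'file_node':
#
#   @autoload_filenode(must_be='file')
#   def osfstorage_get_metadata(file_node=None, **kwargs):
#       ...  # file_node is guaranteed to be an OsfStorageFileNode of kind 'file'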
|
caneruguz/osf.io
|
addons/osfstorage/decorators.py
|
Python
|
apache-2.0
| 2,839
| 0.000704
|
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
    :param addon_short_name: short name of the addon being examined
    :param fileobj_metadata: file or folder metadata of current point of reference
    in file tree
    :param user: archive initiator
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
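# Illustrative result (hypothetical tree): the nested folder structure is
# flattened into one (sha256, file_metadata) pair per file, e.g.
#
#   [('ab12...', {'kind': 'file', 'name': 'data.csv', ...}),
#    ('cd34...', {'kind': 'file', 'name': 'notes.txt', ...})]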
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
    note:: file_map is injected implicitly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
"""
some annotations:
- `value` is the `extra` from a file upload in `registered_meta`
(see `Uploader.addFile` in website/static/js/registrationEditorExtensions.js)
- `node` is a Registration instance
- returns a `(file_info, node_id)` or `(None, None)` tuple, where `file_info` is from waterbutler's api
(see `addons.base.models.BaseStorageAddon._get_fileobj_child_metadata` and `waterbutler.core.metadata.BaseMetadata`)
"""
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
'<': '<',
'>': '>'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, file_info, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == file_info['name']:
return file_info, node_id
return None, None
def find_registration_files(values, node):
"""
some annotations:
- `values` is from `registered_meta`, e.g. `{ comments: [], value: '', extra: [] }`
- `node` is a Registration model instance
- returns a list of `(file_info, node_id, index)` or `(None, None, index)` tuples,
where `file_info` is from `find_registration_file` above
"""
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def get_title_for_question(schema, path):
path = path.split('.')
root = path.pop(0)
item = None
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if root in questions:
item = questions[root]
title = item.get('title')
while len(path):
item = item.get(path.pop(0), {})
title = item.get('title', title)
return title
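# Illustrative call (hypothetical schema): for a dotted path such as
# 'q1.value.uploader', the lookup starts at question 'q1' and keeps the most
# specific 'title' found while descending the remaining keys.
#
#   get_title_for_question(schema.schema, 'q1.value.uploader')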
def find_selected_files(schema, metadata):
"""
some annotations:
- `schema` is a RegistrationSchema instance
- `metadata` is from `registered_meta` (for the given schema)
- returns a dict that maps from each `osf-upload` question id (`.`-delimited path) to its chunk of metadata,
e.g. `{ 'q1.uploader': { comments: [], extra: [...], value: 'foo.pdf' } }`
"""
targets = []
paths = [('', p) for p in schema.schema['pages']]
while len(paths):
prefix, path = paths.pop(0)
if path.get('questions'):
paths = paths + [('', q) for q in path['questions']]
elif path.get('type'):
qid = path.get('qid', path.get('id'))
if path['type'] == 'object':
paths = paths + [('{}.{}.value'.format(prefix, qid), p) for p in path['properties']]
elif path['type'] == 'osf-upload':
targets.append('{}.{}'.format(prefix, qid).lstrip('.'))
selected = {}
for t in targets:
parts = t.split('.')
value = metadata.get(parts.pop(0))
while value and len(parts):
value = value.get(parts.pop(0))
if value:
selected[t] = value
return selected
VIEW_FILE_URL_TEMPLATE = '/project/{node_id}/files/osfstorage/{file_id}/'
def deep_get(obj, path):
parts = path.split('.')
item = obj
key = None
while len(parts):
key = parts.pop(0)
item[key] = item.get(key, {})
item = item[key]
return item
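# Illustrative behaviour (hypothetical data): deep_get walks a dotted path,
# creating empty dicts for keys that are missing instead of raising, and
# mutates the input dict while doing so.
#
#   deep_get({'q1': {'value': 'foo'}}, 'q1.value')   # -> 'foo'
#   deep_get({}, 'q1.value')                         # -> {} (input becomes {'q1': {'value': {}}})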
def migrate_file_metadata(dst, schema):
metadata = dst.registered_meta[schema._id]
missing_files = []
selected_files = find_selected_files(schema, metadata)
for path, selected in selected_files.items():
target = deep_get(metadata, path)
for archived_file_info, node_id, index in find_registration_files(selected, dst):
if not archived_file_info:
missing_files.append({
'file_name': selected['extra'][index]['selectedFileName'],
'question_title': get_title_for_question(schema.schema, path)
})
continue
archived_file_id = archived_file_info['path'].lstrip('/')
target['extra'][index]['viewUrl'] = VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, file_id=archived_file_id)
if missing_files:
from website.archiver.tasks import ArchivedFileNotFound
raise ArchivedFileNotFound(
registration=dst,
missing_files=missing_files
)
dst.registered_meta[schema._id] = metadata
dst.registration_responses = dst.flatten_registration_metadata()
dst.save()
|
baylee-d/osf.io
|
website/archiver/utils.py
|
Python
|
apache-2.0
| 11,737
| 0.003238
|
from pylab import figure,show,connect,hist,plot,legend
from numpy import array, append, arange, empty, exp
from shogun import Gaussian, GMM
from shogun import RealFeatures
import util
util.set_title('EM for 1d GMM example')
#set the parameters
min_cov=1e-9
max_iter=1000
min_change=1e-9
#setup the real GMM
real_gmm=GMM(3)
real_gmm.set_nth_mean(array([-2.0]), 0)
real_gmm.set_nth_mean(array([0.0]), 1)
real_gmm.set_nth_mean(array([2.0]), 2)
real_gmm.set_nth_cov(array([[0.3]]), 0)
real_gmm.set_nth_cov(array([[0.1]]), 1)
real_gmm.set_nth_cov(array([[0.2]]), 2)
real_gmm.set_coef(array([0.3, 0.5, 0.2]))
#generate training set from real GMM
generated=array([real_gmm.sample()])
for i in range(199):
generated=append(generated, array([real_gmm.sample()]), axis=1)
feat_train=RealFeatures(generated)
#train GMM using EM
est_gmm=GMM(3)
est_gmm.train(feat_train)
est_gmm.train_em(min_cov, max_iter, min_change)
#get and print estimated means and covariances
est_mean1=est_gmm.get_nth_mean(0)
est_mean2=est_gmm.get_nth_mean(1)
est_mean3=est_gmm.get_nth_mean(2)
est_cov1=est_gmm.get_nth_cov(0)
est_cov2=est_gmm.get_nth_cov(1)
est_cov3=est_gmm.get_nth_cov(2)
est_coef=est_gmm.get_coef()
print est_mean1
print est_cov1
print est_mean2
print est_cov2
print est_mean3
print est_cov3
print est_coef
#plot real GMM, data and estimated GMM
min_gen=min(min(generated))
max_gen=max(max(generated))
plot_real=empty(0)
plot_est=empty(0)
for i in arange(min_gen, max_gen, 0.001):
plot_real=append(plot_real, array([exp(real_gmm.cluster(array([i]))[3])]))
plot_est=append(plot_est, array([exp(est_gmm.cluster(array([i]))[3])]))
real_plot=plot(arange(min_gen, max_gen, 0.001), plot_real, "b")
est_plot=plot(arange(min_gen, max_gen, 0.001), plot_est, "r")
real_hist=hist(generated.transpose(), bins=50, normed=True, fc="gray")
legend(("Real GMM", "Estimated GMM"))
connect('key_press_event', util.quit)
show()
|
MikeLing/shogun
|
examples/undocumented/python/graphical/em_1d_gmm.py
|
Python
|
gpl-3.0
| 1,911
| 0.018315
|
# http://www.creatis.insa-lyon.fr/~bernard/creaseg/
# http://ascratchpad.blogspot.com/2011/03/image-segmentation-using-active.html
#------------------------------------------------------------------------
# Region Based Active Contour Segmentation
#
# seg = region_seg(I,init_mask,max_its,alpha,display)
#
# Inputs: I 2D image
# init_mask Initialization (1 = foreground, 0 = bg)
# max_its Number of iterations to run segmentation for
# alpha (optional) Weight of smoothing term
#                       higher = smoother. default = 0.2
# display (optional) displays intermediate outputs
# default = true
#
# Outputs: seg Final segmentation mask (1=fg, 0=bg)
#
# Description: This code implements the paper: "Active Contours Without
# Edges" By Chan Vese. This is a nice way to segment images whose
# foregrounds and backgrounds are statistically different and homogeneous.
#
# Example:
# img = imread('tire.tif');
# m = zeros(size(img));
# m(33:33+117,44:44+128) = 1;
# seg = region_seg(img,m,500);
#
# Coded by: Shawn Lankton (www.shawnlankton.com)
#------------------------------------------------------------------------
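# Rough Python equivalent of the MATLAB example above (a sketch, assuming
# scikit-image is available for reading the image; any 2D grayscale array
# works as input):
#
#   from skimage.io import imread
#   img = imread('tire.tif', as_gray=True)
#   m = np.zeros(img.shape)
#   m[33:150, 44:172] = 1        # rough translation of the 1-based MATLAB indices
#   seg, phi, its = chanvese(img, m, max_its=500)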
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
def chanvese(I,init_mask,max_its=200,alpha=0.2,thresh=0,color='r',display=False):
I = I.astype('float')
#-- Create a signed distance map (SDF) from mask
phi = mask2phi(init_mask)
if display:
plt.ion()
showCurveAndPhi(I, phi, color)
plt.savefig('levelset_start.pdf',bbox_inches='tight')
#--main loop
its = 0
stop = False
prev_mask = init_mask
c = 0
while (its < max_its and not stop):
# get the curve's narrow band
idx = np.flatnonzero( np.logical_and( phi <= 1.2, phi >= -1.2) )
if len(idx) > 0:
#-- intermediate output
if display:
if np.mod(its,50) == 0:
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
print 'iteration:', its
showCurveAndPhi(I, phi, color)
else:
if np.mod(its,10) == 0:
print 'iteration:', its
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
#drawnow;
#-- find interior and exterior mean
upts = np.flatnonzero(phi<=0) # interior points
vpts = np.flatnonzero(phi>0) # exterior points
u = np.sum(I.flat[upts])/(len(upts)+eps) # interior mean
v = np.sum(I.flat[vpts])/(len(vpts)+eps) # exterior mean
F = (I.flat[idx]-u)**2-(I.flat[idx]-v)**2 # force from image information
curvature = get_curvature(phi,idx) # force from curvature penalty
dphidt = F /np.max(np.abs(F)) + alpha*curvature # gradient descent to minimize energy
#-- maintain the CFL condition
dt = 0.45/(np.max(np.abs(dphidt))+eps)
#-- evolve the curve
phi.flat[idx] += dt*dphidt
#-- Keep SDF smooth
phi = sussman(phi, 0.5)
new_mask = phi<=0
c = convergence(prev_mask,new_mask,thresh,c)
if c <= 5:
its = its + 1
prev_mask = new_mask
else: stop = True
else:
break
#-- final output
if display:
showCurveAndPhi(I, phi, color)
#plt.savefig('levelset_end.pdf',bbox_inches='tight')
time.sleep(10)
#-- make mask from SDF
seg = phi<=0 #-- Get mask from levelset
return seg,phi,its
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#-- AUXILIARY FUNCTIONS ----------------------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def bwdist(a):
"""
this is an intermediary function, 'a' has only True, False vals,
so we convert them into 0, 1 values -- in reverse. True is 0,
False is 1, distance_transform_edt wants it that way.
"""
return nd.distance_transform_edt(a == 0)
import time
#-- Displays the image with curve superimposed
def showCurveAndPhi(I, phi, color):
# subplot(numRows, numCols, plotNum)
#myplot = plt.subplot(121)
#fig, axes = plt.subplots()
#axes = myplot.axes
#axes.get_xaxis().set_visible(False)
#axes.get_yaxis().set_visible(False)
plt.clf()
plt.imshow(I, cmap='gray')
#plt.hold(True)
CS = plt.contour(phi, 0, colors=color)
plt.draw()
#plt.hold(False)
# myplot = plt.subplot(122)
# axes = myplot.axes
# axes.get_xaxis().set_visible(False)
# axes.get_yaxis().set_visible(False)
# plt.imshow(phi)
plt.draw()
#time.sleep(1)
def im2double(a):
a = a.astype('float')
a /= a.max()
return a
#-- converts a mask to a SDF
def mask2phi(init_a):
phi = bwdist(init_a)-bwdist(1-init_a)+im2double(init_a) -0.5
return phi
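# (Sign convention used throughout this file: phi <= 0 inside the initial
#  mask / contour, phi > 0 outside -- see the interior/exterior point split in
#  chanvese() and the final mask extraction `seg = phi <= 0`.)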
#-- compute curvature along SDF
def get_curvature(phi,idx):
dimy, dimx = phi.shape
    yx = np.array([np.unravel_index(i, phi.shape) for i in idx]) # get subscripts
y = yx[:,0]
x = yx[:,1]
#-- get subscripts of neighbors
ym1 = y-1; xm1 = x-1; yp1 = y+1; xp1 = x+1;
#-- bounds checking
ym1[ym1<0] = 0; xm1[xm1<0] = 0;
yp1[yp1>=dimy]=dimy - 1; xp1[xp1>=dimx] = dimx - 1;
#-- get indexes for 8 neighbors
idup = np.ravel_multi_index( (yp1,x),phi.shape)
iddn = np.ravel_multi_index( (ym1,x),phi.shape)
idlt = np.ravel_multi_index( (y,xm1),phi.shape)
idrt = np.ravel_multi_index( (y,xp1),phi.shape)
idul = np.ravel_multi_index( (yp1,xm1),phi.shape)
idur = np.ravel_multi_index( (yp1,xp1),phi.shape)
iddl = np.ravel_multi_index( (ym1,xm1),phi.shape)
iddr = np.ravel_multi_index( (ym1,xp1),phi.shape)
#-- get central derivatives of SDF at x,y
phi_x = -phi.flat[idlt]+phi.flat[idrt]
phi_y = -phi.flat[iddn]+phi.flat[idup]
phi_xx = phi.flat[idlt]-2*phi.flat[idx]+phi.flat[idrt]
phi_yy = phi.flat[iddn]-2*phi.flat[idx]+phi.flat[idup]
phi_xy = (-0.25*phi.flat[iddl]-0.25*phi.flat[idur]
+0.25*phi.flat[iddr]+0.25*phi.flat[idul])
phi_x2 = phi_x**2
phi_y2 = phi_y**2
    #-- compute curvature (Kappa); use float exponents so Python 2 integer
    #   division does not silently turn 3/2 into 1 and 1/2 into 0
    curvature = ( ((phi_x2*phi_yy + phi_y2*phi_xx - 2*phi_x*phi_y*phi_xy)
                   / (phi_x2 + phi_y2 + eps)**1.5)
                  *(phi_x2 + phi_y2)**0.5 )
return curvature
#-- level set re-initialization by the sussman method
def sussman(D, dt):
# forward/backward differences
a = D - shiftR(D) # backward
b = shiftL(D) - D # forward
c = D - shiftD(D) # backward
d = shiftU(D) - D # forward
a_p = a.copy(); a_n = a.copy(); # a+ and a-
b_p = b.copy(); b_n = b.copy();
c_p = c.copy(); c_n = c.copy();
d_p = d.copy(); d_n = d.copy();
a_p[a < 0] = 0
a_n[a > 0] = 0
b_p[b < 0] = 0
b_n[b > 0] = 0
c_p[c < 0] = 0
c_n[c > 0] = 0
d_p[d < 0] = 0
d_n[d > 0] = 0
dD = np.zeros(D.shape)
D_neg_ind = np.flatnonzero(D < 0)
D_pos_ind = np.flatnonzero(D > 0)
dD.flat[D_pos_ind] = np.sqrt( np.max( np.concatenate( ([a_p.flat[D_pos_ind]**2],
[b_n.flat[D_pos_ind]**2]) ),
axis=0
)
+ np.max( np.concatenate( ([c_p.flat[D_pos_ind]**2],
[d_n.flat[D_pos_ind]**2])),
axis=0
)
) - 1
dD.flat[D_neg_ind] = np.sqrt( np.max( np.concatenate( ([a_n.flat[D_neg_ind]**2],
[b_p.flat[D_neg_ind]**2])),
axis=0
)
+ np.max( np.concatenate( ([c_n.flat[D_neg_ind]**2],
[d_p.flat[D_neg_ind]**2]) ),
axis=0
)
) - 1
D = D - dt * sussman_sign(D) * dD
return D
#-- whole matrix derivatives
def shiftD(M):
return shiftR(M.transpose()).transpose()
def shiftL(M):
#shift = np.concatenate( (M[:,1:], np.zeros((M.shape[1],1))), axis=1 )
#shift = np.concatenate( (M[:,1:], M[:,-1]), axis=1 )
shift = M[:,range(1,M.shape[1])+[M.shape[1]-1]]
return shift
def shiftR(M):
#shift = np.concatenate( (np.zeros((M.shape[1],1)), M[:,:-1]), axis=1 )
#shift = np.concatenate( (M[:,0], M[:,:-1]), axis=1 )
shift = M[:,[0]+range(0,M.shape[1]-1)]
return shift
def shiftU(M):
return shiftL(M.transpose()).transpose()
def sussman_sign(D):
return D / np.sqrt(D**2 + 1)
# Convergence Test
def convergence(p_mask,n_mask,thresh,c):
diff = p_mask - n_mask
n_diff = np.sum(np.abs(diff))
if n_diff < thresh:
c = c + 1
else:
c = 0
return c
if __name__ == "__main__":
import cv2
img = cv2.imread("/home/kevin/Imperial/PhD/DATASETS/Training/positive/246_cropped_c/8.png_0022_0115_0117_0132_0132_0.png",0)
#img = nd.imread('sagittal8.png')
mask = np.zeros(img.shape)
mask[55:65,55:65] = 1
chanvese(img,mask,max_its=2000,display=True,alpha=0.1)
|
BioMedIA/irtk-legacy
|
wrapping/cython/irtk/ext/chanvese.py
|
Python
|
bsd-3-clause
| 9,929
| 0.022258
|
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
""" Test recognizers. """
import unittest
from storage_alerts.sources.generic.by_line.recognizers import LazyRecognizer
from storage_alerts.sources.generic.by_line.recognizers import ManyRecognizer
from storage_alerts.sources.generic.by_line.recognizers import NoRecognizer
from storage_alerts.sources.generic.by_line.recognizers import YesRecognizer
from storage_alerts.sources.generic.by_line.states import RecognizerStates
class YesRecognizerTestCase(unittest.TestCase):
""" Test the recognizer that says yes to any line. """
def testZero(self):
""" It always says no at start. """
rec = YesRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says yes whatever it reads. """
rec = YesRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test that the copy does not behave like the original. """
rec = YesRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(len(rec2.evidence), 0)
self.assertEqual(len(rec2.info), 0)
self.assertEqual(rec, rec2)
self.assertFalse(rec != YesRecognizer())
class MaybeYesRecognizerTestCase(unittest.TestCase):
""" Test the maybe yes recognizer. """
def testZero(self):
""" It always says no at start. """
rec = LazyRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says maybe whatever it reads. """
rec = LazyRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test that copy really resets. """
rec = LazyRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_YES)
self.assertEqual(len(rec.evidence), 1)
self.assertEqual(len(rec.info), 0)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(rec2.evidence, [])
self.assertEqual(len(rec2.info), 0)
self.assertEqual(rec, rec2)
self.assertFalse(rec != LazyRecognizer())
class NoRecognizerTestCase(unittest.TestCase):
""" Test the recognizer that always says no. """
def testZero(self):
""" It always says no at start. """
rec = NoRecognizer()
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testOne(self):
""" It says no whatever it reads. """
rec = NoRecognizer()
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.NO)
self.assertEqual(rec.evidence, [])
self.assertEqual(len(rec.info), 0)
def testCopy(self):
""" Test copying. """
rec = NoRecognizer()
rec2 = rec.initializeNew()
self.assertFalse(rec is rec2)
self.assertEqual(rec, rec2)
self.assertFalse(rec != NoRecognizer())
class ManyRecognizerTestCase(unittest.TestCase):
""" Test the many recognizer. """
def testZero(self):
""" If zero are enough it should be in yes state already. """
rec = ManyRecognizer(0)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(rec.evidence, [])
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(rec.evidence, [])
def testOne(self):
""" Should behave just like the yes recognizer. """
rec = ManyRecognizer(1)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
def testTwo(self):
""" If two are required it should pass through the maybe state. """
rec = ManyRecognizer(2)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.MAYBE_NO)
self.assertEqual(len(rec.evidence), 1)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 2)
def testInfo(self):
""" The info is a bunch of key/value pairs. """
rec = ManyRecognizer(2)
info = rec.info
self.assertEqual(info['COUNT'], 0)
self.assertEqual(info['REQUIRED'], 2)
def testStr(self):
""" The description contains some relevant information. """
rec = ManyRecognizer(2)
self.assertIn(str(rec.NUMBER), str(rec))
def testCopy(self):
""" Test copying. """
rec = ManyRecognizer(1)
self.assertEqual(rec.state, RecognizerStates.NO)
rec.consume(None)
self.assertEqual(rec.state, RecognizerStates.YES)
self.assertEqual(len(rec.evidence), 1)
rec2 = rec.initializeNew()
self.assertEqual(rec2.state, RecognizerStates.NO)
self.assertEqual(rec, rec2)
self.assertNotEqual(rec, ManyRecognizer(2))
class HashTestCase(unittest.TestCase):
""" Test hashing properties. """
_RECS = [
LazyRecognizer(),
ManyRecognizer(2),
NoRecognizer(),
YesRecognizer()
]
def setUp(self):
self._recs = [r.initializeNew() for r in self._RECS]
def testEqualNew(self):
""" Test that newly initialized recognizers hash to same value. """
for r in self._RECS:
self.assertEqual(hash(r), hash(r.initializeNew()))
|
mulkieran/storage_alerts
|
tests/sources/generic/by_line/recognizers_test.py
|
Python
|
gpl-2.0
| 7,550
| 0.000662
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Bc Rx Recorded
# Generated: Sun Jun 8 13:46:34 2014
##################################################
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class bc_rx_recorded(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Bc Rx Recorded")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 32000
##################################################
# Blocks
##################################################
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.GetWin(),
baseband_freq=0,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title="FFT Plot",
peak_hold=False,
)
self.Add(self.wxgui_fftsink2_0.win)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, "/home/sinu/sdr/fcd_capture_sample.raw", True)
self.audio_sink_0 = audio.sink(48000, "", True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=192000,
audio_decimation=4,
)
##################################################
# Connections
##################################################
self.connect((self.blocks_file_source_0, 0), (self.wxgui_fftsink2_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.analog_wfm_rcv_0, 0), (self.audio_sink_0, 0))
# QT sink close method reimplementation
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = bc_rx_recorded()
tb.Start(True)
tb.Wait()
|
riyas-org/sdr
|
bc_rx_recorded.py
|
Python
|
gpl-2.0
| 3,006
| 0.012641
|