| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
import numpy as np
import pygame
from sklearn.mixture import GMM
from math import sqrt, atan, pi
def emFit(results, numComponents):
if len(results) == 0:
return None
    gmm = GMM(numComponents, covariance_type='full', n_iter=100, n_init=4)
    gmm.fit(results)
components = []
    for componentID in range(numComponents):
mu = gmm.means_[componentID]
cov = gmm.covars_[componentID]
proba = gmm.weights_[componentID]
components.append((mu,cov,proba))
components = sorted(components,key=lambda x: x[0][0])
return components
def drawComponents(surface, windowSize, scaleFactor, components):
if components is None:
return
colors = [(255, 150, 150),(150, 150, 255),(150, 255, 150)]
for color,(mu,cov, proba) in zip(colors[:len(components)],components):
eigenvalues, eigenvectors = np.linalg.eig(cov)
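        # 5.991 is the chi-square quantile for 2 degrees of freedom at 95%,
        # so the ellipse drawn below covers ~95% of each Gaussian component.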
major = 2.0 * sqrt(5.991 * eigenvalues.max())
minor = 2.0 * sqrt(5.991 * eigenvalues.min())
angle1 = atan(eigenvectors[1][0]/eigenvectors[0][0])
angle2 = atan(eigenvectors[1][1]/eigenvectors[0][1])
if eigenvalues[0] > eigenvalues[1]:
angle = angle1
else:
angle = angle2
mu_x,mu_y = mu
if major < 1.0 or minor < 1.0:
continue
        s = pygame.Surface((major*scaleFactor[0], minor*scaleFactor[1]), pygame.SRCALPHA, 32)
        pygame.draw.ellipse(s, color, (0, 0, major*scaleFactor[0], minor*scaleFactor[1]))
s2 = pygame.transform.rotate(s, angle*360.0/(2.0*pi))
height, width = s2.get_rect().height,s2.get_rect().width
        surface.blit(s2, (mu_x*scaleFactor[0] - width/2.0, mu_y*scaleFactor[1] - height/2.0))
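# A minimal usage sketch with hypothetical data (assumes the legacy
# sklearn.mixture.GMM API from scikit-learn < 0.20 imported above):
# fit two synthetic 2-D clusters and print the recovered components.
if __name__ == '__main__':
    np.random.seed(0)
    points = np.vstack([np.random.randn(100, 2),
                        np.random.randn(100, 2) + [5.0, 5.0]]).tolist()
    for mu, cov, proba in emFit(points, 2):
        print('mu=%s, weight=%.3f' % (mu, proba))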
| sondree/Master-thesis | Python Simulator/simulator/FMM.py | Python | gpl-3.0 | 2,479 |
#!/usr/bin/env python
# coding: utf-8
class Frequent:
    """Frequent (Misra-Gries style) heavy-hitters counter over a stream."""
    def __init__(self):
        self.counters = {}
    def add(self, item, k, k2, t):
        # Note: k2 and t are accepted but unused by this implementation.
        if item in self.counters:
            self.counters[item] += 1
elif len(self.counters) <= k:
self.counters[item] = 1
else:
for key, value in self.counters.copy().items():
if value > 1:
self.counters[key] = value - 1
else:
del self.counters[key]
return key
def returnItems(self):
return self.counters
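# A minimal usage sketch (hypothetical stream): track heavy hitters with
# k=2 counters; the unused k2/t arguments are passed as None.
if __name__ == '__main__':
    f = Frequent()
    for item in ['a', 'a', 'b', 'a', 'c', 'a', 'b']:
        f.add(item, 2, None, None)
    print(f.returnItems())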
| markuskont/kirka | algorithms/Frequent.py | Python | gpl-3.0 | 610 |
import numpy
class RotationTranslation:
"""Combined translational and rotational transformation.
This is a subclass of Transformation.
Objects of this class are not created directly, but can be the
result of a composition of rotations and translations.
"""
def __init__(self, tensor, vector):
self.tensor = tensor.copy()
self.vector = vector.copy()
is_rotation_translation = 1
def __mul__(self, other):
if hasattr(other, 'is_rotation'):
return RotationTranslation(numpy.dot(self.tensor, other.tensor),
self.vector)
elif hasattr(other, 'is_translation'):
return RotationTranslation(self.tensor,
numpy.dot(self.tensor, other.vector)+self.vector)
elif hasattr(other, 'is_rotation_translation'):
return RotationTranslation(numpy.dot(self.tensor, other.tensor),
numpy.dot(self.tensor, other.vector)+self.vector)
else:
            raise ValueError('incompatible object')
def __call__(self, vector):
return numpy.dot(self.tensor, vector) + self.vector
def inverse(self):
return RotationTranslation(numpy.transpose(self.tensor),
numpy.dot(numpy.transpose(self.tensor),
-self.vector))
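# A minimal usage sketch (hypothetical values; normally such objects arise
# from composing rotations and translations): apply a transformation and
# verify that its inverse recovers the original vector.
if __name__ == '__main__':
    theta = 0.5
    tensor = numpy.array([[numpy.cos(theta), -numpy.sin(theta), 0.0],
                          [numpy.sin(theta), numpy.cos(theta), 0.0],
                          [0.0, 0.0, 1.0]])
    t = RotationTranslation(tensor, numpy.array([1.0, 2.0, 3.0]))
    v = numpy.array([0.5, -1.0, 2.0])
    print(t.inverse()(t(v)))  # approximately v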
| lidaobing/itcc | itcc/ccs2/transformation.py | Python | gpl-3.0 | 1,262 |
""" Store only meta data but no real data (except from store state of nodes) """
import logging
import os
import pwd
import yaml
from pySPACE.resources.dataset_defs.base import BaseDataset
class DummyDataset(BaseDataset):
""" Class to store only meta data of collection
This class overrides the 'store' method
in a way that only the collection meta data files are stored.
This type is intended to be passed to pySPACE as a result
by the NilSinkNode.
**Parameters**
:dataset_md:
The meta data of the current dataset.
(*optional, default: None*)
:Author: David Feess (david.feess@dfki.de)
:Created: 2010/03/30
"""
def __init__(self, dataset_md = None):
super(DummyDataset, self).__init__(dataset_md = dataset_md)
def store(self, result_dir, s_format = "None"):
        if s_format != "None":
            self._log("The format %s is not supported!" % s_format, level=logging.CRITICAL)
return
# Update the meta data
try:
author = pwd.getpwuid(os.getuid())[4]
        except Exception:
            author = "unknown"
            self._log("Author could not be resolved.", level=logging.WARNING)
self.update_meta_data({"type": "only output of individual nodes stored",
"storage_format": s_format,
"author" : author,
"data_pattern": "no data stored"})
# Store meta data
BaseDataset.store_meta_data(result_dir,self.meta_data)
| pyspace/test | pySPACE/resources/dataset_defs/dummy.py | Python | gpl-3.0 | 1,581 |
"""
WSGI config for ncbi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncbi.settings")
application = get_wsgi_application()
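# Usage sketch (hypothetical deployment): serve this with any WSGI server,
# e.g. `gunicorn ncbi.wsgi:application`.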
| erickpeirson/django-ncbi | ncbi/ncbi/wsgi.py | Python | gpl-3.0 | 385 |
import logging
from unittest import TestCase
from sass_to_scss import SassToSCSS
class TestSassToSCSS(TestCase):
def setUp(self):
self.sassy = SassToSCSS(loglevel=logging.DEBUG)
def trim_docstring(self, docstring):
return '\n'.join(docstring.split('\n')[1:-1])
def test_double_spaces(self):
self.sassy.spaces = 2
sass = """
body
background: red
header
background: blue
"""
scss = """
body {
background: red;
header {
background: blue;
}
}
"""
actual = self.sassy.convert(self.trim_docstring(sass).split('\n'))
expected = self.trim_docstring(scss)
        self.assertEqual(expected, actual)
def test_triple_nested_class(self):
sass = """
@import bourbon/bourbon
@import "http://fonts.googleapis.com/css?family=PT+Serif:400,700,400italic,700italic|Oswald:400,300,700|Droid+Sans:400,700"
$teal: #00917d
$serif: 'PT Serif', serif
=red-text
color: red
background-color: $teal
=rounded($amount, $background_color)
-moz-border-radius: $amount
-webkit-border-radius: $amount
border-radius: $amount
background-color: saturate(lighten($background_color, 30%), 100%)
.error
+red-text
.details
border: 3px solid #777
+rounded(0.5em, desaturate(#5336a2, 10%))
.container
position: absolute
top: 0
color: green
bottom: 0
.right, .row:hover, &.touch .row
right: 0
border: 1px solid #fff
h1
color: blue
.left
left: 0
h1
color: red
"""
scss = """
@import "bourbon/bourbon";
@import "http://fonts.googleapis.com/css?family=PT+Serif:400,700,400italic,700italic|Oswald:400,300,700|Droid+Sans:400,700";
$teal: #00917d;
$serif: 'PT Serif', serif;
@mixin red-text {
color: red;
background-color: $teal;
}
@mixin rounded($amount, $background_color) {
-moz-border-radius: $amount;
-webkit-border-radius: $amount;
border-radius: $amount;
background-color: saturate(lighten($background_color, 30%), 100%);
}
.error {
@include red-text;
.details {
border: 3px solid #777;
@include rounded(0.5em, desaturate(#5336a2, 10%));
}
}
.container {
position: absolute;
top: 0;
color: green;
bottom: 0;
.right, .row:hover, &.touch .row {
right: 0;
border: 1px solid #fff;
h1 {
color: blue;
}
}
.left {
left: 0;
h1 {
color: red;
}
}
}
"""
actual = self.sassy.convert(self.trim_docstring(sass).split('\n'))
expected = self.trim_docstring(scss)
        self.assertEqual(expected, actual)
| luqmaan/sass_to_scss | test_sass_to_scss.py | Python | gpl-3.0 | 2,703 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2008-2013 Nicolas Wack <wackou@smewt.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from smewt.base.utils import tolist, toresult
from smewt.base.textutils import u
from smewt.base.subtitletask import SubtitleTask
from smewt.plugins import mplayer
from guessit.language import Language
import smewt
import os, sys, time
import subprocess
import logging
log = logging.getLogger(__name__)
def get_episodes_and_subs(language, series, season=None):
if season:
episodes = set(ep for ep in tolist(series.episodes) if ep.season == int(season))
else:
episodes = set(tolist(series.episodes))
subs = []
for ep in episodes:
subs.extend(tolist(ep.get('subtitles')))
return episodes, subs
def get_subtitles(media_type, title, season=None, language=None):
db = smewt.SMEWTD_INSTANCE.database
language = language or db.config.get('subtitleLanguage') or 'en'
if media_type == 'episode':
series = db.find_one('Series', title=title)
episodes, subs = get_episodes_and_subs(language, series, season)
already_good = set(s.metadata for s in subs)
episodes = episodes - already_good
if episodes:
subtask = SubtitleTask(episodes, language)
smewt.SMEWTD_INSTANCE.taskManager.add(subtask)
return 'OK'
else:
msg = 'All episodes already have %s subtitles!' % Language(language).english_name
log.info(msg)
return msg
elif media_type == 'movie':
movie = db.find_one('Movie', title=title)
# check if we already have it
for sub in tolist(movie.get('subtitles')):
if sub.language == language:
msg = 'Movie already has a %s subtitle' % Language(language).english_name
log.info(msg)
return msg
subtask = SubtitleTask(movie, language)
smewt.SMEWTD_INSTANCE.taskManager.add(subtask)
return 'OK'
else:
msg = 'Don\'t know how to fetch subtitles for type: %s' % media_type
log.error(msg)
return msg
def _play(files, subs):
# launch external player
args = files
# make sure subs is as long as args so as to not cut it when zipping them together
subs = subs + [None] * (len(files) - len(subs))
if mplayer.variant != 'undefined':
# if we have mplayer (or one of its variant) installed, use it with
# subtitles support
opts = []
return mplayer.play(files, subs, opts)
elif sys.platform == 'linux2':
action = 'xdg-open'
# FIXME: xdg-open only accepts 1 argument, this will break movies split in multiple files...
args = args[:1]
# if we have smplayer installed, use it with subtitles support
if os.system('which smplayer') == 0:
action = 'smplayer'
args = [ '-fullscreen', '-close-at-end' ]
for video, subfile in zip(files, subs):
args.append(video)
if subfile:
args += [ '-sub', subfile ]
elif sys.platform == 'darwin':
action = 'open'
elif sys.platform == 'win32':
action = 'open'
log.info('launching %s with args = %s' % (action, str(args)))
subprocess.call([action]+args)
def play_video(metadata, sublang=None):
# FIXME: this should be handled properly with media player plugins
# files should be a list of (Metadata, sub), where sub is possibly None
# then we would look into the available graphs where such a Metadata has files,
# and choose the one on the fastest media (ie: local before nfs before tcp)
# it should also choose subtitles the same way, so we could even imagine reading
# the video from one location and the subs from another
# find list of all files to be played
# returns a list of (video_filename, sub_filename)
if sublang:
msg = 'Playing %s with %s subtitles' % (metadata, Language(sublang).english_name)
else:
msg = 'Playing %s with no subtitles' % metadata
log.info(u(msg))
# FIXME: we assume that sorting alphanumerically is good enough, but that is
# not necessarily the case...
# we should also look whether the file also has the 'cdNumber' attribute
files = tolist(metadata.get('files'))
files = sorted(files, key=lambda f: f.get('filename'))
if sublang is not None:
sublang = Language(sublang)
for sub in tolist(metadata.get('subtitles')):
if sub.language == sublang:
subs = sorted(tolist(sub.get('files')), key=lambda f: f.get('filename'))
break
else:
subs = [None]*len(files)
# update last viewed info
metadata.lastViewed = time.time()
metadata.watched = True
_play([ f.filename for f in files],
[ s.filename for s in subs if s ])
def play_file(filename):
_play([filename], [None])
| wackou/smewt | smewt/actions.py | Python | gpl-3.0 | 5,613 |
# -*- coding: utf-8 -*-
# sync.py
# Copyright (C) 2017 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Synchronization between blobs client/server
"""
from collections import defaultdict
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.internet import error
from .sql import SyncStatus
from .errors import RetriableTransferError
logger = Logger()
def sleep(seconds):
d = defer.Deferred()
reactor.callLater(seconds, d.callback, None)
return d
MAX_WAIT = 60 # In seconds. Max time between retries
@defer.inlineCallbacks
def with_retry(func, *args, **kwargs):
"""
Run func repeatedly until success, as long as the exception raised is
a "retriable error". If an exception of another kind is raised by func,
the retrying stops and that exception is propagated up the stack.
"""
retry_wait = 1
retriable_errors = (error.ConnectError, error.ConnectionClosed,
RetriableTransferError,)
while True:
try:
yield func(*args, **kwargs)
break
except retriable_errors:
yield sleep(retry_wait)
retry_wait = min(retry_wait + 10, MAX_WAIT)
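# Usage sketch (mirrors the calls made below, e.g. in _send_next):
#
#     yield with_retry(self._send, blob_id, namespace)
#
# Transient network errors trigger a retry with growing backoff; any other
# exception propagates to the caller immediately.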
class BlobsSynchronizer(object):
def __init__(self):
self.locks = defaultdict(defer.DeferredLock)
@defer.inlineCallbacks
def refresh_sync_status_from_server(self, namespace=''):
d1 = self.remote_list(namespace=namespace)
d2 = self.local_list(namespace=namespace)
remote_list, local_list = yield defer.gatherResults([d1, d2])
pending_download_ids = tuple(set(remote_list) - set(local_list))
pending_upload_ids = tuple(set(local_list) - set(remote_list))
yield self.local.update_batch_sync_status(
pending_download_ids,
SyncStatus.PENDING_DOWNLOAD,
namespace=namespace)
yield self.local.update_batch_sync_status(
pending_upload_ids,
SyncStatus.PENDING_UPLOAD,
namespace=namespace)
@defer.inlineCallbacks
def _apply_deletions_from_server(self, namespace=''):
remote_deletions = self.remote_list(namespace=namespace, deleted=True)
remote_deletions = yield remote_deletions
yield self.local.batch_delete(remote_deletions)
yield self.local.update_batch_sync_status(
remote_deletions,
SyncStatus.SYNCED,
namespace=namespace)
def send_missing(self, namespace=''):
"""
Compare local and remote blobs and send what's missing in server.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all local blobs were sent to
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['send_missing']
d = lock.run(self._send_missing, namespace)
return d
@defer.inlineCallbacks
def _send_missing(self, namespace):
        # the list of priorities must be refreshed every time a new blob is
        # about to be transferred. To do that, we use a semaphore and get a new
        # ordered list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._send_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _send_next(self, namespace, scheduled):
status = SyncStatus.PENDING_UPLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob uploads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Sending blob: %s" % (blob_id,))
yield with_retry(self._send, blob_id, namespace)
defer.returnValue(True)
def fetch_missing(self, namespace=''):
"""
Compare local and remote blobs and fetch what's missing in local
storage.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all remote blobs were received from
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['fetch_missing']
d = lock.run(self._fetch_missing, namespace)
return d
@defer.inlineCallbacks
def _fetch_missing(self, namespace=''):
        # the list of priorities must be refreshed every time a new blob is
        # about to be transferred. To do that, we use a semaphore and get a new
        # ordered list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._fetch_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _fetch_next(self, namespace, scheduled):
status = SyncStatus.PENDING_DOWNLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob downloads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Fetching blob: %s" % (blob_id,))
yield with_retry(self._fetch, blob_id, namespace)
defer.returnValue(True)
@defer.inlineCallbacks
def sync(self, namespace=''):
try:
yield self._apply_deletions_from_server(namespace)
yield self.refresh_sync_status_from_server(namespace)
yield self.fetch_missing(namespace)
yield self.send_missing(namespace)
except defer.FirstError as e:
e.subFailure.raiseException()
@property
def sync_progress(self):
return self.local.get_sync_progress()
| leapcode/soledad | src/leap/soledad/client/_db/blobs/sync.py | Python | gpl-3.0 | 7,065 |
import logging
import random
from gevent import Greenlet, sleep
from threading import Lock
from app import create_app
from dota_bot import DotaBot
from models import db, DynamicConfiguration, Game, GameStatus, GameVIP
from helpers.general import divide_vip_list_per_type
# Log
logging.basicConfig(format='[%(asctime)s] %(levelname)s %(message)s', level=logging.INFO)
class Credential:
"""A Steam account credentials.
Attributes:
login: Steam user login.
password: Steam user password.
"""
def __init__(self, login, password):
"""Create a user credentials.
Args:
login: user login.
password: user password.
"""
self.login = login
self.password = password
class WorkerManager(Greenlet):
    """Master class starting Dota bots to process jobs.
    The manager holds an initial pool of Steam credentials.
    It is a thread polling jobs from the database, starting a new Dota bot whenever a job is available.
    After processing a job, the Dota bot reports that its credentials are available again.
Attributes:
app: The flask application the manager is linked to, containing configuration objects and database access.
working_bots: A dictionary of all currently working Dota bots, indexed by bot login.
"""
def __init__(self):
"""Initialize the worker manager thread."""
Greenlet.__init__(self)
# Initialize
self.app = create_app()
self.working_bots = {}
self.credentials = []
self.mutex = Lock()
# Parse credentials from config
bot_credentials_string = self.app.config['STEAM_BOTS']
bot_credentials = bot_credentials_string.split('@')
i = 0
while i < len(bot_credentials):
login = bot_credentials[i]
password = bot_credentials[i+1]
self.credentials.append(Credential(login, password))
i = i + 2
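        # The expected STEAM_BOTS format (inferred from the parsing above) is
        # a single '@'-separated string alternating logins and passwords,
        # e.g. 'login1@password1@login2@password2'.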
def _run(self):
"""Start the main loop of the thread, creating Dota bots to process available jobs."""
while True:
with self.app.app_context():
admins, casters = divide_vip_list_per_type(GameVIP.get_all_vips())
bot_pause = DynamicConfiguration.get('bot_pause', 'False')
for game in db.session().query(Game)\
.filter(Game.status==GameStatus.WAITING_FOR_BOT)\
.order_by(Game.id).all():
if len(self.credentials) == 0 or bot_pause == 'True':
continue
# Start a Dota bot to process the game
self.mutex.acquire()
credential = self.credentials.pop(random.randint(0, len(self.credentials) - 1))
g = DotaBot(self, credential, admins, casters, game.id, game.name, game.password,
game.team1, game.team2, game.team1_ids, game.team2_ids, game.team_choosing_first)
self.working_bots[credential.login] = g
game.status = GameStatus.CREATION_IN_PROGRESS
game.bot = credential.login
db.session().commit()
g.start()
self.mutex.release()
sleep(60)
    def bot_end(self, credential):
        """Signal that a bot has finished its work and the credential is free to use again.
Args:
credential: `Credential` of the bot.
"""
self.mutex.acquire()
self.working_bots.pop(credential.login)
self.credentials.append(credential)
self.mutex.release()
# Start a Manager if this file is the main script.
if __name__ == '__main__':
g = WorkerManager()
g.start()
g.join()
| FroggedTV/grenouilleAPI | backend/bot_app.py | Python | gpl-3.0 | 3,850 |
#!/usr/bin/env python
# /logics/zeit.py
import datetime
sh_now = sh.now()
debug = False
# Functions
def leap_year(year):
if (year % 400 == 0) or ((year % 4 == 0) and not (year % 100 == 0)):
return True
else:
return False
def days_of_month(month, year):
if month in [1, 3, 5, 7, 8, 10, 12]:
days = 31
elif month in [4, 6, 9, 11]:
days = 30
elif leap_year(year):
days = 29
else:
days = 28
return days
def days_of_year(year):
period_end = datetime.datetime(year,12,31)
days_of_year = (period_end - datetime.datetime(period_end.year, 1, 1)).days + 1
return(days_of_year)
def day_of_year(year,month,day):
period_end = datetime.datetime(year,month,day)
day_of_year = (period_end - datetime.datetime(period_end.year, 1, 1)).days + 1
return(day_of_year)
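# Quick examples (leap-year arithmetic): leap_year(2016) -> True,
# leap_year(1900) -> False; days_of_month(2, 2016) -> 29;
# day_of_year(2016, 3, 1) -> 61 (31 days of January + 29 of February + 1).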
if debug:
    print("RUNNING LOGIC OF TIME - REMOVE AFTER DEBUG")
    print(sh_now.hour)  # hour
    print(sh_now.minute)  # minute
    print(sh_now.second)  # second
    print(sh_now.day)  # day
    print(sh_now.month)  # month
    print(sh_now.isoweekday())  # weekday
    print(sh.now().isocalendar()[1])  # calendar week
# Second/minute
sh.second.since.minute(sh_now.second)
sh.second.until.minute(60 - sh_now.second - 1)
# Minute/hour
sh.minute.since.hour(sh_now.minute)
sh.minute.until.hour(60 - sh_now.minute - 1)
# Hour/day
sh.hour.since.midnight(sh_now.hour)
sh.hour.until.midnight(24 - sh_now.hour - 1)
# Day/week
sh.day.since.week(sh_now.isoweekday())
sh.day.until.week(7 - sh_now.isoweekday())
# Hour/week
sh.hour.since.week(sh.hour.since.midnight() + (24 * (sh.day.since.week() - 1)))
sh.hour.until.week(sh.hour.until.midnight() + (24 * sh.day.until.week()))
# Calendar week/year
sh.week.since.year(sh.now().isocalendar()[1])
# Month/year
sh.month.since.year(sh_now.month)
sh.month.until.year(12 - sh_now.month)
# Second/hour
sh.second.since.hour(sh.second.since.minute() + (60 * sh.minute.since.hour()))
sh.second.until.hour(sh.second.until.minute() + (60 * sh.minute.until.hour()))
# Second/day
sh.second.since.midnight(sh.second.since.minute() + (3600 * sh.hour.since.midnight()))
sh.second.until.midnight(sh.second.until.minute() + (3600 * sh.hour.until.midnight()))
# Minute/day
sh.minute.since.midnight(sh.minute.since.hour() + (60 * sh.hour.since.midnight()))
sh.minute.until.midnight(sh.minute.until.hour() + (60 * sh.hour.until.midnight()))
# Minute/week
sh.minute.since.week(sh.minute.since.hour() + (60 * sh.hour.since.week()))
sh.minute.until.week(sh.minute.until.hour() + (60 * sh.hour.until.week()))
# Second/week
sh.second.since.week(sh.second.since.minute() + (60 * sh.minute.since.week()))
sh.second.until.week(sh.second.until.minute() + (60 * sh.minute.until.week()))
# Days/month
sh.day.since.month(sh_now.day - 1)
sh.day.until.month(days_of_month(sh_now.month, sh_now.year) - sh.day.since.month() - 1)
# Days/year
sh.day.since.year(day_of_year(sh_now.year, sh_now.month, sh_now.day) - 1)
sh.day.until.year(days_of_year(sh_now.year) - sh.day.since.year() - 1)
# Hour/month
sh.hour.since.month((24 * sh.day.since.month()) + sh.hour.since.midnight())
sh.hour.until.month((24 * days_of_month(sh_now.month, sh_now.year)) - sh.hour.since.month() - 1)
# Hour/year
sh.hour.since.year((24 * sh.day.since.year()) + sh.hour.since.midnight())
sh.hour.until.year((24 * days_of_year(sh_now.year)) - sh.hour.since.year() - 1)
# Minute/month
sh.minute.since.month((60 * sh.hour.since.month()) + sh.minute.since.hour())
sh.minute.until.month((60 * sh.hour.until.month()) + sh.minute.until.hour())
# Minute/year
sh.minute.since.year((60 * sh.hour.since.year()) + sh.minute.since.hour())
sh.minute.until.year((60 * sh.hour.until.year()) + sh.minute.until.hour())
# Second/month
sh.second.since.month((60 * sh.minute.since.month()) + sh.second.since.minute())
sh.second.until.month((60 * sh.minute.until.month()) + sh.second.until.minute())
# Second/year
sh.second.since.year((60 * sh.minute.since.year()) + sh.second.since.minute())
sh.second.until.year((60 * sh.minute.until.year()) + sh.second.until.minute())
| martinb07/mysmarthome | logics/zeit.py | Python | gpl-3.0 | 4,137 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
from autopilot.matchers import Eventually
from testtools.matchers import Equals
from bmicalc import tests
class MainViewTestCase(tests.BaseTestCase):
"""Tests for the mainview"""
def setUp(self):
super(MainViewTestCase, self).setUp()
def test_click_button(self):
# Find and click the button
button = self.app.main_view.get_button()
self.app.pointing_device.click_object(button)
# Make an assertion about what should happen
label = self.app.main_view.get_label()
self.assertThat(label.text, Eventually(Equals('..world!')))
| avi-software/bmicalc | app/tests/autopilot/weightcalc/tests/test_main.py | Python | gpl-3.0 | 671 |
from __future__ import absolute_import, division, unicode_literals
import codecs
import re
from io import StringIO
from pip._vendor import webencodings
from pip._vendor.six import text_type, binary_type
from pip._vendor.six.moves import http_client, urllib
from . import _utils
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import ReparseException
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, **kwargs):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
if (isinstance(source, http_client.HTTPResponse) or
# Also check for addinfourl wrapping HTTPResponse
(isinstance(source, urllib.response.addbase) and
isinstance(source.fp, http_client.HTTPResponse))):
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
encodings = [x for x in kwargs if x.endswith("_encoding")]
if encodings:
raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
return HTMLUnicodeInputStream(source, **kwargs)
else:
return HTMLBinaryInputStream(source, **kwargs)
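# Usage sketch (hypothetical input): the factory above picks the unicode or
# binary implementation based on the source type, e.g.
#
#     stream = HTMLInputStream(b"<meta charset='utf-8'><p>hi")
#     stream.char()  # yields decoded characters once the encoding is detected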
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
if not _utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
else:
self.reportCharacterErrors = self.characterErrorsUCS2
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (lookupEncoding("utf-8"), "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for _ in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if _utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, override_encoding=None, transport_encoding=None,
same_origin_parent_encoding=None, likely_encoding=None,
default_encoding="windows-1252", useChardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 1024
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Things from args
self.override_encoding = override_encoding
self.transport_encoding = transport_encoding
self.same_origin_parent_encoding = same_origin_parent_encoding
self.likely_encoding = likely_encoding
self.default_encoding = default_encoding
# Determine encoding
self.charEncoding = self.determineEncoding(useChardet)
assert self.charEncoding[0] is not None
# Call superclass
self.reset()
def reset(self):
self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except: # pylint:disable=bare-except
stream = BufferedStream(stream)
return stream
def determineEncoding(self, chardet=True):
# BOMs take precedence over everything
# This will also read past the BOM if present
charEncoding = self.detectBOM(), "certain"
if charEncoding[0] is not None:
return charEncoding
        # If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Now check the transport layer
charEncoding = lookupEncoding(self.transport_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Look for meta elements with encoding information
charEncoding = self.detectEncodingMeta(), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Parent document encoding
charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
return charEncoding
# "likely" encoding
charEncoding = lookupEncoding(self.likely_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Guess with chardet, if available
if chardet:
try:
from chardet.universaldetector import UniversalDetector
except ImportError:
pass
else:
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = lookupEncoding(detector.result['encoding'])
self.rawStream.seek(0)
if encoding is not None:
return encoding, "tentative"
# Try the default encoding
charEncoding = lookupEncoding(self.default_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Fallback to html5lib's default if even that hasn't worked
return lookupEncoding("windows-1252"), "tentative"
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = lookupEncoding(newEncoding)
if newEncoding is None:
return
if newEncoding.name in ("utf-16be", "utf-16le"):
newEncoding = lookupEncoding("utf-8")
assert newEncoding is not None
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.charEncoding = (newEncoding, "certain")
self.reset()
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
if encoding:
self.rawStream.seek(seek)
return lookupEncoding(encoding)
else:
self.rawStream.seek(0)
return None
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
# pylint:disable=unused-argument
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for _ in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space, just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def lookupEncoding(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, binary_type):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding is not None:
try:
return webencodings.lookup(encoding)
except AttributeError:
return None
else:
return None
| mars-knowsnothing/amos-bot | src/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py | Python | gpl-3.0 | 32,530 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test copying a package category in the library editor
"""
def test(library_editor, helpers):
"""
Copy package category with "New Library Element" wizard
"""
le = library_editor
# Open "New Library Element" wizard
le.action('libraryEditorActionNewElement').trigger(blocking=False)
# Choose "Copy existing element"
le.widget('libraryEditorNewElementWizardChooseTypeCopyRadioButton').set_property('checked', True)
# Choose type of element
le.widget('libraryEditorNewElementWizardChooseTypePkgCatButton').click()
# Choose category
category_tree = le.widget('libraryEditorNewElementWizardCopyFromCategoriesTree')
helpers.wait_for_model_items_count(category_tree, 1)
category = category_tree.model().items().items[0]
category_tree.select_item(category)
le.widget('libraryEditorNewElementWizardNextButton').click()
# Check metadata
widget_properties = {
('NameEdit', 'text'): 'C-SMT',
('DescriptionEdit', 'plainText'): '',
('KeywordsEdit', 'text'): '',
('VersionEdit', 'text'): '0.1',
}
for (widget, property), value in widget_properties.items():
props = le.widget('libraryEditorNewElementWizardMetadata' + widget).properties()
assert props[property] == value
# Finish
dialog = le.widget('libraryEditorNewElementWizard')
le.widget('libraryEditorNewElementWizardFinishButton').click()
helpers.wait_until_widget_hidden(dialog)
# Check if a new tab is opened (indicates that the element was created)
tab_props = le.widget('libraryEditorStackedWidget').properties()
assert tab_props['count'] == 2
assert tab_props['currentIndex'] == 1
# Check metadata
assert le.widget('libraryEditorPkgCatNameEdit').properties()['text'] == 'C-SMT'
assert le.widget('libraryEditorPkgCatDescriptionEdit').properties()['plainText'] == ''
assert le.widget('libraryEditorPkgCatKeywordsEdit').properties()['text'] == ''
assert le.widget('libraryEditorPkgCatVersionEdit').properties()['text'] == '0.1'
| rnestler/LibrePCB | tests/funq/libraryeditor/test_copy_package_category.py | Python | gpl-3.0 | 2,109 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
"""
quartet_sampling.py: Quartet Sampling method for
phylogenetic branch support evaluation
<http://www.github.com/FePhyFoFum/quartetsampling>
"""
import argparse
import os
import sys
import time
from multiprocessing import Manager, Pool
from shutil import copyfile
from tree_data import TreeData, write_test_trees
from rep_data import DataStore
from rep_data import process_replicate_raxml, process_replicate_raxml_lrt
from rep_data import process_replicate_raxmlng, process_replicate_raxmlng_lrt
from rep_data import process_replicate_iqtree, process_replicate_iqtree_lrt
from rep_data import process_replicate_paup
from rep_data import get_replicates_exhaustive, get_replicates_random
from rep_data import write_run_stats
from paramset import ParamSet, read_config
from alignment import Alignment
LICENSE = """
This file is part of 'quartetsampling'.
'quartetsampling' is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
'quartetsampling' is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 'quartetsampling'. If not, see <http://www.gnu.org/licenses/>.
"""
def generate_argparser():
parser = argparse.ArgumentParser(
prog="quartet_sampling.py",
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog=LICENSE)
parser.add_argument("--tree", type=open, nargs=1, required=True,
# Prev -t
help=("The input tree in Newick "
"(parenthetical) format."))
parser.add_argument("--align", "--alignment", type=open, nargs=1,
# Prev -a
required=True, dest="align",
help=("Alignment file in \"relaxed phylip\" format, "
"as used by RAxML."))
parser.add_argument("--reps", "--number-of-reps", type=int, nargs=1,
# Prev -N
required=True, default=100, dest="reps",
help=("The number of replicate quartet topology "
"searches to be performed at each node."))
parser.add_argument("--threads", "--number-of-threads", type=int, nargs=1,
# Prev -T
required=True, default=1, dest="threads",
help=("The number of parallel threads to be used "
"by Python for quartet topology searches."))
parser.add_argument("--lnlike", "--lnlike-thresh", type=float, nargs=1,
# Prev -L
default=2.0, dest="lnlike",
                        help=("The lnlike threshold that is the minimum "
"value by which the log-likelihood value "
"of the best-likelihood tree must be "
"higher than the second-best-likelihood tree "
"for the replicate to register as the "
"best-likelihood topology rather than "
"'uncertain'. If set to zero, this turns off "
"likelihood evaluation mode and invokes tree "
"inference mode where a tree is simply inferred "
"from the alignment without considering "
"likelihood (QI values are N/A in this case)."))
parser.add_argument("--result-prefix", type=str, nargs=1,
# Prev -r
help="A prefix to put on the result files.")
parser.add_argument("--data-type", choices=('nuc', 'amino', 'cat'),
# Prev -d
default=["nuc"], nargs=1,
help=("(nuc)leotide, (amino) acid, "
"or (cat)egorical data"))
parser.add_argument("--min-overlap", type=int,
# Prev -O
help=("The minimum sites required to be sampled for "
"all taxa in a given quartet."))
parser.add_argument("--results-dir", type=os.path.abspath, nargs=1,
# Prev -o
help=("A directory to which output files will "
"be saved. If not supplied, the current working "
"directory will be used. (default is current "
"folder)."))
parser.add_argument("--verbout", action="store_true",
# Prev -V
help=("Provide output of the frequencies of each "
"topology and QC."))
parser.add_argument("--partitions", type=os.path.abspath, nargs=1,
# Prev -q
help=("Partitions file in RAxML format. If omitted "
"then the entire alignment will be treated "
"as one partition for all quartet replicate "
"topology searches."))
parser.add_argument("--genetrees", type=os.path.abspath, nargs=1,
# Prev -g
help=("Use partitions file (RAxML format) to divide "
"the alignment into separate gene tree regions. "
"Gene alignments will be sampled random for the "
"quartet topology searches."))
parser.add_argument("--temp-dir", type=os.path.abspath, nargs=1,
# Prev -e
help=("A directory to which temporary files will be "
"saved. If not supplied, 'QuartetSampling' "
"will be created in the current "
"working directory. "
"When specifying a custom temporary output "
"the characters 'QuartetSampling' must appear "
"in the directory name to prevent accidental "
"file deletion. (default='./QuartetSampling'"))
parser.add_argument("--retain-temp", action="store_true",
help=("Do not remove temporary files"))
parser.add_argument("--clade", type=str,
# Prev: -C
help=("Conduct analysis on specific clade identified "
"by CSV taxon list"))
parser.add_argument("--start-node-number", type=int, nargs=1,
# Prev -s
help=("An integer denoting the node to which to start "
"from. Nodes will be read from topologically "
"identical (and isomorphic!) input trees in "
"deterministic order, so this argument may be "
"used to restart at an intermediate position "
"(in case the previous run was canceled before "
"completion, for example)."))
parser.add_argument("--stop-node-number", type=int, nargs=1,
# Prev -p
help=("An integer denoting the node at which to stop. "
"Will include nodes with indices <= the stop "
"node number. This argument may be used to "
"limit the length of a given run in case only "
"a certain part of the tree is of interest. "
"Nodes will be read from topologically "
"identical (and isomorphic!) input trees "
"in deterministic order."))
parser.add_argument("--engine", nargs=1, default=('raxml-ng',),
choices=('raxml-ng', 'raxml', 'paup', 'iqtree'),
help=("Name of the program to use to infer trees or"
" evaluate tree model likelihoods."))
parser.add_argument("--engine-exec", nargs=1,
help=("Full file path of the tree inference or"
" likelihood evaluation engine."))
parser.add_argument("--engine-model", nargs=1,
help=("Advanced: specify a custom model name "
"for the tree engine"))
# parser.add_argument("--raxml-model", nargs=1,
# help=("Advanced: specify a custom RAxML model name "
# "for the raxml '-m' parameter"))
# parser.add_argument("-X", "--raxml-executable", nargs=1,
# help=("The name (or absolute path) of the raxml "
# "executable to be used for calculating "
# "likelihoods on quartet topologies."
# "(default='raxml')"))
# parser.add_argument("--raxml-model", nargs=1,
# help=("Advanced: specify a custom RAxML model name "
# "for the raxml '-m' parameter"))
# parser.add_argument("-P", "--paup", action="store_true",
# help="Use PAUP instead of RAxML.")
# parser.add_argument("--paup-executable", nargs=1, default=["paup"],
# help=("The name or path of the PAUP executable to "
# "be used for calculated quartets."))
parser.add_argument("--ignore-errors", action="store_true",
help=("Ignore RAxML and PAUP erroneous runs"))
parser.add_argument("--low-mem", action="store_true",
help=("Do not store large alignment in memory "
"for whole-alignment (non-genetree) mode"))
parser.add_argument('--max-random-sample-proportion', type=float,
help=("The proportion of possible replicates explored "
"unsuccessfully by the random generation "
"procedure before it gives up. Because this "
"generates random replicates, it takes "
"progressively longer as it proceeds. To avoid "
"long runtimes, the recommended range is < 0.5 "
"(which is the default)."))
parser.add_argument("--calc-qdstats", action="store_true",
help=("EXPERIMENTAL: Calculates Chi-square test "
"for QD tree frequencies. Use only "
" if Scipy is available. "
"Will increase running time."))
parser.add_argument("--verbose", action="store_true",
help="Provide more verbose output if specified.")
parser.add_argument('--version', action='version',
version='%(prog)s version 1.3.1.b')
return parser
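# A minimal sketch (not part of the original script) showing how the parser
# above can be driven programmatically, e.g. from a test. The file names are
# placeholders and must exist, because --tree and --align use type=open.
def _example_parse(tree_path, align_path):
    """Parse a hypothetical command line and return the args namespace."""
    parser = generate_argparser()
    return parser.parse_args(["--tree", tree_path,
                              "--align", align_path,
                              "--reps", "100",
                              "--threads", "4"])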
def main(arguments=None):
"""Main method for quartet_sampling"""
if arguments is None:
if (len(sys.argv) == 2 and
sys.argv[1] not in ('-h', '--help', '--version')):
arguments = read_config(sys.argv[1])
print("Config file used.")
print("Executing with arguments: ", " ".join(arguments))
else:
arguments = sys.argv[1:]
parser = generate_argparser()
args = parser.parse_args(arguments)
treedata = TreeData(args)
params = ParamSet()
params.setup(args, treedata.nleaves)
if args.verbose:
print("-----------")
print("PARAMETERS:")
print(params)
print("-----------")
maindata = DataStore(params)
# shared object access for multithreading
manager = Manager()
lock = manager.RLock()
aln = Alignment(params)
if params['using_genetrees']:
aln.read_genes(args.align[0], params)
else:
aln.read_align(args.align[0], params)
params['min_overlap'] = aln.min_overlap
# k is the node counter
k = 1
# if we are starting at the beginning, initialize the results file
# (otherwise assume it's already there and don't overwrite it)
if not params['startk'] > k:
maindata.write_headers(params['score_result_file_path'])
maindata.write_headers(params['nodecounts_result_file_path'],
restype="nodecounts", delim='\t')
# process the nodes in the tree
params['starttime'] = time.time()
for fnode in treedata.tree.iternodes():
if params['verbose'] is True:
print("testing node", [x.label for x in fnode.leaves()])
if treedata.clade is not None:
if fnode is not treedata.clade:
continue
os.chdir(params['temp_wd'])
if k > params['stopk']:
print("Processed all nodes up to the stop node. Exiting...")
break
write_test_trees()
# skip tips and root
k, leafsets = treedata.check_node(fnode, k, params)
if leafsets is False:
if params['verbose'] is True:
print("skipping node...")
continue
# Begin multiprocessing queue
results_queue = manager.Queue()
n_completed = manager.Value("i", 0, "lock")
# Establish replicates
n_possible_replicates = 1
for leafset in leafsets.values():
n_possible_replicates *= len(leafset)
if params['using_genetrees']:
n_possible_replicates *= len(aln.seqs)
if params['verbose'] is True:
print('number of possible gene-quartet combos: {}'.format(
n_possible_replicates))
elif params['verbose'] is True:
print('number of possible quartets: {}'.format(
n_possible_replicates))
if (n_possible_replicates *
params['max_quartet_enumeration_threshold'] < params['nreps']):
if params['verbose'] is True:
print('Number of possible quartets is close enough to the '
'total number to be sampled, so will generate all '
'and do a random draw')
replicates, repstats = get_replicates_exhaustive(
n_completed, results_queue, leafsets,
params, aln, fnode, lock)
else:
if params['verbose']:
print('Generating random quartets...')
replicates, repstats = get_replicates_random(
n_completed, results_queue, leafsets,
params, aln, fnode, lock)
nreplicates = len(replicates)
if nreplicates < 1: # no suitable replicates
maindata.process_empty_rep_results(fnode, params, nreplicates)
else:
# copy original partitions file, should not change throughout run
if params['partitions_file_path'] is not None:
copyfile(params['partitions_file_path'], "temp_parts")
# run the raxml calls in parallel
# now designate multiprocessing resource pool.
# important to do outside node loop. garbage collecting does not
# apply to threads! set maxtasksperchild to release mem and files
pool = Pool(params['nprocs'], maxtasksperchild=1)
# PAUP Case
if params['engine'] == 'paup':
pool.map(process_replicate_paup, replicates)
# IQ-TREE with likelihood threshold
elif params['lnlikethresh'] > 0 and params['engine'] == 'iqtree':
pool.map(process_replicate_iqtree_lrt, replicates)
            # IQ-TREE without likelihood threshold
elif params['engine'] == 'iqtree':
pool.map(process_replicate_iqtree, replicates)
# RAxML Classic with likelihood threshold
elif params['lnlikethresh'] > 0 and params['engine'] == 'raxml':
pool.map(process_replicate_raxml_lrt, replicates)
# RAxML Classic without likelihood threshold
elif params['engine'] == 'raxml':
pool.map(process_replicate_raxml, replicates)
# RAxML-ng with likelihood threshold
elif params['lnlikethresh'] > 0:
pool.map(process_replicate_raxmlng_lrt, replicates)
# RAxML-ng without likelihood threshold
else:
pool.map(process_replicate_raxmlng, replicates)
pool.close()
pool.join()
del pool
# print("")
# now process the results. first open a file to hold topologies
# sending params['just_clade'] = True will give back detailed
# name results
maindata.process_rep_results(fnode, results_queue, params,
nreplicates) # , leafsets)
# clean up
del results_queue
del n_completed
# break # Left in place for troubleshooting
if params['retain_temp'] is False:
for the_file in os.listdir(params['temp_wd']):
file_path = os.path.join(params['temp_wd'], the_file)
try:
if os.path.isfile(file_path):
if "QuartetSampling" not in file_path:
print(file_path,
" does not contain 'QuartetSampling' "
"and will not be deleted for safety")
else:
os.remove(file_path)
            except FileNotFoundError:
print(file_path, " not found")
if 'QuartetSampling' in params['temp_wd']:
os.rmdir(params['temp_wd'])
qf_scores = maindata.write_qf_scores(params["score_result_file_path"])
treedata.write_figtree(params['figtree_file_path'], qf_scores)
treedata.write_scoretrees(params)
write_run_stats(repstats, params)
print(("\ndone.\nscores written to: {}\nlabeled "
"tree written to: {}\ntotal time {:.2f} hours").format(
params['score_result_file_path'],
params['tree_result_file_path'],
(time.time() - params['starttime']) / 3600))
return ''
if __name__ == "__main__":
main()
|
FePhyFoFum/quartetsampling
|
pysrc/quartet_sampling.py
|
Python
|
gpl-3.0
| 18,821
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import time
from operator import itemgetter
from sqlalchemy.sql import func, select
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.modules.groups import GroupProxy
from indico_migrate.logger import logger_proxy
from indico_migrate.util import convert_to_unicode
class Importer(object):
step_name = ''
#: Specify plugins that need to be loaded for the import (e.g. to access its .settings property)
plugins = frozenset()
print_info = logger_proxy('info')
print_success = logger_proxy('success')
print_warning = logger_proxy('warning')
print_error = logger_proxy('error')
print_log = logger_proxy('log')
def __init__(self, logger, app, sqlalchemy_uri, zodb_root, verbose, dblog, default_group_provider, tz, **kwargs):
self.sqlalchemy_uri = sqlalchemy_uri
self.quiet = not verbose
self.dblog = dblog
self.zodb_root = zodb_root
self.app = app
self.tz = tz
self.default_group_provider = default_group_provider
self.logger = logger
self.initialize_global_ns(Importer._global_ns)
def initialize_global_ns(self, g):
pass
@property
def log_prefix(self):
return '%[cyan]{:<14}%[reset]'.format('[%[grey!]{}%[cyan]]'.format(self.step_name))
@property
def makac_info(self):
return self.zodb_root['MaKaCInfo']['main']
@property
def global_ns(self):
return Importer._global_ns
def __repr__(self):
return '<{}({})>'.format(type(self).__name__, self.sqlalchemy_uri)
def flushing_iterator(self, iterable, n=5000):
"""Iterates over `iterable` and flushes the ZODB cache every `n` items.
:param iterable: an iterable object
:param n: number of items to flush after
"""
conn = self.zodb_root._p_jar
for i, item in enumerate(iterable, 1):
yield item
if i % n == 0:
conn.sync()
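    # Usage sketch (the ZODB collection name is hypothetical): keeps memory
    # bounded by syncing the connection every 5000 items.
    #
    #     for conf in self.flushing_iterator(self.zodb_root['conferences'].itervalues()):
    #         migrate_conference(conf)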
def convert_principal(self, old_principal):
"""Converts a legacy principal to PrincipalMixin style"""
if old_principal.__class__.__name__ == 'Avatar':
principal = self.global_ns.avatar_merged_user.get(old_principal.id)
if not principal and 'email' in old_principal.__dict__:
email = convert_to_unicode(old_principal.__dict__['email']).lower()
principal = self.global_ns.users_by_primary_email.get(
email, self.global_ns.users_by_secondary_email.get(email))
if principal is not None:
self.print_warning('Using {} for {} (matched via {})'.format(principal, old_principal, email))
if not principal:
self.print_error("User {} doesn't exist".format(old_principal.id))
return principal
elif old_principal.__class__.__name__ == 'Group':
assert int(old_principal.id) in self.global_ns.all_groups
return GroupProxy(int(old_principal.id))
elif old_principal.__class__.__name__ in {'CERNGroup', 'LDAPGroup', 'NiceGroup'}:
return GroupProxy(old_principal.id, self.default_group_provider)
def convert_principal_list(self, opt):
"""Convert ACL principals to new objects"""
return set(filter(None, (self.convert_principal(principal) for principal in opt._PluginOption__value)))
def fix_sequences(self, schema=None, tables=None):
for name, cls in sorted(db.Model._decl_class_registry.iteritems(), key=itemgetter(0)):
table = getattr(cls, '__table__', None)
if table is None:
continue
elif schema is not None and table.schema != schema:
continue
elif tables is not None and cls.__tablename__ not in tables:
continue
# Check if we have a single autoincrementing primary key
candidates = [col for col in table.c if col.autoincrement and col.primary_key]
if len(candidates) != 1 or not isinstance(candidates[0].type, db.Integer):
continue
serial_col = candidates[0]
sequence_name = '{}.{}_{}_seq'.format(table.schema, cls.__tablename__, serial_col.name)
query = select([func.setval(sequence_name, func.max(serial_col) + 1)], table)
db.session.execute(query)
db.session.commit()
def protection_from_ac(self, target, ac, acl_attr='acl', ac_attr='allowed', allow_public=False):
"""Convert AccessController data to ProtectionMixin style.
This needs to run inside the context of `patch_default_group_provider`.
:param target: The new object that uses ProtectionMixin
:param ac: The old AccessController
:param acl_attr: The attribute name for the acl of `target`
:param ac_attr: The attribute name for the acl in `ac`
:param allow_public: If the object allows `ProtectionMode.public`.
Otherwise, public is converted to inheriting.
"""
if ac._accessProtection == -1:
target.protection_mode = ProtectionMode.public if allow_public else ProtectionMode.inheriting
elif ac._accessProtection == 0:
target.protection_mode = ProtectionMode.inheriting
elif ac._accessProtection == 1:
target.protection_mode = ProtectionMode.protected
acl = getattr(target, acl_attr)
for principal in getattr(ac, ac_attr):
principal = self.convert_principal(principal)
assert principal is not None
acl.add(principal)
else:
raise ValueError('Unexpected protection: {}'.format(ac._accessProtection))
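    # Usage sketch (object names are hypothetical): map a legacy
    # AccessController onto a new ProtectionMixin-based event, allowing
    # ProtectionMode.public for a top-level object.
    #
    #     self.protection_from_ac(new_event, legacy_event_ac, allow_public=True)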
class TopLevelMigrationStep(Importer):
def run(self):
start = time.time()
self.pre_migrate()
try:
self.migrate()
finally:
self.post_migrate()
self.print_log('%[cyan]{:.06f} seconds%[reset]\a'.format((time.time() - start)))
def pre_migrate(self):
pass
def migrate(self):
raise NotImplementedError
def post_migrate(self):
pass
|
indico/indico-migrate
|
indico_migrate/importer.py
|
Python
|
gpl-3.0
| 6,978
|
"""This module provides REST services for Layers"""
import cherrypy
from LmCommon.common.lmconstants import HTTPStatus
from LmWebServer.common.lmconstants import HTTPMethod
from LmWebServer.services.api.v2.base import LmService
from LmWebServer.services.common.access_control import check_user_permission
from LmWebServer.services.cp_tools.lm_format import lm_formatter
# .............................................................................
@cherrypy.expose
@cherrypy.popargs('path_layer_id')
class LayerService(LmService):
"""Class for layers service.
"""
# ................................
@lm_formatter
def GET(self, path_layer_id=None, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, epsg_code=None, env_code=None,
env_type_id=None, gcm_code=None, layerType=None, limit=100,
offset=0, url_user=None, scenario_id=None, squid=None, **params):
"""GET request. Individual layer, count, or list.
"""
# Layer type:
# 0 - Anything
# 1 - Environmental layer
# 2 - ? (Not implemented yet)
if layerType is None or layerType == 0:
if path_layer_id is None:
return self._list_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, limit=limit,
offset=offset, squid=squid)
if path_layer_id.lower() == 'count':
return self._count_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, squid=squid)
return self._get_layer(path_layer_id, env_layer=False)
if path_layer_id is None:
return self._list_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, limit=limit, offset=offset,
scenario_id=scenario_id)
if path_layer_id.lower() == 'count':
return self._count_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, scenario_code=scenario_id)
return self._get_layer(path_layer_id, env_layer=True)
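    # Illustrative request shapes handled by GET above (the mount point is an
    # example; actual routing depends on the CherryPy configuration):
    #     GET .../layer                -> list generic layers
    #     GET .../layer/count          -> count generic layers
    #     GET .../layer/42             -> one layer by id
    #     GET .../layer/42?layerType=1 -> one environmental layer by id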
# ................................
def _count_env_layers(self, user_id, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, env_code=None,
env_type_id=None, epsg_code=None, gcm_code=None,
scenario_code=None):
"""Count environmental layer objects matching the specified criteria
Args:
user_id: The user to list environmental layers for. Note that this
may not be the same user logged into the system
after_time: Return layers modified after this time (Modified Julian
Day)
alt_pred_code: Return layers with this alternate prediction code
before_time: Return layers modified before this time (Modified
Julian Day)
date_code: Return layers with this date code
env_code: Return layers with this environment code
env_type_id: Return layers with this environmental type
epsg_code: Return layers with this EPSG code
gcm_code: Return layers with this GCM code
            scenario_code: Return layers from this scenario code
"""
layer_count = self.scribe.count_env_layers(
user_id=user_id, env_code=env_code, gcm_code=gcm_code,
alt_pred_code=alt_pred_code, date_code=date_code,
after_time=after_time, before_time=before_time, epsg=epsg_code,
env_type_id=env_type_id, scenario_code=scenario_code)
return {'count': layer_count}
# ................................
def _count_layers(self, user_id, after_time=None, before_time=None,
epsg_code=None, squid=None):
"""Return a count of layers matching the specified criteria
Args:
user_id: The user to list layers for. Note that this may not be
the same user that is logged into the system
after_time: List layers modified after this time (Modified Julian
Day)
before_time: List layers modified before this time (Modified Julian
Day)
epsg_code: Return layers that have this EPSG code
squid: Return layers with this species identifier
"""
layer_count = self.scribe.count_layers(
user_id=user_id, squid=squid, after_time=after_time,
before_time=before_time, epsg=epsg_code)
return {'count': layer_count}
# ................................
def _get_layer(self, path_layer_id, env_layer=False):
"""Attempt to get a layer
"""
try:
_ = int(path_layer_id)
except ValueError:
raise cherrypy.HTTPError(
HTTPStatus.BAD_REQUEST,
'{} is not a valid layer ID'.format(path_layer_id))
if env_layer:
lyr = self.scribe.get_env_layer(lyr_id=path_layer_id)
else:
lyr = self.scribe.get_layer(lyr_id=path_layer_id)
if lyr is None:
raise cherrypy.HTTPError(
HTTPStatus.NOT_FOUND,
                'Layer {} was not found'.format(path_layer_id))
if check_user_permission(self.get_user_id(), lyr, HTTPMethod.GET):
return lyr
raise cherrypy.HTTPError(
HTTPStatus.FORBIDDEN,
'User {} does not have permission to access layer {}'.format(
self.get_user_id(), path_layer_id))
# ................................
def _list_env_layers(self, user_id, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, env_code=None,
env_type_id=None, epsg_code=None, gcm_code=None,
limit=100, offset=0, scenario_id=None):
"""List environmental layer objects matching the specified criteria
Args:
user_id: The user to list environmental layers for. Note that this
may not be the same user logged into the system
after_time: (optional) Return layers modified after this time
(Modified Julian Day)
alt_pred_code: (optional) Return layers with this alternate
prediction code
before_time: (optional) Return layers modified before this time
(Modified Julian Day)
date_code: (optional) Return layers with this date code
env_code: (optional) Return layers with this environment code
env_type_id: (optional) Return layers with this environmental type
epsg_code: (optional) Return layers with this EPSG code
gcm_code: (optional) Return layers with this GCM code
limit: (optional) Return this number of layers, at most
offset: (optional) Offset the returned layers by this number
scenario_id: (optional) Return layers from this scenario
"""
lyr_atoms = self.scribe.list_env_layers(
offset, limit, user_id=user_id, env_code=env_code,
gcm_code=gcm_code, alt_pred_code=alt_pred_code,
date_code=date_code, after_time=after_time,
before_time=before_time, epsg=epsg_code, env_type_id=env_type_id)
return lyr_atoms
# ................................
def _list_layers(self, user_id, after_time=None, before_time=None,
epsg_code=None, limit=100, offset=0, squid=None):
"""Return a list of layers matching the specified criteria
Args:
user_id: The user to list layers for. Note that this may not be
the same user that is logged into the system
after_time: List layers modified after this time (Modified Julian
Day)
before_time: List layers modified before this time (Modified Julian
Day)
epsg_code: Return layers that have this EPSG code
limit: Return this number of layers, at most
offset: Offset the returned layers by this number
squid: Return layers with this species identifier
"""
layer_atoms = self.scribe.list_layers(
offset, limit, user_id=user_id, squid=squid, after_time=after_time,
before_time=before_time, epsg=epsg_code)
return layer_atoms
|
lifemapper/core
|
LmWebServer/services/api/v2/layer.py
|
Python
|
gpl-3.0
| 9,222
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import sys
sys.path.append('./MNIST_data')
import os.path
from download import download
have_data = os.path.exists('MNIST_data/train-images-idx3-ubyte.gz')
if not have_data:
download('./MNIST_data')
# load data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# batch
batch_size = 64
n_batch = mnist.train.num_examples // batch_size
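# e.g. with TensorFlow's 55,000-image MNIST training split and batch_size 64,
# n_batch = 55000 // 64 = 859 full batches per epoch (the remainder is dropped)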
# placeholders: inputs [batch, 28 * 28] flattened images, outputs [batch, 10] one-hot labels
x = tf.placeholder(tf.float32, [None,784])
y = tf.placeholder(tf.float32, [None,10])
keep_prob = tf.placeholder(tf.float32)
# network architecture: 784-1000-500-10
w1 = tf.Variable(tf.truncated_normal([784,1000], stddev=0.1))
b1 = tf.Variable(tf.zeros([1000]) + 0.1)
l1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
l1_drop = tf.nn.dropout(l1, keep_prob)
w2 = tf.Variable(tf.truncated_normal([1000, 500], stddev=0.1))
b2 = tf.Variable(tf.zeros([500]) + 0.1)
l2 = tf.nn.tanh(tf.matmul(l1_drop, w2) + b2)
l2_drop = tf.nn.dropout(l2, keep_prob)
w3 = tf.Variable(tf.truncated_normal([500, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
logits = tf.matmul(l2_drop, w3) + b3
prediction = tf.nn.softmax(logits)
# quadratic (MSE) cost - for regression problems
# loss = tf.losses.mean_squared_error(y, prediction)
# cross-entropy - for classification problems
# (tf.losses.softmax_cross_entropy expects unscaled logits, not softmax output)
loss = tf.losses.softmax_cross_entropy(y, logits)
# gradient descent optimizer
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# save result to a bool array
# 1000 0000 00 -> 0
# 0100 0000 00 -> 1
# ...
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# correct rate, bool -> float ->mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
# init variable
sess.run(tf.global_variables_initializer())
for epoch in range(10):
for batch in range(n_batch):
# get a batch data and label
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(train, feed_dict={x:batch_x, y:batch_y, keep_prob:0.5})
acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})
train_acc = sess.run(accuracy, feed_dict={x:mnist.train.images, y:mnist.train.labels, keep_prob:1.0})
print("Iter " + str(epoch + 1) + ", Testing Accuracy " + str(acc) + ", Training Accuracy " + str(train_acc))
|
shucommon/little-routine
|
python/AI/tensorflow/dropout.py
|
Python
|
gpl-3.0
| 2,353
|
import logging
from time import sleep
from ethereum import keys
from golem.ethereum import Client
from golem.ethereum.paymentprocessor import PaymentProcessor
from golem.transactions.transactionsystem import TransactionSystem
log = logging.getLogger('golem.pay')
class EthereumTransactionSystem(TransactionSystem):
""" Transaction system connected with Ethereum """
def __init__(self, datadir, node_priv_key):
""" Create new transaction system instance for node with given id
:param node_priv_key str: node's private key for Ethereum account (32b)
"""
super(EthereumTransactionSystem, self).__init__()
# FIXME: Passing private key all around might be a security issue.
        # Proper account management is needed.
if not isinstance(node_priv_key, basestring)\
or len(node_priv_key) != 32:
raise ValueError("Invalid private key: {}".format(node_priv_key))
self.__node_address = keys.privtoaddr(node_priv_key)
log.info("Node Ethereum address: " + self.get_payment_address())
self.__eth_node = Client(datadir)
self.__proc = PaymentProcessor(self.__eth_node, node_priv_key,
faucet=True)
self.__proc.start()
def stop(self):
if self.__proc.running:
self.__proc.stop()
if self.__eth_node.node is not None:
self.__eth_node.node.stop()
def add_payment_info(self, *args, **kwargs):
payment = super(EthereumTransactionSystem, self).add_payment_info(
*args,
**kwargs
)
self.__proc.add(payment)
return payment
def get_payment_address(self):
""" Human readable Ethereum address for incoming payments."""
return '0x' + self.__node_address.encode('hex')
def get_balance(self):
if not self.__proc.balance_known():
return None, None, None
gnt = self.__proc.gnt_balance()
av_gnt = self.__proc._gnt_available()
eth = self.__proc.eth_balance()
return gnt, av_gnt, eth
def pay_for_task(self, task_id, payments):
""" Pay for task using Ethereum connector
:param task_id: pay for task with given id
:param dict payments: all payments group by ethereum address
"""
pass
    def sync(self):
        """ Block until the Ethereum node reports that it is no longer syncing. """
        syncing = True
while syncing:
try:
syncing = self.__eth_node.is_syncing()
except Exception as e:
log.error("IPC error: {}".format(e))
syncing = False
else:
sleep(0.5)
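# A minimal usage sketch (values are hypothetical; constructing the system
# starts a local Ethereum client, so this is illustrative only):
#
#     ets = EthereumTransactionSystem('/tmp/golem-datadir', 'k' * 32)
#     print(ets.get_payment_address())  # '0x...' address for incoming payments
#     gnt, available_gnt, eth = ets.get_balance()
#     ets.stop()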
|
scorpilix/Golemtest
|
golem/transactions/ethereum/ethereumtransactionsystem.py
|
Python
|
gpl-3.0
| 2,734
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2, re, json, time, xbmc, traceback
from _header import *
BASE_URL = 'http://cinemaonline.kg/'
BASE_NAME = 'Cinema Online'
BASE_LABEL = 'oc'
GA_CODE = 'UA-34889597-1'
NK_CODE = '1744'
def default_oc_noty():
plugin.notify('Сервер недоступен', BASE_NAME, image=get_local_icon('noty_' + BASE_LABEL))
def get_oc_cookie():
result = {'phpsessid': '', 'utmp': '', 'set': ''}
cookie = plugin.get_storage(BASE_LABEL, TTL=1440)
try:
result['phpsessid'] = cookie['phpsessid']
result['utmp'] = cookie['utmp']
result['set'] = cookie['set']
except:
try:
a = common.fetchPage({'link': BASE_URL})
b = common.fetchPage({'link': BASE_URL + 'cinema.png?' + str(int(time.time()))})
cookie['set'] = a['header']['Set-Cookie'] + '; ' + b['header']['Set-Cookie']
result['set'] = cookie['set']
cookies = common.getCookieInfoAsHTML()
cookie['phpsessid'] = common.parseDOM(cookies, 'cookie', attrs={'name': 'PHPSESSID'}, ret='value')[0]
try:
cookie['utmp'] = common.parseDOM(cookies, 'cookie', attrs={'name': '_utmp'}, ret='value')[0]
except:
cookie['utmp'] = common.parseDOM(cookies, 'cookie', attrs={'name': '__utmp'}, ret='value')[0]
result['phpsessid'] = cookie['phpsessid']
result['utmp'] = cookie['utmp']
except:
pass
return result
COOKIE = '' # get_oc_cookie()
BASE_API_URL = BASE_URL + 'api.php?format=json' # &' + COOKIE['phpsessid'] + '&JsHttpRequest='+str(int(time.time()))+'-xml'
@plugin.route('/site/' + BASE_LABEL)
def oc_index():
items = [{
'label': set_color('[ Поиск ]', 'dialog', True),
'path': plugin.url_for('oc_search'),
'icon': get_local_icon('find')
}, {
'label': set_color('Новинки на CinemaOnline', 'light'),
'path': plugin.url_for('oc_category', id=0)
}, {
'label': set_color('По жанрам', 'bold'),
'path': plugin.url_for('oc_genres')
}, {
'label': 'Бестселлеры',
'path': plugin.url_for('oc_bestsellers')
}, {
'label': 'Лучшие по версии IMDB',
'path': plugin.url_for('oc_category', id=2)
}, {
'label': 'Лучшие по версии КиноПоиск',
'path': plugin.url_for('oc_category', id=9)
}]
return items
@plugin.route('/site/' + BASE_LABEL + '/genre')
def oc_genres():
item_list = get_genres()
items = [{
'label': item['label'],
'path': plugin.url_for('oc_genre', id=item['id'])
} for item in item_list]
return items
@plugin.route('/site/' + BASE_LABEL + '/bestsellers')
def oc_bestsellers():
item_list = get_bestsellers()
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'icon': item['icon'],
} for item in item_list]
return items
@plugin.route('/site/' + BASE_LABEL + '/genre/<id>')
def oc_genre(id):
item_list = get_genre_movie_list(id)
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'properties': item['properties'],
'icon': item['icon'],
} for item in item_list['items']]
if (item_list['sys_items']):
items = add_pagination(items, item_list['sys_items'], 'oc_genre_pagination', id)
return items
@plugin.route('/site/' + BASE_LABEL + '/genre/<id>/<page>')
def oc_genre_pagination(id, page='1'):
page = int(page)
item_list = get_genre_movie_list(id, page)
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'properties': item['properties'],
'icon': item['icon'],
} for item in item_list['items']]
if (item_list['sys_items']):
items = add_pagination(items, item_list['sys_items'], 'oc_genre_pagination', id)
return plugin.finish(items, update_listing=True)
@plugin.route('/site/' + BASE_LABEL + '/category/<id>')
def oc_category(id):
item_list = get_movie_list(id)
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'properties': item['properties'],
'icon': item['icon'],
} for item in item_list['items']]
if (item_list['sys_items']):
items = add_pagination(items, item_list['sys_items'], 'oc_category_pagination', id)
return items
@plugin.route('/site/' + BASE_LABEL + '/category/<id>/<page>')
def oc_category_pagination(id, page='1'):
page = int(page)
item_list = get_movie_list(id, page)
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'properties': item['properties'],
'icon': item['icon'],
} for item in item_list['items']]
if (item_list['sys_items']):
items = add_pagination(items, item_list['sys_items'], 'oc_category_pagination', id)
return plugin.finish(items, update_listing=True)
@plugin.route('/site/' + BASE_LABEL + '/to_page/category/<id>/<page>')
def oc_go_to_page(id, page=1):
search_page = common.getUserInputNumbers('Укажите страницу')
if (search_page):
search_page = int(search_page) - 1 if (int(search_page) > 0) else 1
item_list = get_movie_list(id, search_page)
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'properties': item['properties'],
'icon': item['icon'],
} for item in item_list['items']]
if (item_list['sys_items']):
for item in item_list['sys_items']:
items.insert(0, {
'label': item['label'],
'path': plugin.url_for('oc_go_to_page', id=id, page=item['page']) if (
item['search'] == True ) else plugin.url_for('oc_category_pagination', id=id,
page=item['page']),
'icon': item['icon']
})
return plugin.finish(items, update_listing=True)
else:
plugin.redirect('plugin://' + plugin.id + '/site/' + BASE_LABEL + '/category/' + id + '/' + str(int(page) - 1))
@plugin.route('/site/' + BASE_LABEL + '/movie/<id>')
def oc_movie(id):
item_list = get_movie(id)
# xbmc.log('Item list: ' + str(item_list))
items = [{
# 'title' : item['label'],
'label': item['label'],
'path': item['url'],
'thumbnail': item['icon'],
'properties': item['properties'],
'is_playable': True
} for item in item_list['items']]
if (item_list['playlist']):
# xbmc.log('Item list play: ' + str(item_list['items']))
kgontv_playlist(item_list['items'])
xbmc.executebuiltin('ActivateWindow(VideoPlaylist)')
else:
# xbmc.log('Item play: ' + str(items))
return items
@plugin.route('/site/' + BASE_LABEL + '/search')
def oc_search():
search_val = plugin.keyboard('', 'Что ищете?')
if (search_val):
item_list = get_search_results(str(search_val))
items = [{
'label': item['label'],
'path': plugin.url_for('oc_movie', id=item['id']),
'icon': item['icon'],
} for item in item_list]
return items
else:
plugin.redirect('plugin://' + plugin.id + '/site/' + BASE_LABEL)
# method
def get_bestsellers():
items = []
try:
result = common.fetchPage({'link': BASE_API_URL, 'post_data': {'action[0]': 'Video.getBestsellers'}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
html = result['content']
data = json.loads(html)
for item in data['json'][0]['response']['bestsellers']:
for video in item['movies']:
label = video['name'] + ' [' + item['name'] + ']'
icon = BASE_URL + video['cover']
video_id = video['movie_id']
items.append({
'label': label,
'icon': icon,
'id': video_id
})
except:
default_oc_noty()
return items
# method
def get_genres():
items = []
try:
result = common.fetchPage({'link': BASE_API_URL, 'post_data': {'action[0]': 'Video.getGenres'}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
html = result['content']
data = json.loads(html)
for item in data['json'][0]['response']['genres']:
items.append({
'label': item['name'],
'id': item['id']
})
except:
default_oc_noty()
return items
# method
def get_movie_list(order_id, page='0'):
sys_items = []
items = []
size = 40
try:
offset = int(page) * size
result = common.fetchPage({'link': BASE_API_URL,
'post_data': {'action[0]': 'Video.getCatalog', 'offset[0]': str(offset),
'size[0]': str(size), 'order[0]': order_id}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
data = json.loads(result['content'])
data = data['json'][0]['response']
# ======== pagination ========#
sys_items = KG_get_pagination((offset / size + 1), total=data['total'], size=size, offset=1)
# ======== END pagination ========#
megogo = False
for item in data['movies']:
try:
try:
genres = ' [' + ', '.join(item['genres'][:3]) + ']'
except:
genres = ''
if 'Megogo' not in item['genres']:
imdb = {'rating': '0', 'votes': '0'}
kinopoisk = {'rating': '0', 'votes': '0'}
if ('rating_imdb_value' in item):
imdb = {'rating': item['rating_imdb_value'], 'votes': item['rating_imdb_count']}
if ('rating_kinopoisk_value' in item):
kinopoisk = {'rating': item['rating_kinopoisk_value'],
'votes': item['rating_kinopoisk_count']}
rating = ''
if (imdb['rating'] != '0' and kinopoisk['rating'] != '0'):
rating = ' ' + imdb['rating'] + ' (' + imdb['votes'] + ') / ' + kinopoisk[
'rating'] + ' (' + kinopoisk['votes'] + ')'
country = ''
if ('countries' in item):
country = item['countries'][0]
properties = {
'Country': country,
'PlotOutline': item['description'],
'Plot': item['long_description'],
'Year': item['year'],
'Rating': imdb['rating'],
'Votes': imdb['votes']
}
country = ' (' + country + ')' if (country) else ''
label = common.replaceHTMLCodes('[B]' + item['name'] + '[/B]' + country + genres + rating)
icon = BASE_URL + item['cover']
video_id = item['movie_id']
items.append({
'label': label,
'icon': icon,
'properties': properties,
'id': video_id
})
else:
megogo = True
except:
pass
# if megogo: plugin.notify('Megogo пропущен', BASE_NAME, 1000, get_local_icon('noty_' + BASE_LABEL))
except:
default_oc_noty()
return {'items': items, 'sys_items': sys_items}
# method
def get_genre_movie_list(genre, page='0'):
sys_items = []
items = []
size = 40
order_id = 0
try:
offset = int(page) * size
result = common.fetchPage({'link': BASE_API_URL,
'post_data': {'action[0]': 'Video.getCatalog', 'offset[0]': str(offset),
'size[0]': str(size), 'order[0]': order_id, 'genre[0]': genre}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
data = json.loads(result['content'])
data = data['json'][0]['response']
# ======== pagination ========#
sys_items = KG_get_pagination((offset / size + 1), total=data['total'], size=size, offset=1)
# ======== END pagination ========#
megogo = False
for item in data['movies']:
try:
try:
genres = ' [' + ', '.join(item['genres'][:3]) + ']'
except:
genres = ''
if 'Megogo' not in item['genres']:
imdb = {'rating': '0', 'votes': '0'}
kinopoisk = {'rating': '0', 'votes': '0'}
if ('rating_imdb_value' in item):
imdb = {'rating': item['rating_imdb_value'], 'votes': item['rating_imdb_count']}
if ('rating_kinopoisk_value' in item):
kinopoisk = {'rating': item['rating_kinopoisk_value'],
'votes': item['rating_kinopoisk_count']}
rating = ''
if (imdb['rating'] != '0' and kinopoisk['rating'] != '0'):
rating = ' ' + imdb['rating'] + ' (' + imdb['votes'] + ') / ' + kinopoisk[
'rating'] + ' (' + kinopoisk['votes'] + ')'
country = ''
if ('countries' in item):
country = item['countries'][0]
properties = {
'Country': country,
'PlotOutline': item['description'],
'Plot': item['long_description'],
'Year': item['year'],
'Rating': imdb['rating'],
'Votes': imdb['votes']
}
country = ' (' + country + ')' if (country) else ''
label = common.replaceHTMLCodes('[B]' + item['name'] + '[/B]' + country + genres + rating)
icon = BASE_URL + item['cover']
video_id = item['movie_id']
items.append({
'label': label,
'icon': icon,
'properties': properties,
'id': video_id
})
else:
megogo = True
except:
pass
# if megogo: plugin.notify('Megogo пропущен', BASE_NAME, 1000, get_local_icon('noty_' + BASE_LABEL))
except:
default_oc_noty()
return {'items': items, 'sys_items': sys_items}
# method
def get_search_results(search_value=''):
items = []
try:
result = common.fetchPage({'link': BASE_URL + 'suggestion.php?q=' + urllib2.quote(search_value)})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
data = json.loads(result['content'])
data = data['json'][0]['response']
for item in data['movies']:
try:
label = item['name'] + ' | ' + item['international_name'] + ' (' + item['year'] + ')'
icon = BASE_URL + item['cover']
video_id = item['movie_id']
items.append({
'label': common.replaceHTMLCodes(label),
'icon': icon,
'id': video_id
})
except:
pass
except:
default_oc_noty()
return items
# method
def get_movie(id):
items = []
try:
result = common.fetchPage(
{'link': BASE_API_URL, 'post_data': {'action[0]': 'Video.getMovie', 'movie_id[0]': id}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
data = json.loads(result['content'])
item = data['json'][0]['response']['movie']
icon = BASE_URL + item['covers'][0]['original']
try:
trailer = item['trailer']
try:
name = trailer['name']
except:
name = 'Трейлер'
items.append({
'title': name,
'label': name,
'icon': get_local_icon('kinopoisk'),
'properties': {'fanart_image': trailer['preview']},
'url': trailer['video']
})
except:
pass
for video in item['files']:
try:
label = item['name'] + ': ' + video['name']
url = get_playable_url(video['path']) + UserAgent
try:
fan = video['frames'][0]
except:
fan = ''
properties = {
'duration': video['metainfo']['playtime'],
'fanart_image': fan,
}
items.append({
'title': label,
'label': set_color('ПРОСМОТР: ', 'bold').decode('utf-8') + label,
'icon': icon,
'properties': properties,
'url': url
})
except:
# xbmc.log('Exception : ' + str(traceback.format_exc()))
continue
try:
for other in item['other_movies']:
try:
try:
fan = BASE_URL + other['cover']
except:
fan = ''
properties = {
'fanart_image': fan,
}
items.append({
'title': other['name'],
'label': set_color('ЕЩЕ: ', 'bold').decode('utf-8') + other['name'],
'icon': fan,
'properties': properties,
'url': plugin.url_for('oc_movie', id=other['movie_id'])
})
except:
# xbmc.log('Exception : ' + str(traceback.format_exc()))
continue
except:
# xbmc.log('Exception : ' + str(traceback.format_exc()))
pass
except:
default_oc_noty()
# xbmc.log('Exit list : ' + str(items))
return {'items': items, 'playlist': True if (len(items) > 1) else False}
def get_playable_url(url):
return str(url).replace('/home/video/', 'http://p0.oc.kg:8080/')
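# Example of the rewrite performed above (the path is illustrative):
#     get_playable_url('/home/video/films/sample.mp4')
#     -> 'http://p0.oc.kg:8080/films/sample.mp4'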
|
delletenebre/xbmc-addon-kilogramme
|
plugin.video.kilogramme/resources/lib/site_ockg.py
|
Python
|
gpl-3.0
| 20,423
|
from openpyxl import load_workbook
import os
# list of text to search for
keyWordList = ['Resume', 'Label', 'Description', 'ClueText', 'Title', 'QTEtitle']
# default path for docs on my PC for sh8 game xlsx documents
#docDir = "d:/svn/ue3/SH8Game/Production/Dialogs/"
docDir = "d:/svn/ue3/SH8Game/Production/Data/"
#docDir = "d:/sh8/xlsx_python_tests/"
# output for the log file
logFile = 'd:/sh8/xlsx_python_tests/genlog.txt'
# searching for INT column ID
# returns column serial nubmer
def FindBase(sheetName, keyWord):
for col in range(1,50):
findSpokenCoord = sheetName.cell(row = 1, column = col)
findSpokenVal = findSpokenCoord.value
if findSpokenVal == keyWord:
return col
# searching for all localization columns that are present
# returns a list of column indices
def FindLoc(sheetName, keyWord):
TextColList = []
for col in range(1,100):
findSpokenCoord = sheetName.cell(row = 1, column = col)
findSpokenVal = findSpokenCoord.value
#print findSpokenVal
if findSpokenVal:
if ('.' + keyWord) in findSpokenVal:
TextColList.append(col)
return TextColList
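# Usage sketch (hypothetical header row): for a sheet whose first row holds
# 'Label', 'de.Label', 'fr.Label', FindBase(sheet, 'Label') returns the index
# of the base INT column and FindLoc(sheet, 'Label') returns the indices of
# the 'de.Label' and 'fr.Label' localization columns.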
# comparing INT cell content with localization content
# returns a log string listing cells where the INT and LOC values are identical
# and cells where the LOC cell is empty while the INT cell is not
def FindAndLog(docPath, keyWordList):
# declaring var for storing log
logVal = ''
workBook = load_workbook(docPath)
# for test purposes
print docPath
# obtaining list of all sheets in document
sheetList = workBook.get_sheet_names()
# adding path to log
logVal += docPath + '\n'
# iterating through key words
for keyWord in keyWordList:
# iterating through sheets in document
for sheet in sheetList:
sheetName = workBook[sheet]
intColNum = FindBase(sheetName, keyWord)
locColNumList = FindLoc(sheetName, keyWord)
# checking if INT keyword is present in document
if intColNum:
                # 200 rows is enough, even for comment rows
for row in range(4,200):
intRowCoord = sheetName.cell(row = row, column = intColNum)
# obtaining INT cell value
intRowVal = intRowCoord.value
# checking if INT cell is not empty
if intRowVal:
# iterating through LOC columns in list
for col in locColNumList:
locRowCoord = sheetName.cell(row = row, column = col)
# obtaining LOC cell value
locRowVal = locRowCoord.value
# checking whether LOC cell is duplicate of INT
if intRowVal == locRowVal:
                                #converting non-ASCII characters
#locASCII = str(intRowVal).encode('ascii', 'ignore').decode('ascii')
#print intRowVal
logVal += str(locRowCoord) + str(intRowVal) + '\n'
# checking if LOC cell is empty while INT cell is not
elif locRowVal == None:
logVal += str(locRowCoord) + ' is empty\n'
return logVal
# collecting all .xlsxs from supplied path
genLog = ''
for path, dirs, fileNames in os.walk(docDir):
for fileName in fileNames:
docPath = os.path.join(path, fileName)
# filtering files except .xlsx
if '.xlsx' in docPath:
# filling log
genLog += FindAndLog(docPath, keyWordList)
# writing and saving the log file
filePath = open(logFile, 'wb')
filePath.write(genLog)
filePath.close()
|
AndreySibiryakov/coding
|
py/check_errors_xlsx.py
|
Python
|
gpl-3.0
| 3,308
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from email.header import decode_header
from email.utils import formataddr
from odoo import _, api, fields, models, SUPERUSER_ID, tools
from odoo.exceptions import UserError, AccessError
from odoo.osv import expression
_logger = logging.getLogger(__name__)
def decode(text):
"""Returns unicode() string conversion of the the given encoded smtp header text"""
# TDE proposal: move to tools ?
if text:
text = decode_header(text.replace('\r', ''))
# The joining space will not be needed as of Python 3.3
# See https://hg.python.org/cpython/rev/8c03fe231877
return ' '.join([tools.ustr(x[0], x[1]) for x in text])
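# Example (RFC 2047 encoded word, illustrative):
#     decode('=?utf-8?q?caf=C3=A9?=') -> u'café'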
class Message(models.Model):
""" Messages model: system notification (replacing res.log notifications),
comments (OpenChatter discussion) and incoming emails. """
_name = 'mail.message'
_description = 'Message'
_inherit = ['ir.needaction_mixin']
_order = 'id desc'
_rec_name = 'record_name'
_message_read_limit = 30
@api.model
def _get_default_from(self):
if self.env.user.alias_name and self.env.user.alias_domain:
return formataddr((self.env.user.name, '%s@%s' % (self.env.user.alias_name, self.env.user.alias_domain)))
elif self.env.user.email:
return formataddr((self.env.user.name, self.env.user.email))
raise UserError(_("Unable to send email, please configure the sender's email address or alias."))
@api.model
def _get_default_author(self):
return self.env.user.partner_id
# content
subject = fields.Char('Subject')
date = fields.Datetime('Date', default=fields.Datetime.now)
body = fields.Html('Contents', default='', strip_classes=True)
attachment_ids = fields.Many2many(
'ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id',
string='Attachments',
help='Attachments are linked to a document through model / res_id and to the message '
'through this field.')
parent_id = fields.Many2one(
'mail.message', 'Parent Message', select=True, ondelete='set null',
help="Initial thread message.")
child_ids = fields.One2many('mail.message', 'parent_id', 'Child Messages')
# related document
model = fields.Char('Related Document Model', select=1)
res_id = fields.Integer('Related Document ID', select=1)
record_name = fields.Char('Message Record Name', help="Name get of the related document.")
# characteristics
message_type = fields.Selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification')],
'Type', required=True, default='email',
help="Message type: email for email message, notification for system "
"message, comment for other messages such as user replies",
oldname='type')
subtype_id = fields.Many2one('mail.message.subtype', 'Subtype', ondelete='set null', select=1)
# origin
email_from = fields.Char(
'From', default=_get_default_from,
help="Email address of the sender. This field is set when no matching partner is found and replaces the author_id field in the chatter.")
author_id = fields.Many2one(
'res.partner', 'Author', select=1,
ondelete='set null', default=_get_default_author,
help="Author of the message. If not set, email_from may hold an email address that did not match any partner.")
author_avatar = fields.Binary("Author's avatar", related='author_id.image_small')
# recipients
partner_ids = fields.Many2many('res.partner', string='Recipients')
needaction_partner_ids = fields.Many2many(
'res.partner', 'mail_message_res_partner_needaction_rel', string='Partners with Need Action')
needaction = fields.Boolean(
'Need Action', compute='_get_needaction', search='_search_needaction',
help='Need Action')
channel_ids = fields.Many2many(
'mail.channel', 'mail_message_mail_channel_rel', string='Channels')
# user interface
starred_partner_ids = fields.Many2many(
'res.partner', 'mail_message_res_partner_starred_rel', string='Favorited By')
starred = fields.Boolean(
'Starred', compute='_get_starred', search='_search_starred',
help='Current user has a starred notification linked to this message')
# tracking
tracking_value_ids = fields.One2many(
'mail.tracking.value', 'mail_message_id',
string='Tracking values',
help='Tracked values are stored in a separate model. This field allow to reconstruct '
'the tracking and to generate statistics on the model.')
# mail gateway
no_auto_thread = fields.Boolean(
'No threading for answers',
help='Answers do not go in the original document discussion thread. This has an impact on the generated message-id.')
message_id = fields.Char('Message-Id', help='Message unique identifier', select=1, readonly=1, copy=False)
reply_to = fields.Char('Reply-To', help='Reply email address. Setting the reply_to bypasses the automatic thread creation.')
mail_server_id = fields.Many2one('ir.mail_server', 'Outgoing mail server')
@api.multi
def _get_needaction(self):
""" Need action on a mail.message = notified on my channel """
my_messages = self.sudo().filtered(lambda msg: self.env.user.partner_id in msg.needaction_partner_ids)
for message in self:
message.needaction = message in my_messages
@api.multi
def _is_accessible(self):
self.ensure_one()
return False
@api.model
def _search_needaction(self, operator, operand):
if operator == '=' and operand:
return [('needaction_partner_ids', 'in', self.env.user.partner_id.id)]
return [('needaction_partner_ids', 'not in', self.env.user.partner_id.id)]
@api.depends('starred_partner_ids')
def _get_starred(self):
""" Compute if the message is starred by the current user. """
# TDE FIXME: use SQL
starred = self.sudo().filtered(lambda msg: self.env.user.partner_id in msg.starred_partner_ids)
for message in self:
message.starred = message in starred
@api.model
def _search_starred(self, operator, operand):
if operator == '=' and operand:
return [('starred_partner_ids', 'in', [self.env.user.partner_id.id])]
return [('starred_partner_ids', 'not in', [self.env.user.partner_id.id])]
@api.model
def _needaction_domain_get(self):
return [('needaction', '=', True)]
#------------------------------------------------------
# Notification API
#------------------------------------------------------
@api.model
def mark_all_as_read(self, channel_ids=None, domain=None):
""" Remove all needactions of the current partner. If channel_ids is
given, restrict to messages written in one of those channels. """
partner_id = self.env.user.partner_id.id
if domain is None:
query = "DELETE FROM mail_message_res_partner_needaction_rel WHERE res_partner_id IN %s"
args = [(partner_id,)]
if channel_ids:
query += """
AND mail_message_id in
(SELECT mail_message_id
FROM mail_message_mail_channel_rel
WHERE mail_channel_id in %s)"""
args += [tuple(channel_ids)]
query += " RETURNING mail_message_id as id"
self._cr.execute(query, args)
self.invalidate_cache()
ids = [m['id'] for m in self._cr.dictfetchall()]
else:
# not really efficient method: it does one db request for the
# search, and one for each message in the result set to remove the
# current user from the relation.
msg_domain = [('needaction_partner_ids', 'in', partner_id)]
if channel_ids:
msg_domain += [('channel_ids', 'in', channel_ids)]
unread_messages = self.search(expression.AND([msg_domain, domain]))
unread_messages.sudo().write({'needaction_partner_ids': [(3, partner_id)]})
ids = unread_messages.mapped('id')
notification = {'type': 'mark_as_read', 'message_ids': ids, 'channel_ids': channel_ids}
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), notification)
return ids
@api.multi
def mark_as_unread(self, channel_ids=None):
""" Add needactions to messages for the current partner. """
partner_id = self.env.user.partner_id.id
for message in self:
message.write({'needaction_partner_ids': [(4, partner_id)]})
ids = [m.id for m in self]
notification = {'type': 'mark_as_unread', 'message_ids': ids, 'channel_ids': channel_ids}
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), notification)
@api.multi
def set_message_done(self):
""" Remove the needaction from messages for the current partner. """
partner_id = self.env.user.partner_id
messages = self.filtered(lambda msg: partner_id in msg.needaction_partner_ids)
if not len(messages):
return
messages.sudo().write({'needaction_partner_ids': [(3, partner_id.id)]})
# notifies changes in messages through the bus. To minimize the number of
# notifications, we need to group the messages depending on their channel_ids
groups = []
current_channel_ids = messages[0].channel_ids
current_group = []
for record in messages:
if record.channel_ids == current_channel_ids:
current_group.append(record.id)
else:
groups.append((current_group, current_channel_ids))
current_group = [record.id]
current_channel_ids = record.channel_ids
groups.append((current_group, current_channel_ids))
for (msg_ids, channel_ids) in groups:
notification = {'type': 'mark_as_read', 'message_ids': msg_ids, 'channel_ids': [c.id for c in channel_ids]}
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner_id.id), notification)
@api.model
def unstar_all(self):
""" Unstar messages for the current partner. """
partner_id = self.env.user.partner_id.id
starred_messages = self.search([('starred_partner_ids', 'in', partner_id)])
starred_messages.write({'starred_partner_ids': [(3, partner_id)]})
ids = [m.id for m in starred_messages]
notification = {'type': 'toggle_star', 'message_ids': ids, 'starred': False}
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), notification)
@api.multi
def toggle_message_starred(self):
""" Toggle messages as (un)starred. Technically, the notifications related
to uid are set to (un)starred.
"""
# a user should always be able to star a message he can read
self.check_access_rule('read')
starred = not self.starred
if starred:
self.sudo().write({'starred_partner_ids': [(4, self.env.user.partner_id.id)]})
else:
self.sudo().write({'starred_partner_ids': [(3, self.env.user.partner_id.id)]})
notification = {'type': 'toggle_star', 'message_ids': [self.id], 'starred': starred}
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), notification)
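    # Note: the (4, id) / (3, id) tuples written above are standard Odoo
    # x2many commands: (4, id) links an existing record, (3, id) unlinks it
    # without deleting it. For example:
    #     message.write({'starred_partner_ids': [(4, pid)]})  # star for pid
    #     message.write({'starred_partner_ids': [(3, pid)]})  # unstar for pid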
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
@api.model
def _message_read_dict_postprocess(self, messages, message_tree):
""" Post-processing on values given by message_read. This method will
handle partners in batch to avoid doing numerous queries.
:param list messages: list of message, as get_dict result
:param dict message_tree: {[msg.id]: msg browse record}
"""
# 1. Aggregate partners (author_id and partner_ids), attachments and tracking values
partners = self.env['res.partner']
attachments = self.env['ir.attachment']
trackings = self.env['mail.tracking.value']
for key, message in message_tree.iteritems():
if message.author_id:
partners |= message.author_id
            if message.partner_ids:  # notified (with subtype) or specified (log) recipients
                partners |= message.partner_ids
if message.attachment_ids:
attachments |= message.attachment_ids
if message.tracking_value_ids:
trackings |= message.tracking_value_ids
# Read partners as SUPERUSER -> display the names like classic m2o even if no access
partners_names = partners.sudo().name_get()
partner_tree = dict((partner[0], partner) for partner in partners_names)
# 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
attachments_data = attachments.sudo().read(['id', 'datas_fname', 'name', 'mimetype'])
attachments_tree = dict((attachment['id'], {
'id': attachment['id'],
'filename': attachment['datas_fname'],
'name': attachment['name'],
'mimetype': attachment['mimetype'],
}) for attachment in attachments_data)
# 3. Tracking values
tracking_tree = dict((tracking.id, {
'id': tracking.id,
'changed_field': tracking.field_desc,
'old_value': tracking.get_old_display_value()[0],
'new_value': tracking.get_new_display_value()[0],
'field_type': tracking.field_type,
}) for tracking in trackings)
# 4. Update message dictionaries
for message_dict in messages:
message_id = message_dict.get('id')
message = message_tree[message_id]
if message.author_id:
author = partner_tree[message.author_id.id]
else:
author = (0, message.email_from)
            partner_ids = [partner_tree[partner.id] for partner in message.partner_ids
                           if partner.id in partner_tree]
attachment_ids = []
for attachment in message.attachment_ids:
if attachment.id in attachments_tree:
attachment_ids.append(attachments_tree[attachment.id])
tracking_value_ids = []
for tracking_value in message.tracking_value_ids:
if tracking_value.id in tracking_tree:
tracking_value_ids.append(tracking_tree[tracking_value.id])
message_dict.update({
'author_id': author,
'partner_ids': partner_ids,
'attachment_ids': attachment_ids,
'tracking_value_ids': tracking_value_ids,
})
return True
@api.model
def message_fetch(self, domain, limit=20):
return self.search(domain, limit=limit).message_format()
@api.multi
def message_format(self):
""" Get the message values in the format for web client. Since message values can be broadcasted,
computed fields MUST NOT BE READ and broadcasted.
:returns list(dict).
Example :
{
'body': HTML content of the message
'model': u'res.partner',
'record_name': u'Agrolait',
'attachment_ids': [
{
'file_type_icon': u'webimage',
'id': 45,
'name': u'sample.png',
'filename': u'sample.png'
}
],
'needaction_partner_ids': [], # list of partner ids
'res_id': 7,
'tracking_value_ids': [
{
'old_value': "",
'changed_field': "Customer",
'id': 2965,
'new_value': "Axelor"
}
],
'author_id': (3, u'Administrator'),
                'email_from': 'sacha@pokemon.com', # email address or False
'subtype_id': (1, u'Discussions'),
'channel_ids': [], # list of channel ids
'date': '2015-06-30 08:22:33',
'partner_ids': [[7, "Sacha Du Bourg-Palette"]], # list of partner name_get
'message_type': u'comment',
'id': 59,
                'subject': False,
'is_note': True # only if the subtype is internal
}
"""
message_values = self.read([
'id', 'body', 'date', 'author_id', 'email_from', # base message fields
'message_type', 'subtype_id', 'subject', # message specific
'model', 'res_id', 'record_name', # document related
'channel_ids', 'partner_ids', # recipients
'needaction_partner_ids', # list of partner ids for whom the message is a needaction
'starred_partner_ids', # list of partner ids for whom the message is starred
])
message_tree = dict((m.id, m) for m in self)
self._message_read_dict_postprocess(message_values, message_tree)
# add subtype data (is_note flag, subtype_description). Do it as sudo
# because portal / public may have to look for internal subtypes
subtypes = self.env['mail.message.subtype'].sudo().search(
[('id', 'in', [msg['subtype_id'][0] for msg in message_values if msg['subtype_id']])]).read(['internal', 'description'])
subtypes_dict = dict((subtype['id'], subtype) for subtype in subtypes)
for message in message_values:
message['is_note'] = message['subtype_id'] and subtypes_dict[message['subtype_id'][0]]['internal']
message['subtype_description'] = message['subtype_id'] and subtypes_dict[message['subtype_id'][0]]['description']
return message_values
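    # Usage sketch (hypothetical ids, not from the original source):
    #   messages = self.env['mail.message'].browse([59, 60])
    #   values = messages.message_format()  # -> list of dicts shaped as documented above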
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
@api.model_cr
def init(self):
self._cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
if not self._cr.fetchone():
self._cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
@api.model
def _find_allowed_model_wise(self, doc_model, doc_dict):
doc_ids = doc_dict.keys()
allowed_doc_ids = self.env[doc_model].with_context(active_test=False).search([('id', 'in', doc_ids)]).ids
return set([message_id for allowed_doc_id in allowed_doc_ids for message_id in doc_dict[allowed_doc_id]])
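    # Shape sketch on hypothetical data: doc_dict maps a document id to the
    # message ids attached to it, e.g. {7: {101, 102}, 9: {103}} for
    # doc_model 'res.partner'; the returned set keeps only the messages whose
    # document still exists, here {101, 102, 103}.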
@api.model
def _find_allowed_doc_ids(self, model_ids):
IrModelAccess = self.env['ir.model.access']
allowed_ids = set()
for doc_model, doc_dict in model_ids.iteritems():
if not IrModelAccess.check(doc_model, 'read', False):
continue
allowed_ids |= self._find_allowed_model_wise(doc_model, doc_dict)
return allowed_ids
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Override that adds specific access rights of mail.message, to remove
ids uid could not see according to our custom rules. Please refer to
check_access_rule for more details about those rules.
        Non-employee users see only messages with a subtype (i.e. they do not
        see internal logs).
        After having received ids of a classic search, keep only:
        - if author_id == pid, uid is the author, OR
        - uid belongs to a notified channel, OR
        - uid is in the specified recipients, OR
        - uid has read access to the related document if model, res_id are set
- otherwise: remove the id
"""
# Rules do not apply to administrator
if self._uid == SUPERUSER_ID:
return super(Message, self)._search(
args, offset=offset, limit=limit, order=order,
count=count, access_rights_uid=access_rights_uid)
        # Non-employee users see only messages with a subtype (i.e., no internal logs)
if not self.env['res.users'].has_group('base.group_user'):
args = ['&', '&', ('subtype_id', '!=', False), ('subtype_id.internal', '=', False)] + list(args)
# Perform a super with count as False, to have the ids, not a counter
ids = super(Message, self)._search(
args, offset=offset, limit=limit, order=order,
count=False, access_rights_uid=access_rights_uid)
if not ids and count:
return 0
elif not ids:
return ids
pid = self.env.user.partner_id.id
author_ids, partner_ids, channel_ids, allowed_ids = set([]), set([]), set([]), set([])
model_ids = {}
# check read access rights before checking the actual rules on the given ids
super(Message, self.sudo(access_rights_uid or self._uid)).check_access_rights('read')
self._cr.execute("""SELECT DISTINCT m.id, m.model, m.res_id, m.author_id, partner_rel.res_partner_id, channel_partner.channel_id as channel_id
FROM "%s" m
LEFT JOIN "mail_message_res_partner_rel" partner_rel
ON partner_rel.mail_message_id = m.id AND partner_rel.res_partner_id = (%%s)
LEFT JOIN "mail_message_mail_channel_rel" channel_rel
ON channel_rel.mail_message_id = m.id
LEFT JOIN "mail_channel" channel
ON channel.id = channel_rel.mail_channel_id
LEFT JOIN "mail_channel_partner" channel_partner
ON channel_partner.channel_id = channel.id AND channel_partner.partner_id = (%%s)
WHERE m.id = ANY (%%s)""" % self._table, (pid, pid, ids,))
for id, rmod, rid, author_id, partner_id, channel_id in self._cr.fetchall():
if author_id == pid:
author_ids.add(id)
elif partner_id == pid:
partner_ids.add(id)
elif channel_id:
channel_ids.add(id)
elif rmod and rid:
model_ids.setdefault(rmod, {}).setdefault(rid, set()).add(id)
allowed_ids = self._find_allowed_doc_ids(model_ids)
final_ids = author_ids | partner_ids | channel_ids | allowed_ids
if count:
return len(final_ids)
else:
# re-construct a list based on ids, because set did not keep the original order
id_list = [id for id in ids if id in final_ids]
return id_list
@api.multi
def check_access_rule(self, operation):
""" Access rules of mail.message:
        - read: if
            - author_id == pid, uid is the author, OR
            - uid is in the recipients (partner_ids), OR
            - uid is a member of a listened channel (channel_ids.partner_ids), OR
            - uid has read access to the related document if model, res_id are set
            - otherwise: raise
        - create: if
            - no model, no res_id (private message), OR
            - pid in message_follower_ids if model, res_id are set, OR
            - uid can read the parent, OR
            - uid has write or create access on the related document if model, res_id are set
            - otherwise: raise
        - write: if
            - author_id == pid, uid is the author, OR
            - uid is in the recipients (partner_ids), OR
            - uid has write or create access on the related document if model, res_id are set
            - otherwise: raise
        - unlink: if
            - uid has write or create access on the related document if model, res_id are set
            - otherwise: raise
        Specific case: non-employee users see only messages with a subtype
        (i.e. they do not see internal logs).
"""
def _generate_model_record_ids(msg_val, msg_ids):
""" :param model_record_ids: {'model': {'res_id': (msg_id, msg_id)}, ... }
:param message_values: {'msg_id': {'model': .., 'res_id': .., 'author_id': ..}}
"""
model_record_ids = {}
for id in msg_ids:
vals = msg_val.get(id, {})
if vals.get('model') and vals.get('res_id'):
model_record_ids.setdefault(vals['model'], set()).add(vals['res_id'])
return model_record_ids
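        # Example on hypothetical values: msg_val = {10: {'model': 'res.partner',
        # 'res_id': 7}, 11: {}} with msg_ids = [10, 11] yields
        # {'res.partner': set([7])}; messages without a related document are skipped.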
if self._uid == SUPERUSER_ID:
return
        # Non-employee users see only messages with a subtype (i.e., no internal logs)
if not self.env['res.users'].has_group('base.group_user'):
self._cr.execute('''SELECT DISTINCT message.id, message.subtype_id, subtype.internal
FROM "%s" AS message
LEFT JOIN "mail_message_subtype" as subtype
ON message.subtype_id = subtype.id
WHERE message.message_type = %%s AND (message.subtype_id IS NULL OR subtype.internal IS TRUE) AND message.id = ANY (%%s)''' % (self._table), ('comment', self.ids,))
if self._cr.fetchall():
raise AccessError(
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') %
(self._description, operation))
# Read mail_message.ids to have their values
message_values = dict((res_id, {}) for res_id in self.ids)
if operation in ['read', 'write']:
self._cr.execute("""SELECT DISTINCT m.id, m.model, m.res_id, m.author_id, m.parent_id, partner_rel.res_partner_id, channel_partner.channel_id as channel_id
FROM "%s" m
LEFT JOIN "mail_message_res_partner_rel" partner_rel
ON partner_rel.mail_message_id = m.id AND partner_rel.res_partner_id = (%%s)
LEFT JOIN "mail_message_mail_channel_rel" channel_rel
ON channel_rel.mail_message_id = m.id
LEFT JOIN "mail_channel" channel
ON channel.id = channel_rel.mail_channel_id
LEFT JOIN "mail_channel_partner" channel_partner
ON channel_partner.channel_id = channel.id AND channel_partner.partner_id = (%%s)
WHERE m.id = ANY (%%s)""" % self._table, (self.env.user.partner_id.id, self.env.user.partner_id.id, self.ids,))
for mid, rmod, rid, author_id, parent_id, partner_id, channel_id in self._cr.fetchall():
message_values[mid] = {
'model': rmod,
'res_id': rid,
'author_id': author_id,
'parent_id': parent_id,
'notified': any((message_values[mid].get('notified'), partner_id, channel_id))
}
else:
self._cr.execute("""SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)""" % self._table, (self.ids,))
for mid, rmod, rid, author_id, parent_id in self._cr.fetchall():
message_values[mid] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}
# Author condition (READ, WRITE, CREATE (private))
author_ids = []
if operation == 'read' or operation == 'write':
author_ids = [mid for mid, message in message_values.iteritems()
if message.get('author_id') and message.get('author_id') == self.env.user.partner_id.id]
elif operation == 'create':
author_ids = [mid for mid, message in message_values.iteritems()
if not message.get('model') and not message.get('res_id')]
# Parent condition, for create (check for received notifications for the created message parent)
notified_ids = []
if operation == 'create':
# TDE: probably clean me
parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
if message.get('parent_id')]
self._cr.execute("""SELECT DISTINCT m.id, partner_rel.res_partner_id, channel_partner.partner_id FROM "%s" m
LEFT JOIN "mail_message_res_partner_rel" partner_rel
ON partner_rel.mail_message_id = m.id AND partner_rel.res_partner_id = (%%s)
LEFT JOIN "mail_message_mail_channel_rel" channel_rel
ON channel_rel.mail_message_id = m.id
LEFT JOIN "mail_channel" channel
ON channel.id = channel_rel.mail_channel_id
LEFT JOIN "mail_channel_partner" channel_partner
ON channel_partner.channel_id = channel.id AND channel_partner.partner_id = (%%s)
WHERE m.id = ANY (%%s)""" % self._table, (self.env.user.partner_id.id, self.env.user.partner_id.id, parent_ids,))
not_parent_ids = [mid[0] for mid in self._cr.fetchall() if any([mid[1], mid[2]])]
notified_ids += [mid for mid, message in message_values.iteritems()
if message.get('parent_id') in not_parent_ids]
# Recipients condition, for read and write (partner_ids) and create (message_follower_ids)
other_ids = set(self.ids).difference(set(author_ids), set(notified_ids))
model_record_ids = _generate_model_record_ids(message_values, other_ids)
if operation in ['read', 'write']:
notified_ids = [mid for mid, message in message_values.iteritems() if message.get('notified')]
elif operation == 'create':
for doc_model, doc_ids in model_record_ids.items():
followers = self.env['mail.followers'].sudo().search([
('res_model', '=', doc_model),
('res_id', 'in', list(doc_ids)),
('partner_id', '=', self.env.user.partner_id.id),
])
fol_mids = [follower.res_id for follower in followers]
notified_ids += [mid for mid, message in message_values.iteritems()
if message.get('model') == doc_model and message.get('res_id') in fol_mids]
# CRUD: Access rights related to the document
other_ids = other_ids.difference(set(notified_ids))
model_record_ids = _generate_model_record_ids(message_values, other_ids)
document_related_ids = []
for model, doc_ids in model_record_ids.items():
DocumentModel = self.env[model]
mids = DocumentModel.browse(doc_ids).exists()
if hasattr(DocumentModel, 'check_mail_message_access'):
DocumentModel.check_mail_message_access(mids.ids, operation) # ?? mids ?
else:
self.env['mail.thread'].check_mail_message_access(mids.ids, operation, model_name=model)
document_related_ids += [mid for mid, message in message_values.iteritems()
if message.get('model') == model and message.get('res_id') in mids.ids]
# Calculate remaining ids: if not void, raise an error
other_ids = other_ids.difference(set(document_related_ids))
if not other_ids:
return
raise AccessError(
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') %
(self._description, operation))
@api.model
def _get_record_name(self, values):
""" Return the related document name, using name_get. It is done using
SUPERUSER_ID, to be sure to have the record name correctly stored. """
model = values.get('model', self.env.context.get('default_model'))
res_id = values.get('res_id', self.env.context.get('default_res_id'))
if not model or not res_id or model not in self.pool:
return False
return self.env[model].sudo().browse(res_id).name_get()[0][1]
@api.model
def _get_reply_to(self, values):
""" Return a specific reply_to: alias of the document through
message_get_reply_to or take the email_from """
        model, res_id, email_from = values.get('model', self._context.get('default_model')), values.get('res_id', self._context.get('default_res_id')), values.get('email_from')  # ctx values / default_get res ?
if model:
# return self.env[model].browse(res_id).message_get_reply_to([res_id], default=email_from)[res_id]
return self.env[model].message_get_reply_to([res_id], default=email_from)[res_id]
else:
# return self.env['mail.thread'].message_get_reply_to(default=email_from)[None]
return self.env['mail.thread'].message_get_reply_to([None], default=email_from)[None]
@api.model
def _get_message_id(self, values):
if values.get('no_auto_thread', False) is True:
message_id = tools.generate_tracking_message_id('reply_to')
elif values.get('res_id') and values.get('model'):
message_id = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
else:
message_id = tools.generate_tracking_message_id('private')
return message_id
@api.model
def create(self, values):
# coming from mail.js that does not have pid in its values
if self.env.context.get('default_starred'):
self = self.with_context({'default_starred_partner_ids': [(4, self.env.user.partner_id.id)]})
if 'email_from' not in values: # needed to compute reply_to
values['email_from'] = self._get_default_from()
if not values.get('message_id'):
values['message_id'] = self._get_message_id(values)
if 'reply_to' not in values:
values['reply_to'] = self._get_reply_to(values)
if 'record_name' not in values and 'default_record_name' not in self.env.context:
values['record_name'] = self._get_record_name(values)
message = super(Message, self).create(values)
message._notify(force_send=self.env.context.get('mail_notify_force_send', True),
user_signature=self.env.context.get('mail_notify_user_signature', True))
return message
@api.multi
def read(self, fields=None, load='_classic_read'):
""" Override to explicitely call check_access_rule, that is not called
by the ORM. It instead directly fetches ir.rules and apply them. """
self.check_access_rule('read')
return super(Message, self).read(fields=fields, load=load)
@api.multi
def unlink(self):
# cascade-delete attachments that are directly attached to the message (should only happen
# for mail.messages that act as parent for a standalone mail.mail record).
self.check_access_rule('unlink')
self.mapped('attachment_ids').filtered(
lambda attach: attach.res_model == self._name and (attach.res_id in self.ids or attach.res_id == 0)
).unlink()
return super(Message, self).unlink()
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
@api.multi
def _notify(self, force_send=False, user_signature=True):
""" Add the related record followers to the destination partner_ids if is not a private message.
Call mail_notification.notify to manage the email sending
"""
group_user = self.env.ref('base.group_user')
        # have a sudoed copy to manipulate partners (public users can go here with
        # website modules like forum / blog / ...)
self_sudo = self.sudo()
# TDE CHECK: add partners / channels as arguments to be able to notify a message with / without computation ??
        self.ensure_one() # tde: not sure, just for testing, will see
partners = self.env['res.partner'] | self.partner_ids
channels = self.env['mail.channel'] | self.channel_ids
# all followers of the mail.message document have to be added as partners and notified
# and filter to employees only if the subtype is internal
if self_sudo.subtype_id and self.model and self.res_id:
followers = self.env['mail.followers'].sudo().search([
('res_model', '=', self.model),
('res_id', '=', self.res_id)
]).filtered(lambda fol: self.subtype_id in fol.subtype_ids)
if self_sudo.subtype_id.internal:
followers = followers.filtered(lambda fol: fol.channel_id or (fol.partner_id.user_ids and group_user in fol.partner_id.user_ids[0].mapped('groups_id')))
channels = self_sudo.channel_ids | followers.mapped('channel_id')
partners = self_sudo.partner_ids | followers.mapped('partner_id')
else:
channels = self_sudo.channel_ids
partners = self_sudo.partner_ids
# remove author from notified partners
if not self._context.get('mail_notify_author', False) and self_sudo.author_id:
partners = partners - self_sudo.author_id
# update message
self.write({'channel_ids': [(6, 0, channels.ids)], 'needaction_partner_ids': [(6, 0, partners.ids)]})
# notify partners and channels
partners._notify(self, force_send=force_send, user_signature=user_signature)
channels._notify(self)
# Discard cache, because child / parent allow reading and therefore
# change access rights.
if self.parent_id:
self.parent_id.invalidate_cache()
return True
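    # Flow sketch (illustrative, not from the original source): a comment posted
    # on a followed document ends up notifying
    #   channels = message channels | follower channels
    #   partners = recipients | follower partners - author
    # after which partners._notify() handles emails / needactions and
    # channels._notify() pushes the message on the bus.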
|
ayepezv/GAD_ERP
|
addons/mail/models/mail_message.py
|
Python
|
gpl-3.0
| 38,977
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2022 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
def popup_list(op, title: str, msgs: list, icon: str = "INFO") -> None:
def draw(self, context):
for text in msgs:
self.layout.label(text=text)
op.report({"INFO"}, f"{title}:")
for text in msgs:
op.report({"INFO"}, text)
bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
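# Usage sketch (hypothetical messages; assumes the call happens inside an
# Operator's execute(), so the operator instance passed as `op` has a valid
# report() method):
#   popup_list(self, "Report", ["2 gems have no material", "1 object is hidden"], icon="ERROR")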
|
mrachinskiy/jewelcraft
|
lib/ui_lib.py
|
Python
|
gpl-3.0
| 1,174
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/gpl-3.0.txt>.
'''
import sys
sys.path.append('../shared')
import functions as f
import ape, operator
def main():
d_output = ape.format_results('results.json')
crlf = '\r\n'
output = []
s = '======================================='
for item in sorted(d_output.iteritems(), key = operator.itemgetter(0)):
d_item = item[1]
f.append(output, s + crlf + 'Propuestas tarea - ' + item[0] + (' (' + d_item['task_id'] + ')') + crlf + s)
f.append(output, d_item['breadcrumbs'])
f.append(output, d_item['pages'] + crlf + '------------------')
answers = d_item['answers']
for answer in answers:
answ = answer
if 'desconocido' in answer:
answer = answer.split('_')
answer = answer[0] + ' (' + answer[1] + ')'
else:
answer = '(' + str(answer) + ')'
f.append(output, 'Propuestas analista ' + answer + crlf + '---------------------------------------')
            f.append(output, 'Hora de inicio: ' + f.formatTime(answers[answ]['answer_start_date']) + crlf + 'Hora de fin: ' + f.formatTime(answers[answ]['answer_end_date']))
for item in answers[answ]['answer'].split('\n'):
if item.replace(' ', '') != '':
f.append(output, item + crlf + '----------')
f.write_file('propuestas.txt', str(crlf * 2).join(output))
if __name__ == '__main__':
main()
|
proyectos-analizo-info/pyanalizo
|
src/app-ape/deprecated/get-propuestas-electorales-v5.py
|
Python
|
gpl-3.0
| 2,267
|
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from .AbstractEval import AbstractEval
class DcgEval(AbstractEval):
"""Compute DCG (with gain = 2**rel-1 and log2 discount)."""
def get_dcg(self, ranked_labels, cutoff=-1):
"""
Get the dcg value of a list ranking.
        Does not check whether the number of ranked labels is smaller than the cutoff.
"""
if (cutoff == -1):
cutoff = len(ranked_labels)
rank = np.arange(cutoff)
return ((np.power(2, np.asarray(ranked_labels[:cutoff])) - 1) /
np.log2(2 + rank)).sum()
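    # Worked example (values computed by hand, not part of the original source):
    # get_dcg([2, 1, 0]) = (2**2 - 1)/log2(2) + (2**1 - 1)/log2(3) + (2**0 - 1)/log2(4)
    #                    = 3.0 + 0.6309... + 0.0 ≈ 3.631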
def evaluate_ranking(self, ranking, query, cutoff=-1):
"""Compute NDCG for the provided ranking. The ranking is expected
to contain document ids in rank order."""
if cutoff == -1 or cutoff > len(ranking):
cutoff = len(ranking)
if query.has_ideal():
ideal_dcg = query.get_ideal()
else:
ideal_labels = list(reversed(sorted(query.get_labels())))[:cutoff]
ideal_dcg = self.get_dcg(ideal_labels, cutoff)
query.set_ideal(ideal_dcg)
if ideal_dcg == .0:
# return 0 when there are no relevant documents. This is consistent
# with letor evaluation tools; an alternative would be to return
# 0.5 (e.g., used by the yahoo learning to rank challenge tools)
return 0.0
# get labels for the sorted docids
sorted_labels = [0] * cutoff
for i in range(cutoff):
sorted_labels[i] = query.get_label(ranking[i])
dcg = self.get_dcg(sorted_labels, cutoff)
return dcg / ideal_dcg
def get_value(self, ranking, labels, orientations, cutoff=-1):
"""
Compute the value of the metric
- ranking contains the list of documents to evaluate
- labels are the relevance labels for all the documents, even those
that are not in the ranking; labels[doc.get_id()] is the relevance of
doc
- orientations contains orientation values for the verticals;
orientations[doc.get_type()] is the orientation value for the
doc (from 0 to 1).
"""
return self.get_dcg([labels[doc.get_id()] for doc in ranking], cutoff)
|
m0re4u/LeRoT-SCLP
|
lerot/evaluation/DcgEval.py
|
Python
|
gpl-3.0
| 2,915
|
# -*- coding: utf-8 -*-
"""Package for suites and tests related to bots.modules package"""
import pytest
from qacode.core.bots.modules.nav_base import NavBase
from qacode.core.exceptions.core_exception import CoreException
from qacode.core.testing.asserts import Assert
from qacode.core.testing.test_info import TestInfoBotUnique
from qacode.utils import settings
from selenium.webdriver.remote.webelement import WebElement
ASSERT = Assert()
SETTINGS = settings(file_path="qacode/configs/")
SKIP_NAVS = SETTINGS['tests']['skip']['bot_navigations']
SKIP_NAVS_MSG = 'bot_navigations DISABLED by config file'
class TestNavBase(TestInfoBotUnique):
"""Test Suite for class NavBase"""
app = None
page = None
@classmethod
def setup_class(cls, **kwargs):
"""Setup class (suite) to be executed"""
super(TestNavBase, cls).setup_class(
config=settings(file_path="qacode/configs/"),
skip_force=SKIP_NAVS)
def setup_method(self, test_method, close=True):
"""Configure self.attribute"""
super(TestNavBase, self).setup_method(
test_method,
config=settings(file_path="qacode/configs/"))
self.add_property('app', self.cfg_app('qadmin'))
self.add_property('page', self.cfg_page('qacode_login'))
self.add_property('txt_username', self.cfg_control('txt_username'))
self.add_property('txt_password', self.cfg_control('txt_password'))
self.add_property('btn_submit', self.cfg_control('btn_submit'))
self.add_property('lst_ordered', self.cfg_control('lst_ordered'))
self.add_property(
'lst_ordered_child', self.cfg_control('lst_ordered_child'))
self.add_property('dd_menu_data', self.cfg_control('dd_menu_data'))
self.add_property(
'dd_menu_data_lists', self.cfg_control('dd_menu_data_lists'))
self.add_property(
'btn_click_invisible', self.cfg_control('btn_click_invisible'))
self.add_property(
'btn_click_visible', self.cfg_control('btn_click_visible'))
self.add_property('title_buttons', self.cfg_control('title_buttons'))
def setup_login_to_inputs(self):
"""Do login before to exec some testcases"""
# setup_login
self.bot.navigation.get_url(self.page.get('url'), wait_for_load=10)
txt_username = self.bot.navigation.find_element(
self.txt_username.get("selector"))
txt_password = self.bot.navigation.find_element(
self.txt_password.get("selector"))
btn_submit = self.bot.navigation.find_element(
self.btn_submit.get("selector"))
self.bot.navigation.ele_write(txt_username, "admin")
self.bot.navigation.ele_write(txt_password, "admin")
self.bot.navigation.ele_click(btn_submit)
# end setup_login
def setup_login_to_data(self):
"""Do login before to exec some testcases"""
# setup_login
self.bot.navigation.get_url(self.page.get('url'), wait_for_load=10)
txt_username = self.bot.navigation.find_element(
self.txt_username.get("selector"))
txt_password = self.bot.navigation.find_element(
self.txt_password.get("selector"))
btn_submit = self.bot.navigation.find_element(
self.btn_submit.get("selector"))
self.bot.navigation.ele_write(txt_username, "admin")
self.bot.navigation.ele_write(txt_password, "admin")
self.bot.navigation.ele_click(btn_submit)
self.bot.navigation.ele_click(
self.bot.navigation.find_element_wait(
self.dd_menu_data.get("selector")))
self.bot.navigation.ele_click(
self.bot.navigation.find_element_wait(
self.dd_menu_data_lists.get("selector")))
# end setup_login
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_navbase_instance(self):
"""Testcase: test_navbase_instance"""
ASSERT.is_instance(self.bot.navigation, NavBase)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_gourl_withoutwaits(self):
"""Testcase: test_gourl_withoutwaits"""
self.bot.navigation.get_url(self.page.get('url'))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_gourl_withwaits(self):
"""Testcase: test_gourl_withwaits"""
self.bot.navigation.get_url(
self.page.get('url'), wait_for_load=1)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getcurrenturl_ok(self):
"""Testcase: test_getcurrenturl_ok"""
ASSERT.equals(
self.bot.navigation.get_current_url(),
self.page.get('url'))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_true(self):
"""Testcase: test_isurl_true"""
ASSERT.true(
self.bot.navigation.is_url(
self.bot.navigation.get_current_url()))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_false(self):
"""Testcase: test_isurl_false"""
ASSERT.false(self.bot.navigation.is_url(""))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_raiseswhenurlreturnfalse(self):
"""Testcase: test_isurl_false"""
with pytest.raises(CoreException):
self.bot.navigation.is_url("", ignore_raises=False)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_reload_ok(self):
"""Testcase: test_reload_ok"""
self.bot.navigation.reload()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_forward_ok(self):
"""Testcase: test_reload_ok"""
self.bot.navigation.forward()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getmaximizewindow_ok(self):
"""Testcase: test_getmaximizewindow_ok"""
self.bot.navigation.get_maximize_window()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getcapabilities_ok(self):
"""Testcase: test_getcapabilities_ok"""
caps = self.bot.navigation.get_capabilities()
ASSERT.is_instance(caps, dict)
ASSERT.is_instance(caps['chrome'], dict)
ASSERT.equals(caps['browserName'], 'chrome')
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getlog_ok(self):
"""Testcase: test_getlog_ok"""
self.bot.navigation.get_url(self.page.get('url'))
log_data = self.bot.navigation.get_log()
ASSERT.not_none(log_data)
self.log.debug("selenium logs, browser={}".format(log_data))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
@pytest.mark.parametrize(
"log_name", [None, 'browser', 'driver', 'client', 'server'])
def test_getlog_lognames(self, log_name):
"""Testcase: test_getlog_lognames"""
self.bot.navigation.get_url(self.page.get('url'))
if log_name is None:
with pytest.raises(CoreException):
self.bot.navigation.get_log(log_name=log_name)
return True
log_data = self.bot.navigation.get_log(log_name=log_name)
ASSERT.not_none(log_data)
msg = "selenium logs, log_name={}, log_data={}".format(
log_name, log_data)
self.log.debug(msg)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_ok(self):
"""Testcase: test_findelement_ok"""
ASSERT.is_instance(
self.bot.navigation.find_element("body"),
WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_notfound(self):
"""Testcase: test_findelement_notfound"""
with pytest.raises(CoreException):
self.bot.navigation.find_element("article")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_notlocator(self):
"""Testcase: test_findelement_notlocator"""
with pytest.raises(CoreException):
self.bot.navigation.find_element(
"body", locator=None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelementwait_ok(self):
"""Testcase: test_findelementwait_ok"""
ASSERT.is_instance(
self.bot.navigation.find_element_wait("body"),
WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelementswait_ok(self):
"""Testcase: test_findelementwait_ok"""
elements = self.bot.navigation.find_elements_wait("body>*")
ASSERT.is_instance(elements, list)
for element in elements:
ASSERT.is_instance(element, WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelements_ok(self):
"""Testcase: test_findelement_ok"""
elements = self.bot.navigation.find_elements("body>*")
ASSERT.is_instance(elements, list)
for element in elements:
ASSERT.is_instance(element, WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelements_notfound(self):
"""Testcase: test_findelements_notfound"""
with pytest.raises(CoreException):
self.bot.navigation.find_elements("article")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelements_notlocator(self):
"""Testcase: test_findelements_notlocator"""
with pytest.raises(CoreException):
self.bot.navigation.find_elements(
"body", locator=None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getwindowhandle_ok(self):
"""Testcase: test_getwindowhandle_ok"""
ASSERT.not_none(
self.bot.navigation.get_window_handle())
@pytest.mark.skipIf(
True, "Depends of remote+local webdrivers to get working")
def test_addcookie_ok(self):
"""Testcase: test_addcookie_ok"""
cookie = {"name": "test_cookie", "value": "test_value"}
self.bot.navigation.add_cookie(cookie)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_addcookie_notparams(self):
"""Testcase: test_addcookie_ok"""
with pytest.raises(CoreException):
self.bot.navigation.add_cookie(None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_addcookie_badcookiekeys(self):
"""Testcase: test_addcookie_ok"""
with pytest.raises(CoreException):
self.bot.navigation.add_cookie({})
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getcookies_ok(self):
"""Testcase: test_getcookies_ok"""
ASSERT.is_instance(
self.bot.navigation.get_cookies(),
list)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_deletecookiebykey_ok(self):
"""Testcase: test_deleteallcookies_ok"""
self.bot.navigation.delete_cookie_by_key("")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_deleteallcookies_ok(self):
"""Testcase: test_deleteallcookies_ok"""
self.bot.navigation.delete_cookies()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_setwindowsize_ok(self):
"""Testcase: test_setwindowsize_ok"""
self.bot.navigation.set_window_size(
pos_x=1024, pos_y=768)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_gettitle_ok(self):
"""Testcase: test_gettitle_ok"""
ASSERT.not_none(
self.bot.navigation.get_title())
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getscreenshotasbase64_ok(self):
"""Testcase: test_getscreenshotasbase64_ok"""
ASSERT.not_none(
self.bot.navigation.get_screenshot_as_base64())
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_jssettimeout_ok(self):
"""Testcase: test_jssettimeout_ok"""
self.bot.navigation.js_set_timeout(1)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_eleclick_okbyselector(self):
"""Testcase: test_eleclick_ok"""
self.bot.navigation.ele_click(selector="body")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_eleclick_okbyelement(self):
"""Testcase: test_eleclick_ok"""
self.bot.navigation.ele_click(
element=self.bot.navigation.find_element("body"))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_eleclick_notparams(self):
"""Testcase: test_eleclick_notparams"""
with pytest.raises(CoreException):
self.bot.navigation.ele_click()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewrite_ok(self):
"""Testcase: test_elewrite_ok"""
self.bot.navigation.ele_write(
self.bot.navigation.find_element("body"),
text="test")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewrite_okwithouttext(self):
"""Testcase: test_elewrite_ok"""
self.bot.navigation.ele_write(
self.bot.navigation.find_element("body"),
text=None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewrite_notparams(self):
"""Testcase: test_elewrite_notparams"""
with pytest.raises(CoreException):
self.bot.navigation.ele_write(None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_setwebelement_ok(self):
"""Testcase: test_setwebelement_ok"""
self.bot.navigation.set_web_element("test-element")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelementchild_ok(self):
"""Testcase: test_findelementchild_ok"""
self.setup_login_to_data()
ele_parent = self.bot.navigation.find_element_wait(
self.lst_ordered.get("selector"))
ASSERT.is_instance(ele_parent, WebElement)
ele_child = self.bot.navigation.find_element_child(
ele_parent, self.lst_ordered_child.get("selector"))
ASSERT.is_instance(ele_child, WebElement)
ASSERT.equals(
"Item list01", self.bot.navigation.ele_text(ele_child))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelementchildren_ok(self):
"""Testcase: test_findelementchildren_ok"""
self.setup_login_to_data()
ele_parent = self.bot.navigation.find_element_wait(
self.lst_ordered.get("selector"))
ASSERT.is_instance(ele_parent, WebElement)
ele_children = self.bot.navigation.find_element_children(
ele_parent, self.lst_ordered_child.get("selector"))
ASSERT.is_instance(ele_children, list)
ASSERT.greater(len(ele_children), 1)
ASSERT.lower(len(ele_children), 5)
ASSERT.equals(
"Item list01",
self.bot.navigation.ele_text(ele_children[0]))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewaitinvisible_ok(self):
"""Testcase: test_elewaitinvisible_ok"""
self.setup_login_to_inputs()
selector = self.btn_click_invisible.get("selector")
ele = self.bot.navigation.find_element_wait(selector)
ele.click()
# end setup
ele = self.bot.navigation.ele_wait_invisible(selector, timeout=7)
ASSERT.is_instance(ele, WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewaitvisible_ok(self):
"""Testcase: test_elewaitvisible_ok"""
self.setup_login_to_inputs()
find_ele = self.bot.navigation.find_element_wait
ele = find_ele(self.btn_click_invisible.get("selector"))
ele.click()
ele_invisible = find_ele(self.btn_click_visible.get("selector"))
# end setup
ele_visible = self.bot.navigation.ele_wait_visible(
ele_invisible, timeout=7)
ASSERT.is_instance(ele_visible, WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewaittext_ok(self):
"""Testcase: test_elewaitvalue_ok"""
self.setup_login_to_inputs()
selector = self.btn_click_invisible.get("selector")
selector_title = self.title_buttons.get("selector")
ele_text = self.bot.navigation.find_element_wait(selector)
ele_text.click()
# end setup
is_changed = self.bot.navigation.ele_wait_text(
selector_title, "Buttonss", timeout=12)
ASSERT.true(is_changed)
        ASSERT.equals(
            self.bot.navigation.ele_text(ele_text),
            "Buttonss")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_elewaitvalue_ok(self):
"""Testcase: test_elewaitvalue_ok"""
self.setup_login_to_inputs()
selector = self.btn_click_invisible.get("selector")
ele_text = self.bot.navigation.find_element_wait(selector)
ele_text.click()
# end setup
is_changed = self.bot.navigation.ele_wait_value(
selector, "bad_text", timeout=12)
ASSERT.true(is_changed)
        ASSERT.equals(
            self.bot.navigation.ele_attribute(ele_text, "value"),
            "bad_text")
|
netzulo/qacode
|
tests/001_functionals/suite_004_navbase.py
|
Python
|
gpl-3.0
| 16,880
|
from django.db import models
from .common_info import CommonInfo
from django.utils import timezone
from django.urls import reverse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.core.validators import URLValidator
def validate_nonzero(value):
if value == 0:
raise ValidationError(
_("Quantity {} is not allowed".format(value)), params={"value": value}
)
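# Behaviour sketch: validate_nonzero(0) raises ValidationError, while any other
# value passes silently; non-negativity is enforced separately by the
# PositiveIntegerField that uses this validator below.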
class DataSource(CommonInfo):
"""A parent container for DataGroup objects"""
STATE_CHOICES = (
("AT", "Awaiting Triage"),
("IP", "In Progress"),
("CO", "Complete"),
("ST", "Stale"),
)
PRIORITY_CHOICES = (("HI", "High"), ("MD", "Medium"), ("LO", "Low"))
title = models.CharField(max_length=50)
url = models.CharField(max_length=150, blank=True, validators=[URLValidator()])
estimated_records = models.PositiveIntegerField(
default=47,
validators=[validate_nonzero],
help_text="Estimated number of documents that the data source will eventually contain.",
)
state = models.CharField(max_length=2, choices=STATE_CHOICES, default="AT")
description = models.TextField(blank=True)
priority = models.CharField(max_length=2, choices=PRIORITY_CHOICES, default="HI")
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("data_source_edit", kwargs={"pk": self.pk})
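# Usage sketch (hypothetical values; note that plain save() skips field
# validators, so full_clean() is what actually triggers validate_nonzero):
#   ds = DataSource(title="Example source", estimated_records=1200)
#   ds.full_clean()
#   ds.save()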
|
HumanExposure/factotum
|
dashboard/models/data_source.py
|
Python
|
gpl-3.0
| 1,472
|
# -*- coding: utf-8 -*-
import os
from collections import namedtuple
from copy import copy
Colour = namedtuple('Colour', 'r,g,b')
Colour.copy = lambda self: copy(self)
black = Colour(0, 0, 0)
white = Colour(255, 255, 255) # Colour ranges are not enforced.
class Bitmap():
def __init__(self, width=40, height=40, background=white):
assert width > 0 and height > 0 and type(background) == Colour
self.width = width
self.height = height
self.background = background
self.map = [[background.copy() for w in range(width)]
for h in range(height)]
def fillrect(self, x, y, width, height, colour=black):
assert x >= 0 and y >= 0 and width > 0 and height > 0 and type(
colour) == Colour
for h in range(height):
for w in range(width):
self.map[y + h][x + w] = colour.copy()
def chardisplay(self):
txt = [''.join(' ' if bit == self.background else '@'
for bit in row)
for row in self.map]
# Boxing
txt = ['|' + row + '|' for row in txt]
txt.insert(0, '+' + '-' * self.width + '+')
txt.append('+' + '-' * self.width + '+')
print('\n'.join(reversed(txt)))
def set(self, x, y, colour=black):
assert type(colour) == Colour
self.map[y][x] = colour
def get(self, x, y):
return self.map[y][x]
bitmap = Bitmap(20, 10)
bitmap.fillrect(4, 5, 6, 3)
assert bitmap.get(5, 5) == black
assert bitmap.get(0, 1) == white
bitmap.set(0, 1, black)
assert bitmap.get(0, 1) == black
bitmap.chardisplay()
'''
The origin, (0, 0), is the lower left, with x increasing to the right,
and y increasing upwards.
The program above produces the following display:
+--------------------+
| |
| |
| @@@@@@ |
| @@@@@@ |
| @@@@@@ |
| |
| |
| |
|@ |
| |
+--------------------+
'''
os.system("pause")
|
NicovincX2/Python-3.5
|
Représentations graphiques/Bitmap/bitmap.py
|
Python
|
gpl-3.0
| 2,087
|
import bpy
from functions import *
class Combination():
'''A class containing all properties and methods
relative to combination settings for
Curve To Frame addon'''
def update_curves( self, context ):
		'''Method that must be overridden: update curves when settings have changed.'''
type(self).update_curves( self, context )
######################################
## combination settings ##
######################################
# method used to combine amplitude and peaks curve
combination_mode = bpy.props.EnumProperty(
name = 'Mode',
description = 'the way to combine amplitude and peaks curve',
default = 'ignore_peaks',
items = [
# (identifier, name,
# description, number)
('multiply', 'Peaks Curve Multiplied by amplitude',
'peaks is multiplied by \
amplitude percentage of maxi', 0),
('clamp_key', 'Peaks Keyframe Clamped to amplitude',
'peaks keyframe is clamped by amplitude', 1),
('clamp_curve', 'Peaks Curve Clamped to amplitude',
'all peaks value is clamped by amplitude', 2),
('ignore_amplitude', 'Only use peaks curve',
'Only use peaks curve', 3),
('ignore_peaks', 'Only use amplitude curve',
'Only use amplitude curve', 4)
],
update = update_curves
)
# combination of net amplitude and peaks curves
combination = bpy.props.FloatProperty(
name = "combination",
description = "Only to visualize the combination of \
peaks and amplitude curve curve. Can't \
be edit manually: use rate and amplitude settings.",
default = 0,
min = 0,
max = 1)
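	# Mode summary (illustrative, mirroring the per-frame loop in
	# update_combination_curve below):
	#   multiply (0):         combination = clamp(peaks, 0, 1) * amplitude_net
	#   clamp_curve (2):      combination = clamp(min(amplitude_net, peaks), 0, 1)
	#   ignore_amplitude (3): combination follows the peaks curve only
	#   ignore_peaks (4):     combination follows the amplitude curve only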
def update_combination_curve(
self,
clip,
context,
amplitude_net_curve,
peaks_curve):
'''update clip combination curve'''
# get combination mode curve
combination_enum = clip.curve_to_frame.bl_rna.\
properties['combination_mode'].enum_items
combination_mode = combination_enum.find( clip.curve_to_frame.combination_mode )
combination_mode_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination_mode')
# get and initialize combination curve
combination_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination')
if combination_curve is not None:
hide = combination_curve.hide
clip.animation_data.action.fcurves.remove(combination_curve)
else:
hide = True
clip.animation_data.action.fcurves.new(
'curve_to_frame.combination')
combination_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination')
# get rate curve
rate_curve = get_fcurve_by_data_path(clip, 'curve_to_frame.rate')
# loop only on peak curve keyframe
for keyframe in peaks_curve.keyframe_points:
# get peaks keyframe value and frame
frame = keyframe.co[0]
value = max( min(1, keyframe.co[1]), 0 )
# get combination_mode at this frame
if combination_mode_curve is not None:
combination_mode = combination_mode_curve.evaluate(frame)
# generate keyframe
			if combination_mode != 3 : # any mode except «ignore_amplitude»: apply amplitude
value = value * amplitude_net_curve.evaluate(frame)
if combination_mode != 4 :
combination_curve.keyframe_points.insert(frame, value)
combination_curve.keyframe_points[-1].interpolation = 'LINEAR'
# loop for all frame
end = max( peaks_curve.keyframe_points[-1].co[0],
context.scene.frame_end )
frame = start = context.scene.frame_start
while frame <= end:
# get combination_mode at this frame
if combination_mode_curve is not None:
combination_mode = combination_mode_curve.evaluate(frame)
if combination_mode == 0 : # combination mode is «multiply»
value = max( min( 1, peaks_curve.evaluate(frame) ), 0 )\
* amplitude_net_curve.evaluate(frame)
combination_curve.keyframe_points.insert(frame, value)
elif combination_mode == 2: # combination mode is «clamp_curve»
combination_curve.keyframe_points.insert(
frame,
max(
min (
amplitude_net_curve.evaluate(frame),
peaks_curve.evaluate(frame),
1
),
0
)
)
elif combination_mode == 4:
# combination mode is «ignore peaks»
combination_curve.keyframe_points.insert(
frame,
amplitude_net_curve.evaluate(frame)
)
combination_curve.keyframe_points[-1].interpolation = 'LINEAR'
# next frame
frame += 1
#erase keyframe on flat section
avoid_useless_keyframe( combination_curve )
# prevent curve edition
combination_curve.lock = True
combination_curve.hide = hide
return combination_curve
|
CaptainDesAstres/Frames-Animated-By-Curve
|
single_track/Combination.py
|
Python
|
gpl-3.0
| 4,675
|
#!/usr/bin/env python3
# Copyright (C) 2005-2006 Dimitur Kirov <dkirov AT gmail.com>
# Nikos Kouremenos <kourem AT gmail.com>
# Copyright (C) 2005-2014 Yann Leboulanger <asterix AT lagaule.org>
# Copyright (C) 2006 Junglecow <junglecow AT gmail.com>
# Travis Shirk <travis AT pobox.com>
# Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org>
# Copyright (C) 2007 Julien Pivotto <roidelapluie AT gmail.com>
#
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
# gajim-remote help will show you the D-BUS API of Gajim
import os
import sys
import locale
import signal
from gajim.common import exceptions
from gajim.common.i18n import _
from gajim.common.i18n import Q_
signal.signal(signal.SIGINT, signal.SIG_DFL) # ^C exits the application
try:
PREFERRED_ENCODING = locale.getpreferredencoding()
except Exception:
PREFERRED_ENCODING = 'UTF-8'
def send_error(error_message):
'''Writes error message to stderr and exits'''
print(error_message, file=sys.stderr)
sys.exit(1)
try:
import dbus
import dbus.service
# import dbus.glib
# test if dbus-x11 is installed
bus = dbus.SessionBus()
except Exception:
print(_('D-Bus is not present on this machine or python module is missing'))
sys.exit(1)
OBJ_PATH = '/org/gajim/dbus/RemoteObject'
INTERFACE = 'org.gajim.dbus.RemoteInterface'
SERVICE = 'org.gajim.Gajim'
BASENAME = 'gajim-remote'
class GajimRemote:
def __init__(self):
self.argv_len = len(sys.argv)
# define commands dict. Prototype :
# {
# 'command': [comment, [list of arguments] ]
# }
#
# each argument is defined as a tuple:
# (argument name, help on argument, is mandatory)
#
self.commands = {
'help': [
_('Shows a help on specific command'),
[
#User gets help for the command, specified by this parameter
(_('command'),
_('show help on command'), False)
]
],
'list_contacts': [
_('Lists all contacts in the contact list, one for each line'),
[
(Q_('?CLI:account'), _('show only contacts of the given account'),
False)
]
],
'list_accounts': [
_('Prints a list of registered accounts'),
[]
],
'change_status': [
_('Changes the status of account(s)'),
[
#offline, online, chat, away, xa, dnd should not be translated
(Q_('?CLI:status'), _('one of: offline, online, chat, away, xa, dnd. If not set, use account\'s previous status'), False),
(Q_('?CLI:message'), _('status message'), False),
(Q_('?CLI:account'), _('change status of account "account". '
'If not specified, try to change status of all accounts that have '
'"sync with global status" option set'), False)
]
],
'set_priority': [
_('Changes the priority of account(s)'),
[
(Q_('?CLI:priority'), _('priority you want to give to the account'),
True),
(Q_('?CLI:account'), _('change the priority of the given account. '
'If not specified, change status of all accounts that have'
' "sync with global status" option set'), False)
]
],
'send_chat_message': [
_('Sends new chat message to a contact in the contact list. Account is optional.'),
[
('jid', _('XMPP Address of the contact that will receive the message'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'send_single_message': [
_('Sends a chat message to someone on your contact list. '
'Account is optional.'),
[
('jid', _('XMPP Address of the contact that will receive the message'), True),
(_('subject'), _('message subject'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'send_groupchat_message': [
_('Sends new message to a group chat you\'ve joined.'),
[
('room_jid', _('XMPP Address of the group chat that will receive the message'), True),
(Q_('?CLI:message'), _('message contents'), True),
(Q_('?CLI:account'), _('if specified, the message will be sent '
'using this account'), False),
]
],
'contact_info': [
_('Gets detailed info on a contact'),
[
('jid', _('XMPP Address of the contact'), True)
]
],
'account_info': [
            _('Gets detailed info on an account'),
[
('account', _('Name of the account'), True)
]
],
'send_file': [
_('Sends file to a contact'),
[
(_('file'), _('File path'), True),
('jid', _('XMPP Address of the contact'), True),
(Q_('?CLI:account'), _('if specified, file will be sent using this '
'account'), False)
]
],
'remove_contact': [
_('Removes contact from contact list'),
[
('jid', _('XMPP Address of the contact'), True),
(Q_('?CLI:account'), _('if specified, contact is taken from the '
'contact list of this account'), False)
]
],
'get_status': [
_('Returns current status (the global one unless account is specified)'),
[
(Q_('?CLI:account'), '', False)
]
],
'get_status_message': [
_('Returns current status message (the global one unless account is specified)'),
[
(Q_('?CLI:account'), '', False)
]
],
'get_unread_msgs_number': [
_('Returns number of unread messages'),
[]
],
'send_xml': [
_('Sends custom XML'),
[
('xml', _('XML to send'), True),
('account', _('Account to which the XML will be sent; '
'if not specified, XML will be sent to all accounts'),
False)
]
],
'check_gajim_running': [
_('Check if Gajim is running'),
[]
],
}
self.sbus = None
if self.argv_len < 2 or sys.argv[1] not in self.commands:
# no args or bad args
send_error(self.compose_help())
self.command = sys.argv[1]
if self.command == 'help':
if self.argv_len == 3:
print(self.help_on_command(sys.argv[2]).encode(
PREFERRED_ENCODING))
else:
print(self.compose_help().encode(PREFERRED_ENCODING))
sys.exit(0)
if self.command == 'check_gajim_running':
print(self.check_gajim_running())
sys.exit(0)
self.init_connection()
self.check_arguments()
if self.command == 'contact_info':
if self.argv_len < 3:
send_error(_('Missing argument "contact_jid"'))
try:
res = self.call_remote_method()
except exceptions.ServiceNotAvailable:
# At this point an error message has already been displayed
sys.exit(1)
else:
self.print_result(res)
def print_result(self, res):
"""
Print retrieved result to the output
"""
if res is not None:
if self.command in ('send_chat_message', 'send_single_message'):
self.argv_len -= 2
if res is False:
if self.argv_len < 4:
send_error(_('\'%s\' is not in your contact list.\n'
'Please specify account for sending the message.') % sys.argv[2])
else:
send_error(_('You have no active account'))
elif self.command == 'list_accounts':
if isinstance(res, list):
for account in res:
print(account)
elif self.command == 'account_info':
if res:
print(self.print_info(0, res, True))
elif self.command == 'list_contacts':
for account_dict in res:
print(self.print_info(0, account_dict, True))
elif self.command == 'prefs_list':
pref_keys = sorted(res.keys())
for pref_key in pref_keys:
result = '%s = %s' % (pref_key, res[pref_key])
print(result)
elif self.command == 'contact_info':
print(self.print_info(0, res, True))
elif res:
print(res)
def check_gajim_running(self):
if not self.sbus:
try:
self.sbus = dbus.SessionBus()
except Exception:
raise exceptions.SessionBusNotPresent
test = False
if hasattr(self.sbus, 'name_has_owner'):
if self.sbus.name_has_owner(SERVICE):
test = True
elif dbus.dbus_bindings.bus_name_has_owner(self.sbus.get_connection(),
SERVICE):
test = True
return test
def init_connection(self):
"""
Create the connection to the session dbus, or exit if it is not possible
"""
try:
self.sbus = dbus.SessionBus()
except Exception:
raise exceptions.SessionBusNotPresent
if not self.check_gajim_running():
#Do not translate "gajim-remote"
send_error(_('It seems Gajim is not running. So you can\'t use gajim-remote.'))
obj = self.sbus.get_object(SERVICE, OBJ_PATH)
interface = dbus.Interface(obj, INTERFACE)
# get the function asked
self.method = interface.__getattr__(self.command)
def make_arguments_row(self, args):
"""
Return arguments list. Mandatory arguments are enclosed with:
'<', '>', optional arguments - with '[', ']'
"""
s = ''
for arg in args:
if arg[2]:
s += ' <' + arg[0] + '>'
else:
s += ' [' + arg[0] + ']'
return s
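    # Example on hypothetical argument tuples:
    #   make_arguments_row([('jid', '...', True), ('account', '...', False)])
    # returns ' <jid> [account]'.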
def help_on_command(self, command):
"""
Return help message for a given command
"""
if command in self.commands:
command_props = self.commands[command]
arguments_str = self.make_arguments_row(command_props[1])
str_ = _('Usage: %(basename)s %(command)s %(arguments)s \n\t %(help)s')\
% {'basename': BASENAME, 'command': command,
'arguments': arguments_str, 'help': command_props[0]}
if command_props[1]:
str_ += '\n\n' + _('Arguments:') + '\n'
for argument in command_props[1]:
str_ += ' ' + argument[0] + ' - ' + argument[1] + '\n'
return str_
send_error(_('%s not found') % command)
def compose_help(self):
"""
Print usage, and list available commands
"""
s = _('Usage:\n %s command [arguments]\n\nCommand is one of:\n') % (
BASENAME)
for command in sorted(self.commands):
s += ' ' + command
for arg in self.commands[command][1]:
if arg[2]:
s += ' <' + arg[0] + '>'
else:
s += ' [' + arg[0] + ']'
s += '\n'
return s
def print_info(self, level, prop_dict, encode_return=False):
"""
Return formatted string from data structure
"""
if prop_dict is None or not isinstance(prop_dict, (dict, list, tuple)):
return ''
ret_str = ''
if isinstance(prop_dict, (list, tuple)):
ret_str = ''
spacing = ' ' * level * 4
for val in prop_dict:
if val is None:
ret_str += '\t'
elif isinstance(val, int):
ret_str += '\t' + str(val)
elif isinstance(val, str):
ret_str += '\t' + val
elif isinstance(val, (list, tuple)):
res = ''
for items in val:
res += self.print_info(level+1, items)
if res != '':
ret_str += '\t' + res
elif isinstance(val, dict):
ret_str += self.print_info(level+1, val)
ret_str = '%s(%s)\n' % (spacing, ret_str[1:])
elif isinstance(prop_dict, dict):
for key in prop_dict.keys():
val = prop_dict[key]
spacing = ' ' * level * 4
if isinstance(val, (int, str)):
                if isinstance(val, str):
                    val = val.strip()
ret_str += '%s%-10s: %s\n' % (spacing, key, val)
elif isinstance(val, (list, tuple)):
res = ''
for items in val:
res += self.print_info(level+1, items)
if res != '':
ret_str += '%s%s: \n%s' % (spacing, key, res)
elif isinstance(val, dict):
res = self.print_info(level+1, val)
if res != '':
ret_str += '%s%s: \n%s' % (spacing, key, res)
if encode_return:
try:
ret_str = ret_str.encode(PREFERRED_ENCODING)
except Exception:
pass
return ret_str
def check_arguments(self):
"""
        Check that all necessary arguments are given
"""
argv_len = self.argv_len - 2
args = self.commands[self.command][1]
if len(args) < argv_len:
send_error(_('Too many arguments. \n'
'Type "%(basename)s help %(command)s" for more info') % {
'basename': BASENAME, 'command': self.command})
if len(args) > argv_len:
if args[argv_len][2]:
send_error(_('Argument "%(arg)s" is not specified. \n'
'Type "%(basename)s help %(command)s" for more info') %
{'arg': args[argv_len][0], 'basename': BASENAME,
'command': self.command})
self.arguments = []
i = 0
for arg in sys.argv[2:]:
i += 1
if i < len(args):
self.arguments.append(arg)
else:
                # it's the last argument; join the rest so spaces survive
self.arguments.append(' '.join(sys.argv[i+1:]))
break
# add empty string for missing args
self.arguments += ['']*(len(args)-i)
def call_remote_method(self):
"""
Calls self.method with arguments from sys.argv[2:]
"""
args = [dbus.String(i) for i in self.arguments]
try:
res = self.method(*args)
return res
        except Exception:
            raise exceptions.ServiceNotAvailable
def main():
if os.geteuid() == 0:
sys.exit("You must not launch gajim as root, it is insecure.")
GajimRemote()
if __name__ == '__main__':
main()
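# Illustrative invocations (not part of the original file):
#   gajim-remote help                 -> command list via compose_help()
#   gajim-remote help send_message    -> per-command usage via help_on_command()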
|
gajim/gajim
|
gajim/gajim_remote.py
|
Python
|
gpl-3.0
| 18,904
|
__RCSID__ = "$Id$"
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class Pilot(BaseAccountingType):
def __init__(self):
BaseAccountingType.__init__(self)
self.definitionKeyFields = [('User', 'VARCHAR(64)'),
('UserGroup', 'VARCHAR(32)'),
('Site', 'VARCHAR(64)'),
('GridCE', "VARCHAR(128)"),
('GridMiddleware', 'VARCHAR(32)'),
('GridResourceBroker', 'VARCHAR(128)'),
('GridStatus', 'VARCHAR(32)'),
]
self.definitionAccountingFields = [('Jobs', "INT UNSIGNED"),
]
self.checkType()
|
fstagni/DIRAC
|
AccountingSystem/Client/Types/Pilot.py
|
Python
|
gpl-3.0
| 801
|
# Created By: Virgil Dupras
# Created On: 2011-04-20
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# Heavily based on http://topo.math.u-psud.fr/~bousch/exifdump.py by Thierry Bousch (Public Domain)
import logging
EXIF_TAGS = {
0x0100: "ImageWidth",
0x0101: "ImageLength",
0x0102: "BitsPerSample",
0x0103: "Compression",
0x0106: "PhotometricInterpretation",
0x010A: "FillOrder",
0x010D: "DocumentName",
0x010E: "ImageDescription",
0x010F: "Make",
0x0110: "Model",
0x0111: "StripOffsets",
0x0112: "Orientation",
0x0115: "SamplesPerPixel",
0x0116: "RowsPerStrip",
0x0117: "StripByteCounts",
0x011A: "XResolution",
0x011B: "YResolution",
0x011C: "PlanarConfiguration",
0x0128: "ResolutionUnit",
0x012D: "TransferFunction",
0x0131: "Software",
0x0132: "DateTime",
0x013B: "Artist",
0x013E: "WhitePoint",
0x013F: "PrimaryChromaticities",
0x0156: "TransferRange",
0x0200: "JPEGProc",
0x0201: "JPEGInterchangeFormat",
0x0202: "JPEGInterchangeFormatLength",
0x0211: "YCbCrCoefficients",
0x0212: "YCbCrSubSampling",
0x0213: "YCbCrPositioning",
0x0214: "ReferenceBlackWhite",
0x828F: "BatteryLevel",
0x8298: "Copyright",
0x829A: "ExposureTime",
0x829D: "FNumber",
0x83BB: "IPTC/NAA",
0x8769: "ExifIFDPointer",
0x8773: "InterColorProfile",
0x8822: "ExposureProgram",
0x8824: "SpectralSensitivity",
0x8825: "GPSInfoIFDPointer",
0x8827: "ISOSpeedRatings",
0x8828: "OECF",
0x9000: "ExifVersion",
0x9003: "DateTimeOriginal",
0x9004: "DateTimeDigitized",
0x9101: "ComponentsConfiguration",
0x9102: "CompressedBitsPerPixel",
0x9201: "ShutterSpeedValue",
0x9202: "ApertureValue",
0x9203: "BrightnessValue",
0x9204: "ExposureBiasValue",
0x9205: "MaxApertureValue",
0x9206: "SubjectDistance",
0x9207: "MeteringMode",
0x9208: "LightSource",
0x9209: "Flash",
0x920A: "FocalLength",
0x9214: "SubjectArea",
0x927C: "MakerNote",
0x9286: "UserComment",
0x9290: "SubSecTime",
0x9291: "SubSecTimeOriginal",
0x9292: "SubSecTimeDigitized",
0xA000: "FlashPixVersion",
0xA001: "ColorSpace",
0xA002: "PixelXDimension",
0xA003: "PixelYDimension",
0xA004: "RelatedSoundFile",
0xA005: "InteroperabilityIFDPointer",
0xA20B: "FlashEnergy", # 0x920B in TIFF/EP
0xA20C: "SpatialFrequencyResponse", # 0x920C - -
0xA20E: "FocalPlaneXResolution", # 0x920E - -
0xA20F: "FocalPlaneYResolution", # 0x920F - -
0xA210: "FocalPlaneResolutionUnit", # 0x9210 - -
0xA214: "SubjectLocation", # 0x9214 - -
0xA215: "ExposureIndex", # 0x9215 - -
0xA217: "SensingMethod", # 0x9217 - -
0xA300: "FileSource",
0xA301: "SceneType",
0xA302: "CFAPattern", # 0x828E in TIFF/EP
0xA401: "CustomRendered",
0xA402: "ExposureMode",
0xA403: "WhiteBalance",
0xA404: "DigitalZoomRatio",
0xA405: "FocalLengthIn35mmFilm",
0xA406: "SceneCaptureType",
0xA407: "GainControl",
0xA408: "Contrast",
0xA409: "Saturation",
0xA40A: "Sharpness",
0xA40B: "DeviceSettingDescription",
0xA40C: "SubjectDistanceRange",
0xA420: "ImageUniqueID",
}
INTR_TAGS = {
0x0001: "InteroperabilityIndex",
0x0002: "InteroperabilityVersion",
0x1000: "RelatedImageFileFormat",
0x1001: "RelatedImageWidth",
0x1002: "RelatedImageLength",
}
GPS_TAGS = {
0x00: "GPSVersionID",
0x01: "GPSLatitudeRef",
0x02: "GPSLatitude",
0x03: "GPSLongitudeRef",
0x04: "GPSLongitude",
0x05: "GPSAltitudeRef",
0x06: "GPSAltitude",
0x07: "GPSTimeStamp",
0x08: "GPSSatellites",
0x09: "GPSStatus",
0x0A: "GPSMeasureMode",
0x0B: "GPSDOP",
0x0C: "GPSSpeedRef",
0x0D: "GPSSpeed",
0x0E: "GPSTrackRef",
0x0F: "GPSTrack",
0x10: "GPSImgDirectionRef",
0x11: "GPSImgDirection",
0x12: "GPSMapDatum",
0x13: "GPSDestLatitudeRef",
0x14: "GPSDestLatitude",
0x15: "GPSDestLongitudeRef",
0x16: "GPSDestLongitude",
0x17: "GPSDestBearingRef",
0x18: "GPSDestBearing",
0x19: "GPSDestDistanceRef",
0x1A: "GPSDestDistance",
0x1B: "GPSProcessingMethod",
0x1C: "GPSAreaInformation",
0x1D: "GPSDateStamp",
0x1E: "GPSDifferential"
}
INTEL_ENDIAN = ord('I')
MOTOROLA_ENDIAN = ord('M')
# About MAX_COUNT: It's possible to have corrupted exif tags where the entry count is way too high
# and thus makes us loop, not endlessly, but for a heck of a long time for nothing. Therefore, we put
# an arbitrary limit on the entry count we'll allow ourselves to read and any IFD reporting more
# entries than that will be considered corrupt.
MAX_COUNT = 0xffff
def s2n_motorola(bytes):
x = 0
for c in bytes:
x = (x << 8) | c
return x
def s2n_intel(bytes):
x = 0
y = 0
for c in bytes:
x = x | (c << y)
y = y + 8
return x
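# Worked example: the bytes b'\x01\x02' decode to 0x0102 == 258 with the
# Motorola (big-endian) reader and to 0x0201 == 513 with the Intel
# (little-endian) reader.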
class Fraction:
def __init__(self, num, den):
self.num = num
self.den = den
def __repr__(self):
return '%d/%d' % (self.num, self.den)
class TIFF_file:
def __init__(self, data):
self.data = data
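        # The TIFF body starts with 'II' (Intel, little-endian) or 'MM'
        # (Motorola, big-endian); the first byte selects the decoder.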
self.endian = data[0]
self.s2nfunc = s2n_intel if self.endian == INTEL_ENDIAN else s2n_motorola
def s2n(self, offset, length, signed=0, debug=False):
slice = self.data[offset:offset+length]
val = self.s2nfunc(slice)
        # Sign extension: if the high bit is set, interpret as two's complement
if signed:
msb = 1 << (8*length - 1)
if val & msb:
val = val - (msb << 1)
if debug:
logging.debug(self.endian)
logging.debug("Slice for offset %d length %d: %r and value: %d", offset, length, slice, val)
return val
def first_IFD(self):
return self.s2n(4, 4)
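    # IFD layout: a 2-byte entry count, 12 bytes per entry, then a 4-byte
    # offset to the next IFD (0 ends the chain) -- hence ifd + 2 + 12 * entries.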
def next_IFD(self, ifd):
entries = self.s2n(ifd, 2)
return self.s2n(ifd + 2 + 12 * entries, 4)
def list_IFDs(self):
i = self.first_IFD()
a = []
while i:
a.append(i)
i = self.next_IFD(i)
return a
def dump_IFD(self, ifd):
entries = self.s2n(ifd, 2)
logging.debug("Entries for IFD %d: %d", ifd, entries)
if entries > MAX_COUNT:
logging.debug("Probably corrupt. Aborting.")
return []
a = []
for i in range(entries):
entry = ifd + 2 + 12*i
tag = self.s2n(entry, 2)
type = self.s2n(entry+2, 2)
if not 1 <= type <= 10:
continue # not handled
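            # Byte widths for TIFF types 1-10: BYTE, ASCII, SHORT, LONG,
            # RATIONAL, SBYTE, UNDEFINED, SSHORT, SLONG, SRATIONAL.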
typelen = [1, 1, 2, 4, 8, 1, 1, 2, 4, 8][type-1]
count = self.s2n(entry+4, 4)
if count > MAX_COUNT:
logging.debug("Probably corrupt. Aborting.")
return []
offset = entry+8
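            # Values wider than 4 bytes don't fit in the entry itself; the
            # value field then holds an offset to the actual data instead.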
if count*typelen > 4:
offset = self.s2n(offset, 4)
if type == 2:
# Special case: nul-terminated ASCII string
values = str(self.data[offset:offset+count-1], encoding='latin-1')
else:
values = []
signed = (type == 6 or type >= 8)
for j in range(count):
if type in {5, 10}:
                        # RATIONAL/SRATIONAL: two LONGs, numerator then denominator
value_j = Fraction(self.s2n(offset, 4, signed),
self.s2n(offset+4, 4, signed))
else:
# Not a fraction
value_j = self.s2n(offset, typelen, signed)
values.append(value_j)
offset = offset + typelen
# Now "values" is either a string or an array
a.append((tag, type, values))
return a
def read_exif_header(fp):
    # If fp's first bytes are not an EXIF header, look for one in the next 4kb
def isexif(data):
return data[0:4] == b'\377\330\377\341' and data[6:10] == b'Exif'
data = fp.read(12)
if isexif(data):
return data
# ok, not exif, try to find it
large_data = fp.read(4096)
try:
index = large_data.index(b'Exif')
data = large_data[index-6:index+6]
# large_data omits the first 12 bytes, and the index is at the middle of the header, so we
# must seek index + 18
fp.seek(index+18)
return data
except ValueError:
raise ValueError("Not an Exif file")
def get_fields(fp):
data = read_exif_header(fp)
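    # Bytes 4-5 of the APP1 header hold the segment length, big-endian.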
length = data[4] * 256 + data[5]
logging.debug("Exif header length: %d bytes", length)
data = fp.read(length-8)
data_format = data[0]
logging.debug("%s format", {INTEL_ENDIAN: 'Intel', MOTOROLA_ENDIAN: 'Motorola'}[data_format])
T = TIFF_file(data)
# There may be more than one IFD per file, but we only read the first one because others are
# most likely thumbnails.
main_IFD_offset = T.first_IFD()
result = {}
def add_tag_to_result(tag, values):
try:
stag = EXIF_TAGS[tag]
except KeyError:
stag = '0x%04X' % tag
if stag in result:
return # don't overwrite data
result[stag] = values
logging.debug("IFD at offset %d", main_IFD_offset)
IFD = T.dump_IFD(main_IFD_offset)
exif_off = gps_off = 0
for tag, type, values in IFD:
if tag == 0x8769:
exif_off = values[0]
continue
if tag == 0x8825:
gps_off = values[0]
continue
add_tag_to_result(tag, values)
if exif_off:
logging.debug("Exif SubIFD at offset %d:", exif_off)
IFD = T.dump_IFD(exif_off)
# Recent digital cameras have a little subdirectory
# here, pointed to by tag 0xA005. Apparently, it's the
# "Interoperability IFD", defined in Exif 2.1 and DCF.
intr_off = 0
for tag, type, values in IFD:
if tag == 0xA005:
intr_off = values[0]
continue
add_tag_to_result(tag, values)
if intr_off:
logging.debug("Exif Interoperability SubSubIFD at offset %d:", intr_off)
IFD = T.dump_IFD(intr_off)
for tag, type, values in IFD:
add_tag_to_result(tag, values)
if gps_off:
logging.debug("GPS SubIFD at offset %d:", gps_off)
IFD = T.dump_IFD(gps_off)
for tag, type, values in IFD:
add_tag_to_result(tag, values)
return result
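# Minimal usage sketch (illustrative, not part of the original module;
# 'photo.jpg' is a hypothetical file):
#
#     with open('photo.jpg', 'rb') as fp:
#         for tag, values in get_fields(fp).items():
#             print(tag, values)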
|
stuckj/dupeguru
|
core_pe/exif.py
|
Python
|
gpl-3.0
| 10,797
|
SEFARIA_API_NODE = "https://www.sefaria.org/api/texts/"
CACHE_MONITOR_LOOP_DELAY_IN_SECONDS = 86400   # 24 hours
CACHE_LIFETIME_SECONDS = 604800               # 7 days
category_colors = {
"Commentary": "#4871bf",
"Tanakh": "#004e5f",
"Midrash": "#5d956f",
"Mishnah": "#5a99b7",
"Talmud": "#ccb479",
"Halakhah": "#802f3e",
"Kabbalah": "#594176",
"Philosophy": "#7f85a9",
"Liturgy": "#ab4e66",
"Tanaitic": "#00827f",
"Parshanut": "#9ab8cb",
"Chasidut": "#97b386",
"Musar": "#7c406f",
"Responsa": "#cb6158",
"Apocrypha": "#c7a7b4",
"Other": "#073570",
"Quoting Commentary": "#cb6158",
"Sheets": "#7c406f",
"Community": "#7c406f",
"Targum": "#7f85a9",
"Modern Works": "#7c406f",
"Modern Commentary": "#7c406f",
}
platform_settings = {
"twitter": {
"font_size": 29,
"additional_line_spacing_he": 5,
"additional_line_spacing_en": -10,
"image_width": 506,
"image_height": 253,
"margin": 20,
"category_color_line_width": 7,
"sefaria_branding": False,
"branding_height": 0
},
"facebook": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": -20,
"image_width": 1200,
"image_height": 630,
"margin": 40,
"category_color_line_width": 15,
"sefaria_branding": False,
"branding_height": 0
},
"instagram": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": 0,
"image_width": 1040,
"image_height": 1040,
"margin": 40,
"category_color_line_width": 13,
"sefaria_branding": True,
"branding_height": 100
}
}
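# Illustrative lookups (not part of the original file): a renderer could pick
# per-platform geometry and a category color with a fallback, e.g.
#
#     settings = platform_settings.get("twitter", platform_settings["facebook"])
#     color = category_colors.get("Talmud", category_colors["Other"])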
|
nassan/sefaria-embedded
|
constants.py
|
Python
|
gpl-3.0
| 1,909
|
# Ball-passing simulation: "P x" passes the ball to player x, "B" passes it
# back to the previous holder. Print the holder after every pass.
T = input()
while(T):
    T -= 1
    buff = []
    a,b = [int(i) for i in raw_input().split()]
    buff.append(b)
    k = 0   # index of the current holder in buff
    l = 0   # index of the previous holder
    while(a):
        a -= 1
        c = raw_input().split()
        if c[0] == "P":
            buff.append(int(c[1]))
            l = k
            k = len(buff)-1
        if c[0] == "B":
            k,l = l,k
        print "Player " + str(buff[k])
|
Dawny33/Code
|
HackerEarth/SuperProf Hiring/footy.py
|
Python
|
gpl-3.0
| 579
|
# Simple class SingleList
class SingleList:
    """A list that ignores duplicate values on construction and rejects
    them on item assignment.
    """
def __init__(self, initial_list=None):
self.__list = []
if initial_list:
for value in initial_list:
if value not in self.__list:
self.__list.append(value)
    def __str__(self):
        temp_string = ""
        for i in range(len(self)):
            temp_string += "%12d" % self.__list[i]
            if (i + 1) % 4 == 0:
                temp_string += "\n"
        if len(self) % 4 != 0:
            # close the final, partially filled row
            temp_string += "\n"
        return temp_string
def __len__(self):
return len(self.__list)
def __getitem__(self, index):
return self.__list[index]
def __setitem__(self, index, value):
if value in self.__list:
raise ValueError("List already contains value %s" % str(value))
self.__list[index] = value
    def __eq__(self, other):
        if len(self) != len(other):
            return False
        for i in range(len(self)):
            if self.__list[i] != other.__list[i]:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
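# Minimal usage sketch (illustrative, not part of the original file):
#
#     sl = SingleList([1, 2, 2, 3])   # duplicates dropped -> holds 1, 2, 3
#     print(sl)                       # four right-aligned values per row
#     sl[0] = 3                       # raises ValueError: 3 is already present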
|
r-castro/Python
|
NewList.py
|
Python
|
gpl-3.0
| 1,219
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/colin/Projects/OpenCobolIDE/forms/dlg_preferences.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from pyqode.qt import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1280, 1024)
Dialog.setMinimumSize(QtCore.QSize(500, 500))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/ide-icons/rc/silex-192x192.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.gridLayout_2 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_2.setContentsMargins(6, 6, 6, 6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok|QtWidgets.QDialogButtonBox.Reset|QtWidgets.QDialogButtonBox.RestoreDefaults)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout_2.addWidget(self.buttonBox, 6, 0, 1, 1)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setStyleSheet("")
self.widget.setObjectName("widget")
self.widget_2 = QtWidgets.QGridLayout(self.widget)
self.widget_2.setContentsMargins(0, 0, 0, 0)
self.widget_2.setSpacing(0)
self.widget_2.setObjectName("widget_2")
self.tabWidget = QtWidgets.QTabWidget(self.widget)
self.tabWidget.setStyleSheet("")
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabWidget.setUsesScrollButtons(True)
self.tabWidget.setDocumentMode(False)
self.tabWidget.setObjectName("tabWidget")
self.tabEditor = QtWidgets.QWidget()
self.tabEditor.setObjectName("tabEditor")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tabEditor)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.scrollArea = QtWidgets.QScrollArea(self.tabEditor)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 1244, 921))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_2)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.groupBox_3 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox_3.setObjectName("groupBox_3")
self.formLayout = QtWidgets.QFormLayout(self.groupBox_3)
self.formLayout.setObjectName("formLayout")
self.label_10 = QtWidgets.QLabel(self.groupBox_3)
self.label_10.setObjectName("label_10")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_10)
self.checkBoxViewLineNumber = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBoxViewLineNumber.setText("")
self.checkBoxViewLineNumber.setChecked(True)
self.checkBoxViewLineNumber.setObjectName("checkBoxViewLineNumber")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.checkBoxViewLineNumber)
self.label_11 = QtWidgets.QLabel(self.groupBox_3)
self.label_11.setObjectName("label_11")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_11)
self.checkBoxHighlightCurrentLine = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBoxHighlightCurrentLine.setText("")
self.checkBoxHighlightCurrentLine.setChecked(True)
self.checkBoxHighlightCurrentLine.setObjectName("checkBoxHighlightCurrentLine")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.checkBoxHighlightCurrentLine)
self.label_12 = QtWidgets.QLabel(self.groupBox_3)
self.label_12.setObjectName("label_12")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_12)
self.checkBoxHighlightWhitespaces = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBoxHighlightWhitespaces.setText("")
self.checkBoxHighlightWhitespaces.setChecked(True)
self.checkBoxHighlightWhitespaces.setObjectName("checkBoxHighlightWhitespaces")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkBoxHighlightWhitespaces)
self.label_13 = QtWidgets.QLabel(self.groupBox_3)
self.label_13.setObjectName("label_13")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_13)
self.checkBoxShowErrors = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBoxShowErrors.setText("")
self.checkBoxShowErrors.setObjectName("checkBoxShowErrors")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.checkBoxShowErrors)
self.label_38 = QtWidgets.QLabel(self.groupBox_3)
self.label_38.setObjectName("label_38")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_38)
self.cb_cursor_pos_in_bytes = QtWidgets.QCheckBox(self.groupBox_3)
self.cb_cursor_pos_in_bytes.setText("")
self.cb_cursor_pos_in_bytes.setObjectName("cb_cursor_pos_in_bytes")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.cb_cursor_pos_in_bytes)
self.verticalLayout_7.addWidget(self.groupBox_3)
self.groupBox_11 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox_11.setObjectName("groupBox_11")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.groupBox_11)
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.label_5 = QtWidgets.QLabel(self.groupBox_11)
self.label_5.setObjectName("label_5")
self.horizontalLayout_14.addWidget(self.label_5)
self.spin_box_margin_1 = QtWidgets.QSpinBox(self.groupBox_11)
self.spin_box_margin_1.setObjectName("spin_box_margin_1")
self.horizontalLayout_14.addWidget(self.spin_box_margin_1)
self.color_picker_1 = ColorPicker(self.groupBox_11)
self.color_picker_1.setText("")
self.color_picker_1.setObjectName("color_picker_1")
self.horizontalLayout_14.addWidget(self.color_picker_1)
self.horizontalLayout_18.addLayout(self.horizontalLayout_14)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.label_39 = QtWidgets.QLabel(self.groupBox_11)
self.label_39.setObjectName("label_39")
self.horizontalLayout_15.addWidget(self.label_39)
self.spin_box_margin_2 = QtWidgets.QSpinBox(self.groupBox_11)
self.spin_box_margin_2.setObjectName("spin_box_margin_2")
self.horizontalLayout_15.addWidget(self.spin_box_margin_2)
self.color_picker_2 = ColorPicker(self.groupBox_11)
self.color_picker_2.setText("")
self.color_picker_2.setObjectName("color_picker_2")
self.horizontalLayout_15.addWidget(self.color_picker_2)
self.horizontalLayout_18.addLayout(self.horizontalLayout_15)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.label_40 = QtWidgets.QLabel(self.groupBox_11)
self.label_40.setObjectName("label_40")
self.horizontalLayout_16.addWidget(self.label_40)
self.spin_box_margin_3 = QtWidgets.QSpinBox(self.groupBox_11)
self.spin_box_margin_3.setObjectName("spin_box_margin_3")
self.horizontalLayout_16.addWidget(self.spin_box_margin_3)
self.color_picker_3 = ColorPicker(self.groupBox_11)
self.color_picker_3.setText("")
self.color_picker_3.setObjectName("color_picker_3")
self.horizontalLayout_16.addWidget(self.color_picker_3)
self.horizontalLayout_18.addLayout(self.horizontalLayout_16)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.label_41 = QtWidgets.QLabel(self.groupBox_11)
self.label_41.setObjectName("label_41")
self.horizontalLayout_17.addWidget(self.label_41)
self.spin_box_margin_4 = QtWidgets.QSpinBox(self.groupBox_11)
self.spin_box_margin_4.setObjectName("spin_box_margin_4")
self.horizontalLayout_17.addWidget(self.spin_box_margin_4)
self.color_picker_4 = ColorPicker(self.groupBox_11)
self.color_picker_4.setText("")
self.color_picker_4.setObjectName("color_picker_4")
self.horizontalLayout_17.addWidget(self.color_picker_4)
self.horizontalLayout_18.addLayout(self.horizontalLayout_17)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_18.addItem(spacerItem)
self.verticalLayout_7.addWidget(self.groupBox_11)
self.groupBox = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox.setObjectName("groupBox")
self.formLayout_7 = QtWidgets.QFormLayout(self.groupBox)
self.formLayout_7.setObjectName("formLayout_7")
self.label_7 = QtWidgets.QLabel(self.groupBox)
self.label_7.setObjectName("label_7")
self.formLayout_7.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_7)
self.lineEditCommentIndicator = QtWidgets.QLineEdit(self.groupBox)
font = QtGui.QFont()
font.setFamily("Monospace")
self.lineEditCommentIndicator.setFont(font)
self.lineEditCommentIndicator.setObjectName("lineEditCommentIndicator")
self.formLayout_7.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEditCommentIndicator)
self.verticalLayout_7.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox_2.setObjectName("groupBox_2")
self.formLayout_2 = QtWidgets.QFormLayout(self.groupBox_2)
self.formLayout_2.setObjectName("formLayout_2")
self.label = QtWidgets.QLabel(self.groupBox_2)
self.label.setObjectName("label")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.spinBoxEditorTabLen = QtWidgets.QSpinBox(self.groupBox_2)
self.spinBoxEditorTabLen.setMinimum(2)
self.spinBoxEditorTabLen.setMaximum(99)
self.spinBoxEditorTabLen.setProperty("value", 4)
self.spinBoxEditorTabLen.setObjectName("spinBoxEditorTabLen")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.spinBoxEditorTabLen)
self.label_14 = QtWidgets.QLabel(self.groupBox_2)
self.label_14.setText("")
self.label_14.setObjectName("label_14")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_14)
self.checkBoxEditorAutoIndent = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBoxEditorAutoIndent.setChecked(True)
self.checkBoxEditorAutoIndent.setObjectName("checkBoxEditorAutoIndent")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkBoxEditorAutoIndent)
self.label_6 = QtWidgets.QLabel(self.groupBox_2)
self.label_6.setText("")
self.label_6.setObjectName("label_6")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.checkBoxSmartBackspace = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBoxSmartBackspace.setObjectName("checkBoxSmartBackspace")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.checkBoxSmartBackspace)
self.verticalLayout_7.addWidget(self.groupBox_2)
self.groupBox_4 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox_4.setObjectName("groupBox_4")
self.formLayout_4 = QtWidgets.QFormLayout(self.groupBox_4)
self.formLayout_4.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.formLayout_4.setObjectName("formLayout_4")
self.label_2 = QtWidgets.QLabel(self.groupBox_4)
self.label_2.setObjectName("label_2")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.spinBoxEditorCCTriggerLen = QtWidgets.QSpinBox(self.groupBox_4)
self.spinBoxEditorCCTriggerLen.setProperty("value", 1)
self.spinBoxEditorCCTriggerLen.setObjectName("spinBoxEditorCCTriggerLen")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.spinBoxEditorCCTriggerLen)
self.label_16 = QtWidgets.QLabel(self.groupBox_4)
self.label_16.setObjectName("label_16")
self.formLayout_4.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_16)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.rbLowerCaseKwds = QtWidgets.QRadioButton(self.groupBox_4)
self.rbLowerCaseKwds.setObjectName("rbLowerCaseKwds")
self.horizontalLayout.addWidget(self.rbLowerCaseKwds)
self.rbUpperCaseKwds = QtWidgets.QRadioButton(self.groupBox_4)
self.rbUpperCaseKwds.setChecked(True)
self.rbUpperCaseKwds.setObjectName("rbUpperCaseKwds")
self.horizontalLayout.addWidget(self.rbUpperCaseKwds)
self.formLayout_4.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout)
self.label_31 = QtWidgets.QLabel(self.groupBox_4)
self.label_31.setObjectName("label_31")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_31)
self.comboCcFilterMode = QtWidgets.QComboBox(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboCcFilterMode.sizePolicy().hasHeightForWidth())
self.comboCcFilterMode.setSizePolicy(sizePolicy)
self.comboCcFilterMode.setMinimumSize(QtCore.QSize(134, 0))
self.comboCcFilterMode.setObjectName("comboCcFilterMode")
self.comboCcFilterMode.addItem("")
self.comboCcFilterMode.addItem("")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.comboCcFilterMode)
self.verticalLayout_7.addWidget(self.groupBox_4)
self.groupBox_10 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2)
self.groupBox_10.setObjectName("groupBox_10")
self.formLayout_11 = QtWidgets.QFormLayout(self.groupBox_10)
self.formLayout_11.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.formLayout_11.setObjectName("formLayout_11")
self.label_30 = QtWidgets.QLabel(self.groupBox_10)
self.label_30.setObjectName("label_30")
self.formLayout_11.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_30)
self.comboBoxPreferredEOL = QtWidgets.QComboBox(self.groupBox_10)
self.comboBoxPreferredEOL.setObjectName("comboBoxPreferredEOL")
self.comboBoxPreferredEOL.addItem("")
self.comboBoxPreferredEOL.addItem("")
self.comboBoxPreferredEOL.addItem("")
self.comboBoxPreferredEOL.addItem("")
self.formLayout_11.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.comboBoxPreferredEOL)
self.checkBoxAutodetectEOL = QtWidgets.QCheckBox(self.groupBox_10)
self.checkBoxAutodetectEOL.setChecked(True)
self.checkBoxAutodetectEOL.setObjectName("checkBoxAutodetectEOL")
self.formLayout_11.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.checkBoxAutodetectEOL)
self.verticalLayout_7.addWidget(self.groupBox_10)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout.addWidget(self.scrollArea)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/ide-icons/rc/cobol-mimetype.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tabEditor, icon1, "")
self.tabStyle = QtWidgets.QWidget()
self.tabStyle.setObjectName("tabStyle")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tabStyle)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox_5 = QtWidgets.QGroupBox(self.tabStyle)
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.radioButtonColorWhite = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButtonColorWhite.setChecked(True)
self.radioButtonColorWhite.setObjectName("radioButtonColorWhite")
self.horizontalLayout_3.addWidget(self.radioButtonColorWhite)
self.radioButtonColorDark = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButtonColorDark.setObjectName("radioButtonColorDark")
self.horizontalLayout_3.addWidget(self.radioButtonColorDark)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.layoutIconTheme = QtWidgets.QFormLayout()
self.layoutIconTheme.setContentsMargins(-1, 0, -1, -1)
self.layoutIconTheme.setObjectName("layoutIconTheme")
self.lblIconTheme = QtWidgets.QLabel(self.groupBox_5)
self.lblIconTheme.setObjectName("lblIconTheme")
self.layoutIconTheme.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.lblIconTheme)
self.comboBoxIconTheme = QtWidgets.QComboBox(self.groupBox_5)
self.comboBoxIconTheme.setObjectName("comboBoxIconTheme")
self.layoutIconTheme.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.comboBoxIconTheme)
self.verticalLayout_2.addLayout(self.layoutIconTheme)
self.verticalLayout_3.addWidget(self.groupBox_5)
self.groupBox_6 = QtWidgets.QGroupBox(self.tabStyle)
self.groupBox_6.setObjectName("groupBox_6")
self.formLayout_3 = QtWidgets.QFormLayout(self.groupBox_6)
self.formLayout_3.setObjectName("formLayout_3")
self.label_3 = QtWidgets.QLabel(self.groupBox_6)
self.label_3.setObjectName("label_3")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.fontComboBox = QtWidgets.QFontComboBox(self.groupBox_6)
self.fontComboBox.setFontFilters(QtWidgets.QFontComboBox.MonospacedFonts)
font = QtGui.QFont()
font.setFamily("Clean")
self.fontComboBox.setCurrentFont(font)
self.fontComboBox.setObjectName("fontComboBox")
self.horizontalLayout_4.addWidget(self.fontComboBox)
self.formLayout_3.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_4)
self.label_4 = QtWidgets.QLabel(self.groupBox_6)
self.label_4.setObjectName("label_4")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.spinBoxFontSize = QtWidgets.QSpinBox(self.groupBox_6)
self.spinBoxFontSize.setProperty("value", 10)
self.spinBoxFontSize.setObjectName("spinBoxFontSize")
self.horizontalLayout_5.addWidget(self.spinBoxFontSize)
self.formLayout_3.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_5)
self.verticalLayout_3.addWidget(self.groupBox_6)
self.groupBox_7 = QtWidgets.QGroupBox(self.tabStyle)
self.groupBox_7.setObjectName("groupBox_7")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_7)
self.gridLayout_4.setObjectName("gridLayout_4")
self.listWidgetColorSchemes = QtWidgets.QListWidget(self.groupBox_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listWidgetColorSchemes.sizePolicy().hasHeightForWidth())
self.listWidgetColorSchemes.setSizePolicy(sizePolicy)
self.listWidgetColorSchemes.setObjectName("listWidgetColorSchemes")
self.gridLayout_4.addWidget(self.listWidgetColorSchemes, 0, 0, 1, 1)
self.plainTextEdit = CobolCodeEdit(self.groupBox_7)
self.plainTextEdit.setObjectName("plainTextEdit")
self.gridLayout_4.addWidget(self.plainTextEdit, 0, 1, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox_7)
icon = QtGui.QIcon.fromTheme("applications-graphics")
self.tabWidget.addTab(self.tabStyle, icon, "")
self.tabCompiler = QtWidgets.QWidget()
self.tabCompiler.setObjectName("tabCompiler")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.tabCompiler)
self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.scrollArea_5 = QtWidgets.QScrollArea(self.tabCompiler)
self.scrollArea_5.setWidgetResizable(True)
self.scrollArea_5.setObjectName("scrollArea_5")
self.scrollAreaWidgetContents_7 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_7.setGeometry(QtCore.QRect(0, 0, 1224, 973))
self.scrollAreaWidgetContents_7.setObjectName("scrollAreaWidgetContents_7")
self.formLayout_6 = QtWidgets.QFormLayout(self.scrollAreaWidgetContents_7)
self.formLayout_6.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.formLayout_6.setContentsMargins(9, 9, 9, 9)
self.formLayout_6.setSpacing(9)
self.formLayout_6.setObjectName("formLayout_6")
self.label_compiler_path = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_compiler_path.setObjectName("label_compiler_path")
self.formLayout_6.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_compiler_path)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.lineEditCompilerPath = PathLineEdit(self.scrollAreaWidgetContents_7)
self.lineEditCompilerPath.setObjectName("lineEditCompilerPath")
self.horizontalLayout_9.addWidget(self.lineEditCompilerPath)
self.toolButtonCustomCompilerPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolButtonCustomCompilerPath.setObjectName("toolButtonCustomCompilerPath")
self.horizontalLayout_9.addWidget(self.toolButtonCustomCompilerPath)
self.toolButtonCheckCompiler = QtWidgets.QPushButton(self.scrollAreaWidgetContents_7)
icon = QtGui.QIcon.fromTheme("emblem-checked")
self.toolButtonCheckCompiler.setIcon(icon)
self.toolButtonCheckCompiler.setObjectName("toolButtonCheckCompiler")
self.horizontalLayout_9.addWidget(self.toolButtonCheckCompiler)
self.formLayout_6.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_9)
self.groupBoxEnvVars = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_7)
self.groupBoxEnvVars.setObjectName("groupBoxEnvVars")
self.formLayout_12 = QtWidgets.QFormLayout(self.groupBoxEnvVars)
self.formLayout_12.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_12.setObjectName("formLayout_12")
self.PATH = PathLineEdit(self.groupBoxEnvVars)
self.PATH.setObjectName("PATH")
self.formLayout_12.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.PATH)
self.COB_CONFIG_DIR = PathLineEdit(self.groupBoxEnvVars)
self.COB_CONFIG_DIR.setObjectName("COB_CONFIG_DIR")
self.formLayout_12.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.COB_CONFIG_DIR)
self.COB_COPY_DIR = PathLineEdit(self.groupBoxEnvVars)
self.COB_COPY_DIR.setObjectName("COB_COPY_DIR")
self.formLayout_12.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.COB_COPY_DIR)
self.COB_INCLUDE_PATH = PathLineEdit(self.groupBoxEnvVars)
self.COB_INCLUDE_PATH.setObjectName("COB_INCLUDE_PATH")
self.formLayout_12.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.COB_INCLUDE_PATH)
self.COB_LIB_PATH = PathLineEdit(self.groupBoxEnvVars)
self.COB_LIB_PATH.setObjectName("COB_LIB_PATH")
self.formLayout_12.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.COB_LIB_PATH)
self.cbPATH = QtWidgets.QCheckBox(self.groupBoxEnvVars)
self.cbPATH.setObjectName("cbPATH")
self.formLayout_12.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.cbPATH)
self.cbCOB_CONFIG_DIR = QtWidgets.QCheckBox(self.groupBoxEnvVars)
self.cbCOB_CONFIG_DIR.setObjectName("cbCOB_CONFIG_DIR")
self.formLayout_12.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.cbCOB_CONFIG_DIR)
self.cbCOB_COPY_DIR = QtWidgets.QCheckBox(self.groupBoxEnvVars)
self.cbCOB_COPY_DIR.setObjectName("cbCOB_COPY_DIR")
self.formLayout_12.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.cbCOB_COPY_DIR)
self.cbCOB_INCLUDE_PATH = QtWidgets.QCheckBox(self.groupBoxEnvVars)
self.cbCOB_INCLUDE_PATH.setObjectName("cbCOB_INCLUDE_PATH")
self.formLayout_12.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.cbCOB_INCLUDE_PATH)
self.cbCOB_LIB_PATH = QtWidgets.QCheckBox(self.groupBoxEnvVars)
self.cbCOB_LIB_PATH.setObjectName("cbCOB_LIB_PATH")
self.formLayout_12.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.cbCOB_LIB_PATH)
self.formLayout_6.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.groupBoxEnvVars)
self.cbAutoDetectSublmodules = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cbAutoDetectSublmodules.setObjectName("cbAutoDetectSublmodules")
self.formLayout_6.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.cbAutoDetectSublmodules)
self.labelVCVARS = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.labelVCVARS.setObjectName("labelVCVARS")
self.formLayout_6.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.labelVCVARS)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.lineEditVCVARS = PathLineEdit(self.scrollAreaWidgetContents_7)
self.lineEditVCVARS.setObjectName("lineEditVCVARS")
self.horizontalLayout_8.addWidget(self.lineEditVCVARS)
self.toolButtonVCVARS = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolButtonVCVARS.setObjectName("toolButtonVCVARS")
self.horizontalLayout_8.addWidget(self.toolButtonVCVARS)
self.combo_arch = QtWidgets.QComboBox(self.scrollAreaWidgetContents_7)
self.combo_arch.setEditable(False)
self.combo_arch.setObjectName("combo_arch")
self.combo_arch.addItem("")
self.combo_arch.addItem("")
self.horizontalLayout_8.addWidget(self.combo_arch)
self.formLayout_6.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_8)
self.label_36 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_36.setObjectName("label_36")
self.formLayout_6.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_36)
self.lineEditOutputDirectory = PathLineEdit(self.scrollAreaWidgetContents_7)
self.lineEditOutputDirectory.setObjectName("lineEditOutputDirectory")
self.formLayout_6.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.lineEditOutputDirectory)
self.cb_copy_runtime_dlls = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_copy_runtime_dlls.setObjectName("cb_copy_runtime_dlls")
self.formLayout_6.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.cb_copy_runtime_dlls)
self.label_32 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_32.setObjectName("label_32")
self.formLayout_6.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_32)
self.lineEditCobcExts = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineEditCobcExts.setObjectName("lineEditCobcExts")
self.formLayout_6.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.lineEditCobcExts)
self.label_8 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_8.setObjectName("label_8")
self.formLayout_6.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_8)
self.comboBoxStandard = QtWidgets.QComboBox(self.scrollAreaWidgetContents_7)
self.comboBoxStandard.setObjectName("comboBoxStandard")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.comboBoxStandard.addItem("")
self.formLayout_6.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.comboBoxStandard)
self.label_9 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_9.setObjectName("label_9")
self.formLayout_6.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.label_9)
self.checkBoxFreeFormat = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.checkBoxFreeFormat.setText("")
self.checkBoxFreeFormat.setObjectName("checkBoxFreeFormat")
self.formLayout_6.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.checkBoxFreeFormat)
self.label_15 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_15.setObjectName("label_15")
self.formLayout_6.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.label_15)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setContentsMargins(0, 0, -1, -1)
self.gridLayout_5.setObjectName("gridLayout_5")
self.cb_ftrace = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_ftrace.setObjectName("cb_ftrace")
self.gridLayout_5.addWidget(self.cb_ftrace, 2, 1, 1, 1)
self.cb_static = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_static.setObjectName("cb_static")
self.gridLayout_5.addWidget(self.cb_static, 0, 1, 1, 1)
self.cb_g = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_g.setObjectName("cb_g")
self.gridLayout_5.addWidget(self.cb_g, 0, 3, 1, 1)
self.cb_debugging_line = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_debugging_line.setObjectName("cb_debugging_line")
self.gridLayout_5.addWidget(self.cb_debugging_line, 2, 3, 1, 1)
self.cb_ftraceall = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_ftraceall.setObjectName("cb_ftraceall")
self.gridLayout_5.addWidget(self.cb_ftraceall, 2, 2, 1, 1)
self.cb_debug = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_debug.setObjectName("cb_debug")
self.gridLayout_5.addWidget(self.cb_debug, 0, 2, 1, 1)
self.cb_w = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_w.setObjectName("cb_w")
self.gridLayout_5.addWidget(self.cb_w, 0, 0, 1, 1)
self.cb_wall = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.cb_wall.setObjectName("cb_wall")
self.gridLayout_5.addWidget(self.cb_wall, 2, 0, 1, 1)
self.formLayout_6.setLayout(10, QtWidgets.QFormLayout.FieldRole, self.gridLayout_5)
self.label_35 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_35.setObjectName("label_35")
self.formLayout_6.setWidget(12, QtWidgets.QFormLayout.LabelRole, self.label_35)
self.label_37 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_37.setObjectName("label_37")
self.formLayout_6.setWidget(13, QtWidgets.QFormLayout.LabelRole, self.label_37)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.listWidgetCopyPaths = QtWidgets.QListWidget(self.scrollAreaWidgetContents_7)
self.listWidgetCopyPaths.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.listWidgetCopyPaths.setObjectName("listWidgetCopyPaths")
self.horizontalLayout_12.addWidget(self.listWidgetCopyPaths)
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setContentsMargins(0, -1, -1, -1)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.btAddAbsoluteCopyPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
icon = QtGui.QIcon.fromTheme("folder-open")
self.btAddAbsoluteCopyPath.setIcon(icon)
self.btAddAbsoluteCopyPath.setObjectName("btAddAbsoluteCopyPath")
self.verticalLayout_12.addWidget(self.btAddAbsoluteCopyPath)
self.btAddRelativeCopyPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
icon = QtGui.QIcon.fromTheme("list-add")
self.btAddRelativeCopyPath.setIcon(icon)
self.btAddRelativeCopyPath.setObjectName("btAddRelativeCopyPath")
self.verticalLayout_12.addWidget(self.btAddRelativeCopyPath)
self.btRemoveCopyPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/ide-icons/rc/list-remove.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btRemoveCopyPath.setIcon(icon2)
self.btRemoveCopyPath.setObjectName("btRemoveCopyPath")
self.verticalLayout_12.addWidget(self.btRemoveCopyPath)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_12.addItem(spacerItem4)
self.horizontalLayout_12.addLayout(self.verticalLayout_12)
self.formLayout_6.setLayout(13, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_12)
self.label_17 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_17.setObjectName("label_17")
self.formLayout_6.setWidget(14, QtWidgets.QFormLayout.LabelRole, self.label_17)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.listWidgetLibPaths = QtWidgets.QListWidget(self.scrollAreaWidgetContents_7)
self.listWidgetLibPaths.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.listWidgetLibPaths.setObjectName("listWidgetLibPaths")
self.horizontalLayout_10.addWidget(self.listWidgetLibPaths)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setContentsMargins(0, -1, -1, -1)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.toolButtonAddLibPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/ide-icons/rc/document-open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonAddLibPath.setIcon(icon3)
self.toolButtonAddLibPath.setObjectName("toolButtonAddLibPath")
self.verticalLayout_4.addWidget(self.toolButtonAddLibPath)
self.toolButtonAddRelativeLibPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/ide-icons/rc/list-add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonAddRelativeLibPath.setIcon(icon4)
self.toolButtonAddRelativeLibPath.setObjectName("toolButtonAddRelativeLibPath")
self.verticalLayout_4.addWidget(self.toolButtonAddRelativeLibPath)
self.toolButtonRemoveLibPath = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolButtonRemoveLibPath.setIcon(icon2)
self.toolButtonRemoveLibPath.setObjectName("toolButtonRemoveLibPath")
self.verticalLayout_4.addWidget(self.toolButtonRemoveLibPath)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem5)
self.horizontalLayout_10.addLayout(self.verticalLayout_4)
self.formLayout_6.setLayout(14, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_10)
self.label_18 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_18.setObjectName("label_18")
self.formLayout_6.setWidget(15, QtWidgets.QFormLayout.LabelRole, self.label_18)
self.lineEditLibs = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineEditLibs.setObjectName("lineEditLibs")
self.formLayout_6.setWidget(15, QtWidgets.QFormLayout.FieldRole, self.lineEditLibs)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setContentsMargins(0, 0, -1, -1)
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.le_compiler_flags = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.le_compiler_flags.setObjectName("le_compiler_flags")
self.horizontalLayout_20.addWidget(self.le_compiler_flags)
self.btCompilerFlagsHelp = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.btCompilerFlagsHelp.setObjectName("btCompilerFlagsHelp")
self.horizontalLayout_20.addWidget(self.btCompilerFlagsHelp)
self.formLayout_6.setLayout(12, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_20)
self.scrollArea_5.setWidget(self.scrollAreaWidgetContents_7)
self.verticalLayout_11.addWidget(self.scrollArea_5)
icon = QtGui.QIcon.fromTheme("exec")
self.tabWidget.addTab(self.tabCompiler, icon, "")
self.tabRun = QtWidgets.QWidget()
self.tabRun.setObjectName("tabRun")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.tabRun)
self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.formLayout_13 = QtWidgets.QFormLayout()
self.formLayout_13.setContentsMargins(6, 6, 6, 6)
self.formLayout_13.setObjectName("formLayout_13")
self.lbl_external_terminal_command = QtWidgets.QLabel(self.tabRun)
self.lbl_external_terminal_command.setObjectName("lbl_external_terminal_command")
self.formLayout_13.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.lbl_external_terminal_command)
self.lineEditRunTerm = QtWidgets.QLineEdit(self.tabRun)
self.lineEditRunTerm.setObjectName("lineEditRunTerm")
self.formLayout_13.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEditRunTerm)
self.label_42 = QtWidgets.QLabel(self.tabRun)
self.label_42.setObjectName("label_42")
self.formLayout_13.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_42)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.edit_working_dir = PathLineEdit(self.tabRun)
self.edit_working_dir.setObjectName("edit_working_dir")
self.horizontalLayout_19.addWidget(self.edit_working_dir)
self.bt_working_dir = QtWidgets.QToolButton(self.tabRun)
self.bt_working_dir.setObjectName("bt_working_dir")
self.horizontalLayout_19.addWidget(self.bt_working_dir)
self.formLayout_13.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_19)
self.checkBoxRunExtTerm = QtWidgets.QCheckBox(self.tabRun)
self.checkBoxRunExtTerm.setObjectName("checkBoxRunExtTerm")
self.formLayout_13.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.checkBoxRunExtTerm)
self.label_43 = QtWidgets.QLabel(self.tabRun)
self.label_43.setObjectName("label_43")
self.formLayout_13.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_43)
self.verticalLayout_10.addLayout(self.formLayout_13)
self.groupBox_12 = QtWidgets.QGroupBox(self.tabRun)
self.groupBox_12.setObjectName("groupBox_12")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.groupBox_12)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.tw_run_env = QtWidgets.QTableWidget(self.groupBox_12)
self.tw_run_env.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.tw_run_env.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tw_run_env.setObjectName("tw_run_env")
self.tw_run_env.setColumnCount(2)
self.tw_run_env.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tw_run_env.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tw_run_env.setHorizontalHeaderItem(1, item)
self.tw_run_env.horizontalHeader().setCascadingSectionResizes(False)
self.tw_run_env.horizontalHeader().setDefaultSectionSize(300)
self.tw_run_env.horizontalHeader().setStretchLastSection(True)
self.tw_run_env.verticalHeader().setVisible(False)
self.horizontalLayout_13.addWidget(self.tw_run_env)
self.verticalLayout_14 = QtWidgets.QVBoxLayout()
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.bt_add_run_env = QtWidgets.QPushButton(self.groupBox_12)
icon = QtGui.QIcon.fromTheme("list-add")
self.bt_add_run_env.setIcon(icon)
self.bt_add_run_env.setObjectName("bt_add_run_env")
self.verticalLayout_14.addWidget(self.bt_add_run_env)
self.bt_rm_run_env = QtWidgets.QPushButton(self.groupBox_12)
icon = QtGui.QIcon.fromTheme("list-remove")
self.bt_rm_run_env.setIcon(icon)
self.bt_rm_run_env.setObjectName("bt_rm_run_env")
self.verticalLayout_14.addWidget(self.bt_rm_run_env)
self.bt_clear_run_env = QtWidgets.QPushButton(self.groupBox_12)
icon = QtGui.QIcon.fromTheme("edit-clear")
self.bt_clear_run_env.setIcon(icon)
self.bt_clear_run_env.setObjectName("bt_clear_run_env")
self.verticalLayout_14.addWidget(self.bt_clear_run_env)
spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_14.addItem(spacerItem6)
self.horizontalLayout_13.addLayout(self.verticalLayout_14)
self.verticalLayout_10.addWidget(self.groupBox_12)
self.verticalLayout_10.setStretch(1, 1)
icon = QtGui.QIcon.fromTheme("media-playback-start")
self.tabWidget.addTab(self.tabRun, icon, "")
self.tabSqlCobol = QtWidgets.QWidget()
self.tabSqlCobol.setObjectName("tabSqlCobol")
self.gridLayout = QtWidgets.QGridLayout(self.tabSqlCobol)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea_3 = QtWidgets.QScrollArea(self.tabSqlCobol)
self.scrollArea_3.setWidgetResizable(True)
self.scrollArea_3.setObjectName("scrollArea_3")
self.scrollAreaWidgetContents_5 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_5.setGeometry(QtCore.QRect(0, 0, 1244, 897))
self.scrollAreaWidgetContents_5.setObjectName("scrollAreaWidgetContents_5")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_5)
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.stackedWidgetSQL = QtWidgets.QStackedWidget(self.scrollAreaWidgetContents_5)
self.stackedWidgetSQL.setObjectName("stackedWidgetSQL")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.gridLayout_3 = QtWidgets.QGridLayout(self.page)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.groupBox_8 = QtWidgets.QGroupBox(self.page)
self.groupBox_8.setObjectName("groupBox_8")
self.formLayout_8 = QtWidgets.QFormLayout(self.groupBox_8)
self.formLayout_8.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.formLayout_8.setObjectName("formLayout_8")
self.label_19 = QtWidgets.QLabel(self.groupBox_8)
self.label_19.setObjectName("label_19")
self.formLayout_8.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_19)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setContentsMargins(0, -1, -1, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lineEditDbpre = QtWidgets.QLineEdit(self.groupBox_8)
self.lineEditDbpre.setObjectName("lineEditDbpre")
self.horizontalLayout_2.addWidget(self.lineEditDbpre)
self.toolButtonDbpre = QtWidgets.QToolButton(self.groupBox_8)
self.toolButtonDbpre.setObjectName("toolButtonDbpre")
self.horizontalLayout_2.addWidget(self.toolButtonDbpre)
self.verticalLayout_5.addLayout(self.horizontalLayout_2)
self.labelDbpreVersion = QtWidgets.QLabel(self.groupBox_8)
self.labelDbpreVersion.setStyleSheet("font: oblique 9pt \"Cantarell\";")
self.labelDbpreVersion.setObjectName("labelDbpreVersion")
self.verticalLayout_5.addWidget(self.labelDbpreVersion)
self.formLayout_8.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_5)
self.label_20 = QtWidgets.QLabel(self.groupBox_8)
self.label_20.setObjectName("label_20")
self.formLayout_8.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_20)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.lineEditCobmysqlapi = QtWidgets.QLineEdit(self.groupBox_8)
self.lineEditCobmysqlapi.setObjectName("lineEditCobmysqlapi")
self.horizontalLayout_6.addWidget(self.lineEditCobmysqlapi)
self.toolButtonCobMySqlApiPath = QtWidgets.QToolButton(self.groupBox_8)
self.toolButtonCobMySqlApiPath.setObjectName("toolButtonCobMySqlApiPath")
self.horizontalLayout_6.addWidget(self.toolButtonCobMySqlApiPath)
self.formLayout_8.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_6)
self.label_21 = QtWidgets.QLabel(self.groupBox_8)
self.label_21.setObjectName("label_21")
self.formLayout_8.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_21)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.lineEditDbpreFramework = QtWidgets.QLineEdit(self.groupBox_8)
self.lineEditDbpreFramework.setObjectName("lineEditDbpreFramework")
self.horizontalLayout_7.addWidget(self.lineEditDbpreFramework)
self.toolButtonDbpreFramework = QtWidgets.QToolButton(self.groupBox_8)
self.toolButtonDbpreFramework.setObjectName("toolButtonDbpreFramework")
self.horizontalLayout_7.addWidget(self.toolButtonDbpreFramework)
self.formLayout_8.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_7)
self.label_33 = QtWidgets.QLabel(self.groupBox_8)
self.label_33.setObjectName("label_33")
self.formLayout_8.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_33)
self.lineEditDbpreExts = QtWidgets.QLineEdit(self.groupBox_8)
self.lineEditDbpreExts.setObjectName("lineEditDbpreExts")
self.formLayout_8.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEditDbpreExts)
self.gridLayout_3.addWidget(self.groupBox_8, 0, 0, 1, 1)
self.groupBox_9 = QtWidgets.QGroupBox(self.page)
self.groupBox_9.setObjectName("groupBox_9")
self.formLayout_9 = QtWidgets.QFormLayout(self.groupBox_9)
self.formLayout_9.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.formLayout_9.setObjectName("formLayout_9")
self.label_22 = QtWidgets.QLabel(self.groupBox_9)
self.label_22.setObjectName("label_22")
self.formLayout_9.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_22)
self.lineEditDBHOST = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBHOST.setText("")
self.lineEditDBHOST.setObjectName("lineEditDBHOST")
self.formLayout_9.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEditDBHOST)
self.label_23 = QtWidgets.QLabel(self.groupBox_9)
self.label_23.setObjectName("label_23")
self.formLayout_9.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_23)
self.lineEditDBUSER = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBUSER.setObjectName("lineEditDBUSER")
self.formLayout_9.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEditDBUSER)
self.label_24 = QtWidgets.QLabel(self.groupBox_9)
self.label_24.setObjectName("label_24")
self.formLayout_9.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_24)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setContentsMargins(3, 3, 3, 3)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.lineEditDBPASSWD = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBPASSWD.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEditDBPASSWD.setObjectName("lineEditDBPASSWD")
self.verticalLayout_6.addWidget(self.lineEditDBPASSWD)
self.checkBoxShowDbPass = QtWidgets.QCheckBox(self.groupBox_9)
self.checkBoxShowDbPass.setObjectName("checkBoxShowDbPass")
self.verticalLayout_6.addWidget(self.checkBoxShowDbPass)
self.formLayout_9.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_6)
self.label_25 = QtWidgets.QLabel(self.groupBox_9)
self.label_25.setObjectName("label_25")
self.formLayout_9.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_25)
self.lineEditDBNAME = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBNAME.setText("")
self.lineEditDBNAME.setObjectName("lineEditDBNAME")
self.formLayout_9.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEditDBNAME)
self.label_26 = QtWidgets.QLabel(self.groupBox_9)
self.label_26.setObjectName("label_26")
self.formLayout_9.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_26)
self.lineEditDBPORT = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBPORT.setText("")
self.lineEditDBPORT.setObjectName("lineEditDBPORT")
self.formLayout_9.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.lineEditDBPORT)
self.label_27 = QtWidgets.QLabel(self.groupBox_9)
self.label_27.setObjectName("label_27")
self.formLayout_9.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_27)
self.lineEditDBSOCKET = QtWidgets.QLineEdit(self.groupBox_9)
self.lineEditDBSOCKET.setText("")
self.lineEditDBSOCKET.setObjectName("lineEditDBSOCKET")
self.formLayout_9.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.lineEditDBSOCKET)
spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.formLayout_9.setItem(6, QtWidgets.QFormLayout.LabelRole, spacerItem7)
self.gridLayout_3.addWidget(self.groupBox_9, 1, 0, 1, 1)
self.stackedWidgetSQL.addWidget(self.page)
self.page_2 = QtWidgets.QWidget()
self.page_2.setObjectName("page_2")
self.formLayout_10 = QtWidgets.QFormLayout(self.page_2)
self.formLayout_10.setContentsMargins(0, 0, 0, 0)
self.formLayout_10.setObjectName("formLayout_10")
self.label_29 = QtWidgets.QLabel(self.page_2)
self.label_29.setObjectName("label_29")
self.formLayout_10.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_29)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.lineEditESQLOC = QtWidgets.QLineEdit(self.page_2)
self.lineEditESQLOC.setObjectName("lineEditESQLOC")
self.horizontalLayout_11.addWidget(self.lineEditESQLOC)
self.toolButtonESQLOC = QtWidgets.QToolButton(self.page_2)
self.toolButtonESQLOC.setObjectName("toolButtonESQLOC")
self.horizontalLayout_11.addWidget(self.toolButtonESQLOC)
self.formLayout_10.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_11)
self.label_34 = QtWidgets.QLabel(self.page_2)
self.label_34.setObjectName("label_34")
self.formLayout_10.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_34)
self.lineEditesqlOcExts = QtWidgets.QLineEdit(self.page_2)
self.lineEditesqlOcExts.setObjectName("lineEditesqlOcExts")
self.formLayout_10.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEditesqlOcExts)
self.stackedWidgetSQL.addWidget(self.page_2)
self.verticalLayout_9.addWidget(self.stackedWidgetSQL)
self.scrollArea_3.setWidget(self.scrollAreaWidgetContents_5)
self.gridLayout.addWidget(self.scrollArea_3, 1, 0, 1, 1)
self.label_28 = QtWidgets.QLabel(self.tabSqlCobol)
self.label_28.setOpenExternalLinks(True)
self.label_28.setObjectName("label_28")
self.gridLayout.addWidget(self.label_28, 0, 0, 1, 1)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/ide-icons/rc/database.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tabSqlCobol, icon5, "")
self.widget_2.addWidget(self.tabWidget, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.widget, 0, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(2)
self.stackedWidgetSQL.setCurrentIndex(0)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Preferences"))
self.widget.setAccessibleName(_translate("Dialog", "widget", "widget"))
self.groupBox_3.setTitle(_translate("Dialog", "View"))
self.label_10.setText(_translate("Dialog", "Display line numbers:"))
self.checkBoxViewLineNumber.setToolTip(_translate("Dialog", "Show/Hide line numbers"))
self.checkBoxViewLineNumber.setStatusTip(_translate("Dialog", "Show/Hide line numbers"))
self.label_11.setText(_translate("Dialog", "Highlight current line:"))
self.checkBoxHighlightCurrentLine.setToolTip(_translate("Dialog", "Highlight caret line"))
self.checkBoxHighlightCurrentLine.setStatusTip(_translate("Dialog", "Highlight caret line"))
self.label_12.setText(_translate("Dialog", "Highlight whitespaces:"))
self.checkBoxHighlightWhitespaces.setToolTip(_translate("Dialog", "Show visual whitespaces"))
self.checkBoxHighlightWhitespaces.setStatusTip(_translate("Dialog", "Show visual whitespaces"))
self.label_13.setText(_translate("Dialog", "Show errors:"))
self.checkBoxShowErrors.setToolTip(_translate("Dialog", "Compile your code on the fly and show errors while you\'re typing"))
self.label_38.setText(_translate("Dialog", "Show cursor position in bytes:"))
self.cb_cursor_pos_in_bytes.setToolTip(_translate("Dialog", "<html><head/><body><p>Check this if you want to see the cursor position expressed in bytes instead of characters (encoding is then taken into account).</p></body></html>"))
self.groupBox_11.setTitle(_translate("Dialog", "Margins"))
self.label_5.setText(_translate("Dialog", "Margin 1:"))
self.label_39.setText(_translate("Dialog", "Margin 2:"))
self.label_40.setText(_translate("Dialog", "Margin 3:"))
self.label_41.setText(_translate("Dialog", "Margin 4:"))
self.groupBox.setTitle(_translate("Dialog", "Comments"))
self.label_7.setText(_translate("Dialog", "Symbol"))
self.lineEditCommentIndicator.setText(_translate("Dialog", "*>"))
self.groupBox_2.setTitle(_translate("Dialog", "Indentation"))
self.label.setText(_translate("Dialog", "Width:"))
self.spinBoxEditorTabLen.setToolTip(_translate("Dialog", "Tab length (number of spaces)"))
self.spinBoxEditorTabLen.setStatusTip(_translate("Dialog", "Tab length (number of spaces)"))
self.checkBoxEditorAutoIndent.setToolTip(_translate("Dialog", "Enable/Disable automatic indentation"))
self.checkBoxEditorAutoIndent.setStatusTip(_translate("Dialog", "Enable/Disable automatic indentation"))
self.checkBoxEditorAutoIndent.setText(_translate("Dialog", "Automatic indentation"))
        self.checkBoxSmartBackspace.setToolTip(_translate("Dialog", "Backspace will act as shift+tab, i.e. it will eat as many spaces \n"
"as possible to get back to the previous indentation level."))
self.checkBoxSmartBackspace.setText(_translate("Dialog", "Intelligent backspace"))
self.groupBox_4.setTitle(_translate("Dialog", "Code completion"))
self.label_2.setText(_translate("Dialog", "Trigger length:"))
self.spinBoxEditorCCTriggerLen.setToolTip(_translate("Dialog", "Number of characters needed to trigger auto completion"))
self.spinBoxEditorCCTriggerLen.setStatusTip(_translate("Dialog", "Number of characters needed to trigger auto completion"))
self.label_16.setText(_translate("Dialog", "Proposed keywords:"))
self.rbLowerCaseKwds.setToolTip(_translate("Dialog", "All proposed keywords are lower-case"))
self.rbLowerCaseKwds.setText(_translate("Dialog", "&lower-case"))
self.rbUpperCaseKwds.setToolTip(_translate("Dialog", "All proposed keywords are UPPER-CASE"))
self.rbUpperCaseKwds.setText(_translate("Dialog", "&UPPER-CASE"))
self.label_31.setText(_translate("Dialog", "Filter mode:"))
self.comboCcFilterMode.setItemText(0, _translate("Dialog", "Prefix (faster)"))
self.comboCcFilterMode.setItemText(1, _translate("Dialog", "Subsequence (smarter)"))
self.groupBox_10.setTitle(_translate("Dialog", "EOL"))
self.label_30.setText(_translate("Dialog", "Preferred EOL:"))
self.comboBoxPreferredEOL.setItemText(0, _translate("Dialog", "System"))
self.comboBoxPreferredEOL.setItemText(1, _translate("Dialog", "Linux"))
self.comboBoxPreferredEOL.setItemText(2, _translate("Dialog", "Mac"))
self.comboBoxPreferredEOL.setItemText(3, _translate("Dialog", "Windows"))
self.checkBoxAutodetectEOL.setText(_translate("Dialog", "Auto detect EOL"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabEditor), _translate("Dialog", "Editor"))
self.groupBox_5.setTitle(_translate("Dialog", "Application style"))
self.radioButtonColorWhite.setToolTip(_translate("Dialog", "Use native style"))
self.radioButtonColorWhite.setStatusTip(_translate("Dialog", "Use native style"))
self.radioButtonColorWhite.setText(_translate("Dialog", "&Native"))
self.radioButtonColorDark.setToolTip(_translate("Dialog", "Use a global dark style (using QDarkStyleSheet)"))
self.radioButtonColorDark.setStatusTip(_translate("Dialog", "Use a global dark style (using QDarkStyleSheet)"))
self.radioButtonColorDark.setText(_translate("Dialog", "Dark"))
self.lblIconTheme.setText(_translate("Dialog", "Icon theme:"))
self.groupBox_6.setTitle(_translate("Dialog", "Editor font"))
self.label_3.setText(_translate("Dialog", "Editor font:"))
self.fontComboBox.setToolTip(_translate("Dialog", "Change editor font"))
self.fontComboBox.setStatusTip(_translate("Dialog", "Change editor font"))
self.label_4.setText(_translate("Dialog", "Font size:"))
self.spinBoxFontSize.setToolTip(_translate("Dialog", "Change editor font size"))
self.spinBoxFontSize.setStatusTip(_translate("Dialog", "Change editor font size"))
self.groupBox_7.setTitle(_translate("Dialog", "Editor color scheme"))
self.listWidgetColorSchemes.setToolTip(_translate("Dialog", "Pygments color schemes."))
self.listWidgetColorSchemes.setStatusTip(_translate("Dialog", "Pygments color schemes."))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabStyle), _translate("Dialog", "Style"))
self.label_compiler_path.setText(_translate("Dialog", "Compiler path:"))
        self.lineEditCompilerPath.setToolTip(_translate("Dialog", "<html><head/><body><p>GnuCOBOL compiler path (complete path, or just the executable name if its directory is in PATH).</p></body></html>"))
self.lineEditCompilerPath.setStatusTip(_translate("Dialog", "Full path to the GnuCOBOL compiler."))
self.toolButtonCustomCompilerPath.setText(_translate("Dialog", "..."))
self.toolButtonCheckCompiler.setText(_translate("Dialog", "Check compiler"))
self.groupBoxEnvVars.setTitle(_translate("Dialog", "Environment variables"))
self.cbPATH.setText(_translate("Dialog", "PATH:"))
self.cbCOB_CONFIG_DIR.setText(_translate("Dialog", "COB_CONFIG_DIR:"))
self.cbCOB_COPY_DIR.setText(_translate("Dialog", "COB_COPY_DIR:"))
self.cbCOB_INCLUDE_PATH.setText(_translate("Dialog", "COB_INCLUDE_PATH:"))
self.cbCOB_LIB_PATH.setText(_translate("Dialog", "COB_LIB_PATH:"))
self.cbAutoDetectSublmodules.setToolTip(_translate("Dialog", "<html><head/><body><p>If checked, the IDE will recursively look for CALL PROGRAM statements to build a list of submodules that will be automatically compiled when you\'re compiling the main program.</p></body></html>"))
self.cbAutoDetectSublmodules.setText(_translate("Dialog", "Auto-detect and compile submodules"))
self.labelVCVARS.setText(_translate("Dialog", "VCVARSALL path:"))
        self.lineEditVCVARS.setToolTip(_translate("Dialog", "<html><head/><body><p>Path to VCVARSALL.bat. It is needed if you\'re using a GnuCOBOL compiler built with Visual Studio.</p><p><br/>The VCVARSALL.bat file is located under the VC directory of your Visual Studio installation. Make sure to use the same version as the one used to build the compiler!</p></body></html>"))
self.toolButtonVCVARS.setText(_translate("Dialog", "..."))
self.combo_arch.setItemText(0, _translate("Dialog", "x86"))
self.combo_arch.setItemText(1, _translate("Dialog", "x64"))
self.label_36.setText(_translate("Dialog", "Output directory:"))
self.cb_copy_runtime_dlls.setText(_translate("Dialog", "Copy runtime dlls to output directory"))
self.label_32.setText(_translate("Dialog", "Associated extensions:"))
self.label_8.setText(_translate("Dialog", "Standard:"))
self.comboBoxStandard.setItemText(0, _translate("Dialog", "default"))
self.comboBoxStandard.setItemText(1, _translate("Dialog", "cobol2002"))
self.comboBoxStandard.setItemText(2, _translate("Dialog", "cobol85"))
self.comboBoxStandard.setItemText(3, _translate("Dialog", "ibm"))
self.comboBoxStandard.setItemText(4, _translate("Dialog", "mvs"))
self.comboBoxStandard.setItemText(5, _translate("Dialog", "bs2000"))
self.comboBoxStandard.setItemText(6, _translate("Dialog", "mf"))
self.comboBoxStandard.setItemText(7, _translate("Dialog", "cobol2014"))
self.comboBoxStandard.setItemText(8, _translate("Dialog", "acu"))
self.comboBoxStandard.setItemText(9, _translate("Dialog", "none"))
self.label_9.setText(_translate("Dialog", "Free format:"))
self.checkBoxFreeFormat.setToolTip(_translate("Dialog", "Code and compile with free format support"))
self.label_15.setText(_translate("Dialog", "Compiler flags"))
self.cb_ftrace.setToolTip(_translate("Dialog", "<html><head/><body><p>Generate trace code</p><p> - Executed SECTION/PARAGRAPH</p></body></html>"))
self.cb_ftrace.setText(_translate("Dialog", "-ftrace"))
self.cb_static.setToolTip(_translate("Dialog", "Link statically"))
self.cb_static.setText(_translate("Dialog", "-static"))
self.cb_g.setToolTip(_translate("Dialog", "Enable C compiler debug / stack check / trace"))
self.cb_g.setText(_translate("Dialog", "-g"))
self.cb_debugging_line.setToolTip(_translate("Dialog", "<html><head/><body><p>Enable debugging lines</p><p> - \'D\' in indicator column or floating >>D</p></body></html>"))
self.cb_debugging_line.setText(_translate("Dialog", "-fdebugging-line"))
self.cb_ftraceall.setToolTip(_translate("Dialog", "<html><head/><body><p>Generate trace code</p><p> - Executed SECTION/PARAGRAPH/STATEMENTS</p><p> - Turned on by -debug</p></body></html>"))
self.cb_ftraceall.setText(_translate("Dialog", "-ftraceall"))
self.cb_debug.setText(_translate("Dialog", "-debug"))
self.cb_w.setToolTip(_translate("Dialog", "Enable all warnings"))
self.cb_w.setText(_translate("Dialog", "-W"))
self.cb_wall.setToolTip(_translate("Dialog", "Enable most warnings"))
self.cb_wall.setText(_translate("Dialog", "-Wall"))
self.label_35.setText(_translate("Dialog", "Extra compiler flags"))
self.label_37.setText(_translate("Dialog", "Copybook paths:"))
        self.listWidgetCopyPaths.setToolTip(_translate("Dialog", "The list of copybook paths. You can use drag & drop to reorder them."))
self.btAddAbsoluteCopyPath.setText(_translate("Dialog", "..."))
self.btAddRelativeCopyPath.setText(_translate("Dialog", "..."))
self.btRemoveCopyPath.setText(_translate("Dialog", "..."))
self.label_17.setText(_translate("Dialog", "Library paths:"))
self.listWidgetLibPaths.setToolTip(_translate("Dialog", "The list of library paths. You can use drag & drop to reorder them."))
self.toolButtonAddLibPath.setToolTip(_translate("Dialog", "Add an absolute library path"))
self.toolButtonAddLibPath.setText(_translate("Dialog", "..."))
self.toolButtonAddRelativeLibPath.setToolTip(_translate("Dialog", "Add a relative library path"))
self.toolButtonAddRelativeLibPath.setText(_translate("Dialog", "..."))
self.toolButtonRemoveLibPath.setText(_translate("Dialog", "..."))
self.label_18.setText(_translate("Dialog", "Libraries"))
self.lineEditLibs.setToolTip(_translate("Dialog", "<html><head/><body><p>Add the libraries you would like your programs to link with here (-l option)</p></body></html>"))
self.le_compiler_flags.setToolTip(_translate("Dialog", "You can add other compiler flags here"))
self.btCompilerFlagsHelp.setText(_translate("Dialog", "?"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabCompiler), _translate("Dialog", "Compiler"))
self.lbl_external_terminal_command.setText(_translate("Dialog", "External terminal command:"))
self.lineEditRunTerm.setToolTip(_translate("Dialog", "External terminal command (filename is appended at the end of the command)"))
self.lineEditRunTerm.setStatusTip(_translate("Dialog", "External terminal command"))
self.label_42.setText(_translate("Dialog", "Working Directory:"))
self.bt_working_dir.setText(_translate("Dialog", "..."))
self.checkBoxRunExtTerm.setText(_translate("Dialog", "Run in external terminal"))
self.label_43.setText(_translate("Dialog", "Terminal mode:"))
self.groupBox_12.setTitle(_translate("Dialog", "Environment"))
item = self.tw_run_env.horizontalHeaderItem(0)
item.setText(_translate("Dialog", "Key"))
item = self.tw_run_env.horizontalHeaderItem(1)
item.setText(_translate("Dialog", "Value"))
self.bt_add_run_env.setText(_translate("Dialog", "Add "))
self.bt_rm_run_env.setText(_translate("Dialog", "Remove"))
self.bt_clear_run_env.setText(_translate("Dialog", "Clear"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabRun), _translate("Dialog", "Run"))
self.groupBox_8.setTitle(_translate("Dialog", "DBPRE Configuration"))
self.label_19.setText(_translate("Dialog", "dbpre"))
self.toolButtonDbpre.setText(_translate("Dialog", "..."))
self.labelDbpreVersion.setText(_translate("Dialog", "invalid dbpre executable"))
self.label_20.setText(_translate("Dialog", "cobmysqlapi:"))
self.toolButtonCobMySqlApiPath.setText(_translate("Dialog", "..."))
self.label_21.setText(_translate("Dialog", "Framework:"))
self.toolButtonDbpreFramework.setText(_translate("Dialog", "..."))
self.label_33.setText(_translate("Dialog", "Associated extensions:"))
self.groupBox_9.setTitle(_translate("Dialog", "DB Connection Parameters"))
self.label_22.setText(_translate("Dialog", "DBHOST:"))
self.label_23.setText(_translate("Dialog", "DBUSER:"))
self.label_24.setText(_translate("Dialog", "DBPASSWD:"))
self.checkBoxShowDbPass.setText(_translate("Dialog", "Show password"))
self.label_25.setText(_translate("Dialog", "DBNAME:"))
self.label_26.setText(_translate("Dialog", "DBPORT:"))
self.label_27.setText(_translate("Dialog", "DBSOCKET:"))
self.label_29.setText(_translate("Dialog", "esqlOC folder:"))
self.toolButtonESQLOC.setText(_translate("Dialog", "..."))
self.label_34.setText(_translate("Dialog", "Associated extensions:"))
self.label_28.setText(_translate("Dialog", "<html><head/><body><p align=\"center\">Read the <a href=\"http://opencobolide.readthedocs.org/en/latest/advanced.html#how-to-setup-dbpre-integration-with-opencobolide\"><span style=\" text-decoration: underline; color:#2980b9;\">guide</span></a> to get started</p></body></html>"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabSqlCobol), _translate("Dialog", "SQL COBOL"))
from open_cobol_ide.view.editors import CobolCodeEdit
from open_cobol_ide.view.widgets import ColorPicker, PathLineEdit
from . import ide_rc
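# Typical use of this generated form class (a sketch; "Ui_Dialog" is the
# assumed class name, following the usual pyuic5 convention, and is not
# confirmed by the lines above):
#
#     dialog = QtWidgets.QDialog()
#     ui = Ui_Dialog()
#     ui.setupUi(dialog)
#     dialog.exec_()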
|
OpenCobolIDE/OpenCobolIDE
|
open_cobol_ide/view/forms/dlg_preferences_ui.py
|
Python
|
gpl-3.0
| 72,494
|
# -*- coding: utf-8 -*-
"""
================================================
Following the Metal to Mott insulator Transition
================================================
Sequence of plots showing the transfer of spectral weight for a Hubbard
model on the Bethe lattice as the local doping is increased.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
from __future__ import division, absolute_import, print_function
import matplotlib.pyplot as plt
import numpy as np
from slaveparticles.quantum import dos
axis = 'real'
u = 8.0
beta = 1e3
dop = [0.25, 0.5, 0.75, 0.9, 0.99]
out_file = axis+'_dop_b{}_U{}'.format(beta, u)
res = np.load(out_file+'.npy')
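# Each row of `res` pairs a doping value (column 0) with the corresponding
# simulation result object (column 1); rows are looked up by doping below.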
f, axes = plt.subplots(len(dop), sharex=True)
axes[0].set_title(r'$A(\omega)$ under doping U={} at '
'$\\beta=${}'.format(u, beta))
axes[-1].set_xlabel('$\\omega / t$')
f.subplots_adjust(hspace=0)
for ax, n in zip(axes, dop):
ind = np.abs(res[:, 0] - n).argmin()
sim = res[ind, 1]
w = sim.omega
s = sim.GF[r'$\Sigma$']
ra = w + sim.mu - s
rho = dos.bethe_lattice(ra, sim.t)
ax.plot(w, rho,
label='n={:.2f}'.format(sim.ocupations().sum()))
ax.set_xlim([-6, 6])
ax.set_ylim([0, 0.36])
ax.set_yticks([])
ax.set_ylabel('n={:.2f}'.format(sim.ocupations().sum()))
ax.legend(loc=0, handlelength=0)
|
Titan-C/learn-dmft
|
examples/twosite/plot_dop_A.py
|
Python
|
gpl-3.0
| 1,345
|
# -*- coding: utf-8 -*-
#
"""
test_url.py
websocket - WebSocket client library for Python
Copyright 2021 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import unittest
sys.path[0:0] = [""]
from websocket._url import get_proxy_info, parse_url, _is_address_in_network, _is_no_proxy_host
class UrlTest(unittest.TestCase):
def test_address_in_network(self):
self.assertTrue(_is_address_in_network('127.0.0.1', '127.0.0.0/8'))
self.assertTrue(_is_address_in_network('127.1.0.1', '127.0.0.0/8'))
self.assertFalse(_is_address_in_network('127.1.0.1', '127.0.0.0/24'))
def testParseUrl(self):
p = parse_url("ws://www.example.com/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/r/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("wss://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://www.example.com:8080/r?key=value")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r?key=value")
self.assertEqual(p[3], True)
self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
p = parse_url("ws://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("wss://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 443)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
class IsNoProxyHostTest(unittest.TestCase):
def setUp(self):
self.no_proxy = os.environ.get("no_proxy", None)
if "no_proxy" in os.environ:
del os.environ["no_proxy"]
def tearDown(self):
if self.no_proxy:
os.environ["no_proxy"] = self.no_proxy
elif "no_proxy" in os.environ:
del os.environ["no_proxy"]
def testMatchAll(self):
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['*']))
self.assertTrue(_is_no_proxy_host("192.168.0.1", ['*']))
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['other.websocket.org', '*']))
os.environ['no_proxy'] = '*'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
self.assertTrue(_is_no_proxy_host("192.168.0.1", None))
os.environ['no_proxy'] = 'other.websocket.org, *'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
def testIpAddress(self):
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.1']))
self.assertFalse(_is_no_proxy_host("127.0.0.2", ['127.0.0.1']))
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['other.websocket.org', '127.0.0.1']))
self.assertFalse(_is_no_proxy_host("127.0.0.2", ['other.websocket.org', '127.0.0.1']))
os.environ['no_proxy'] = '127.0.0.1'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
os.environ['no_proxy'] = 'other.websocket.org, 127.0.0.1'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
def testIpAddressInRange(self):
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.0/8']))
self.assertTrue(_is_no_proxy_host("127.0.0.2", ['127.0.0.0/8']))
self.assertFalse(_is_no_proxy_host("127.1.0.1", ['127.0.0.0/24']))
os.environ['no_proxy'] = '127.0.0.0/8'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertTrue(_is_no_proxy_host("127.0.0.2", None))
os.environ['no_proxy'] = '127.0.0.0/24'
self.assertFalse(_is_no_proxy_host("127.1.0.1", None))
def testHostnameMatch(self):
self.assertTrue(_is_no_proxy_host("my.websocket.org", ['my.websocket.org']))
self.assertTrue(_is_no_proxy_host("my.websocket.org", ['other.websocket.org', 'my.websocket.org']))
self.assertFalse(_is_no_proxy_host("my.websocket.org", ['other.websocket.org']))
os.environ['no_proxy'] = 'my.websocket.org'
self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
self.assertFalse(_is_no_proxy_host("other.websocket.org", None))
os.environ['no_proxy'] = 'other.websocket.org, my.websocket.org'
self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
def testHostnameMatchDomain(self):
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['.websocket.org']))
self.assertTrue(_is_no_proxy_host("my.other.websocket.org", ['.websocket.org']))
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['my.websocket.org', '.websocket.org']))
self.assertFalse(_is_no_proxy_host("any.websocket.com", ['.websocket.org']))
os.environ['no_proxy'] = '.websocket.org'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
self.assertTrue(_is_no_proxy_host("my.other.websocket.org", None))
self.assertFalse(_is_no_proxy_host("any.websocket.com", None))
os.environ['no_proxy'] = 'my.websocket.org, .websocket.org'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
class ProxyInfoTest(unittest.TestCase):
def setUp(self):
self.http_proxy = os.environ.get("http_proxy", None)
self.https_proxy = os.environ.get("https_proxy", None)
self.no_proxy = os.environ.get("no_proxy", None)
if "http_proxy" in os.environ:
del os.environ["http_proxy"]
if "https_proxy" in os.environ:
del os.environ["https_proxy"]
if "no_proxy" in os.environ:
del os.environ["no_proxy"]
def tearDown(self):
if self.http_proxy:
os.environ["http_proxy"] = self.http_proxy
elif "http_proxy" in os.environ:
del os.environ["http_proxy"]
if self.https_proxy:
os.environ["https_proxy"] = self.https_proxy
elif "https_proxy" in os.environ:
del os.environ["https_proxy"]
if self.no_proxy:
os.environ["no_proxy"] = self.no_proxy
elif "no_proxy" in os.environ:
del os.environ["no_proxy"]
def testProxyFromArgs(self):
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128),
("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128),
("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(
get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(
get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
no_proxy=["example.com"], proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
no_proxy=["echo.websocket.org"], proxy_auth=("a", "b")),
(None, 0, None))
def testProxyFromEnv(self):
os.environ["http_proxy"] = "http://localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, None))
os.environ["http_proxy"] = "http://a:b@localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://john%40example.com:P%40SSWORD@localhost:3128/"
os.environ["https_proxy"] = "http://john%40example.com:P%40SSWORD@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("john@example.com", "P@SSWORD")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
os.environ["no_proxy"] = "example1.com,example2.com"
self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, .websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "127.0.0.0/8, 192.168.0.0/16"
self.assertEqual(get_proxy_info("127.0.0.1", False), (None, 0, None))
self.assertEqual(get_proxy_info("192.168.1.1", False), (None, 0, None))
if __name__ == "__main__":
unittest.main()
|
JonnyWong16/plexpy
|
lib/websocket/tests/test_url.py
|
Python
|
gpl-3.0
| 14,537
|
""" Script for building the Pieman package. """
from setuptools import setup
try:
import pypandoc
LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (ImportError, OSError):
# OSError is raised when pandoc is not installed.
LONG_DESCRIPTION = ('Utilities written in Python which are used by '
'Pieman, script for creating custom OS images for '
'single-board computers.')
with open('requirements.txt') as outfile:
REQUIREMENTS_LIST = outfile.read().splitlines()
setup(name='pieman',
version='0.19.0',
description='Pieman package',
long_description=LONG_DESCRIPTION,
url='https://github.com/tolstoyevsky/pieman',
author='Evgeny Golyshev',
maintainer='Evgeny Golyshev',
maintainer_email='eugulixes@gmail.com',
license='https://gnu.org/licenses/gpl-3.0.txt',
scripts=[
'bin/apk_tools_version.py',
'bin/bsc.py',
'bin/bscd.py',
'bin/check_mutually_exclusive_params.py',
'bin/check_redis.py',
'bin/check_wpa_passphrase.py',
'bin/depend_on.py',
'bin/du.py',
'bin/image_attrs.py',
'bin/preprocessor.py',
'bin/render.py',
'bin/wget.py',
],
packages=['pieman'],
include_package_data=True,
data_files=[
('', ['requirements.txt']),
('pieman', ['pieman/build_status_codes']),
],
install_requires=REQUIREMENTS_LIST)
|
tolstoyevsky/pieman
|
pieman/setup.py
|
Python
|
gpl-3.0
| 1,517
|
import sys, re
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
            print('-'*50)
            print(fn, obj)
            print('.'*50)
            print(doc)
            print('.'*50)
            print([ss[0] for ss in s])
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/
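# The same substitution as a Python sketch (mirrors the vim command above):
#     fixed = re.sub(r'([^`:])([`*])([a-zA-Z0-9_]+)\2', r'\1``\3``', doc)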
|
pyrocko/pyrocko
|
maintenance/docstring_cop.py
|
Python
|
gpl-3.0
| 543
|
from vsg import parser
class if_label(parser.label):
'''
unique_id = if_statement : if_label
'''
def __init__(self, sString):
parser.label.__init__(self, sString)
class label_colon(parser.label_colon):
'''
unique_id = if_statement : label_colon
'''
def __init__(self):
parser.label_colon.__init__(self)
class if_keyword(parser.keyword):
'''
unique_id = if_statement : if_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class then_keyword(parser.keyword):
'''
unique_id = if_statement : then_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class elsif_keyword(parser.keyword):
'''
unique_id = if_statement : elsif_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class else_keyword(parser.keyword):
'''
unique_id = if_statement : else_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class end_keyword(parser.keyword):
'''
unique_id = if_statement : end_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class end_if_keyword(parser.keyword):
'''
unique_id = if_statement : end_if_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class end_if_label(parser.label):
'''
unique_id = if_statement : end_if_label
'''
def __init__(self, sString):
parser.label.__init__(self, sString)
class semicolon(parser.semicolon):
'''
unique_id = if_statement : semicolon
'''
def __init__(self, sString=';'):
parser.semicolon.__init__(self)
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/token/if_statement.py
|
Python
|
gpl-3.0
| 1,778
|
"""This module contains constants used by the Lifemapper web services
"""
import os
from LmServer.base.utilities import get_mjd_time_from_iso_8601
from LmServer.common.lmconstants import SESSION_DIR
from LmServer.common.localconstants import SCRATCH_PATH, APP_PATH
from LmWebServer.common.localconstants import PACKAGING_DIR
# CherryPy constants
SESSION_PATH = os.path.join(SCRATCH_PATH, SESSION_DIR)
SESSION_KEY = '_cp_username'
REFERER_KEY = 'lm_referer'
# Results package constants
GRIDSET_DIR = 'gridset'
MATRIX_DIR = os.path.join(GRIDSET_DIR, 'matrix')
SDM_PRJ_DIR = os.path.join(GRIDSET_DIR, 'sdm')
DYN_PACKAGE_DIR = 'package'
STATIC_PACKAGE_PATH = os.path.join(APP_PATH, PACKAGING_DIR)
MAX_PROJECTIONS = 1000
# .............................................................................
class HTTPMethod:
"""Constant class for HTTP methods
"""
DELETE = 'DELETE'
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
# .............................................................................
def sci_name_prep(name):
    """Prepare a scientific name for use in queries.

    Removes spaces, plus signs, and commas (including their URL-encoded
    forms) and truncates the result to 20 characters.
    """
    strip_chars = [' ', '+', '%20', ',', '%2C']
    for strip_chr in strip_chars:
        name = name.replace(strip_chr, '')
    return name[:20]
# .............................................................................
def boolify_parameter(param, default=True):
"""Convert an input query parameter to boolean."""
try:
# If zero or one
return bool(int(param))
except ValueError:
try:
# Try processing a string
str_val = param.lower().strip()
if str_val == 'false' or str_val == 'no':
return False
if str_val == 'true' or str_val == 'yes':
return True
except Exception:
pass
# Return default if we can't figure it out
return default
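# Example behaviour of boolify_parameter (values are illustrative):
#     boolify_parameter('1')                      -> True
#     boolify_parameter('no')                     -> False
#     boolify_parameter('perhaps', default=False) -> False  (unparseable)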
# This constant is used for processing query parameters.  If an entry has no
#    'process_in' key, the parameter is taken just as it comes in.
# Note: The dictionary keys are the .lower() version of the parameter names.
#    The 'name' value of each key is what it gets translated to.
#    The point of this structure is to allow query parameters to be
#    case-insensitive.
QP_NAME_KEY = 'name'
QP_PROCESS_KEY = 'process_in'
QUERY_PARAMETERS = {
'afterstatus': {
QP_NAME_KEY: 'after_status',
QP_PROCESS_KEY: int
},
'aftertime': {
QP_NAME_KEY: 'after_time',
QP_PROCESS_KEY: get_mjd_time_from_iso_8601
},
'agent': {
QP_NAME_KEY: 'agent'
},
'algorithmcode': {
QP_NAME_KEY: 'algorithm_code',
},
'altpredcode': {
QP_NAME_KEY: 'alt_pred_code'
},
'archivename': {
QP_NAME_KEY: 'archive_name'
},
'atom': {
QP_NAME_KEY: 'atom',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'beforestatus': {
QP_NAME_KEY: 'before_status',
QP_PROCESS_KEY: int
},
'beforetime': {
QP_NAME_KEY: 'before_time',
QP_PROCESS_KEY: get_mjd_time_from_iso_8601
},
'bbox': {
# Comes in as a comma separated list, turn it into a tuple of floats
QP_NAME_KEY: 'bbox',
# QP_PROCESS_KEY: lambda x: [float(i) for i in x.split(',')]
},
'bgcolor': {
QP_NAME_KEY: 'bgcolor',
},
'canonicalname': {
QP_NAME_KEY: 'canonical_name'
},
'catalognumber': {
QP_NAME_KEY: 'catalog_number'
},
'cellsides': {
QP_NAME_KEY: 'cell_sides',
QP_PROCESS_KEY: int
},
'cellsize': {
QP_NAME_KEY: 'cell_size',
QP_PROCESS_KEY: float
},
'collection': {
QP_NAME_KEY: 'collection'
},
'color': {
QP_NAME_KEY: 'color',
},
'coverage': {
QP_NAME_KEY: 'coverage'
},
'crs': {
# TODO: Consider processing the EPSG here
QP_NAME_KEY: 'crs'
},
'datecode': {
QP_NAME_KEY: 'date_code'
},
'detail': {
QP_NAME_KEY: 'detail',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'displayname': {
QP_NAME_KEY: 'display_name'
},
'docalc': {
QP_NAME_KEY: 'do_calc',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'domcpa': {
QP_NAME_KEY: 'do_mcpa',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'envcode': {
QP_NAME_KEY: 'env_code'
},
'envtypeid': {
QP_NAME_KEY: 'env_type_id',
QP_PROCESS_KEY: int
},
'epsgcode': {
QP_NAME_KEY: 'epsg_code',
QP_PROCESS_KEY: int
},
'exceptions': {
QP_NAME_KEY: 'exceptions'
},
'filename': {
QP_NAME_KEY: 'file_name'
},
'fillpoints': {
QP_NAME_KEY: 'fill_points',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'format': {
# TODO: Forward to respFormat since format is reserved
QP_NAME_KEY: 'format_',
},
'gcmcode': {
QP_NAME_KEY: 'gcm_code',
},
'gridsetid': {
QP_NAME_KEY: 'gridset_id',
QP_PROCESS_KEY: int
},
'hasbranchlengths': {
QP_NAME_KEY: 'has_branch_lengths',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'height': {
QP_NAME_KEY: 'height',
QP_PROCESS_KEY: int
},
'ident1': {
QP_NAME_KEY: 'ident1'
},
'ident2': {
QP_NAME_KEY: 'ident2'
},
'includecsvs': {
QP_NAME_KEY: 'include_csvs',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'includesdms': {
QP_NAME_KEY: 'include_sdms',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'isbinary': {
QP_NAME_KEY: 'is_binary',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'isultrametric': {
QP_NAME_KEY: 'is_ultrametric',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'keyword': {
QP_NAME_KEY: 'keyword',
QP_PROCESS_KEY: lambda x: [float(x)]
},
'layer': {
QP_NAME_KEY: 'layer'
},
'layers': {
QP_NAME_KEY: 'layers',
# QP_PROCESS_KEY: lambda x: [i for i in x.split(',')]
},
'layertype': {
QP_NAME_KEY: 'layer_type',
QP_PROCESS_KEY: int
},
'limit': {
QP_NAME_KEY: 'limit',
QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one
},
'map': {
QP_NAME_KEY: 'map_name'
},
'mapname': {
QP_NAME_KEY: 'map_name'
},
'matrixtype': {
QP_NAME_KEY: 'matrix_type',
QP_PROCESS_KEY: int
},
'metadata': {
QP_NAME_KEY: 'metadata'
},
'metastring': {
QP_NAME_KEY: 'meta_string'
},
'modelscenariocode': {
QP_NAME_KEY: 'model_scenario_code'
},
'minimumnumberofpoints': {
QP_NAME_KEY: 'minimum_number_of_points',
QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one
},
'numpermutations': {
QP_NAME_KEY: 'num_permutations',
QP_PROCESS_KEY: int
},
'occurrencesetid': {
QP_NAME_KEY: 'occurrence_set_id',
QP_PROCESS_KEY: int
},
'operation': {
QP_NAME_KEY: 'operation'
},
'offset': {
QP_NAME_KEY: 'offset',
QP_PROCESS_KEY: lambda x: max(0, int(x)) # Integer, minimum is zero
},
'pathbiogeoid': {
QP_NAME_KEY: 'path_biogeo_id'
},
'pathgridsetid': {
QP_NAME_KEY: 'path_gridset_id'
},
'pathlayerid': {
QP_NAME_KEY: 'path_layer_id'
},
'pathmatrixid': {
QP_NAME_KEY: 'path_matrix_id'
},
'pathoccsetid': {
QP_NAME_KEY: 'path_occset_id'
},
'pathprojectionid': {
QP_NAME_KEY: 'path_projection_id'
},
'pathscenarioid': {
QP_NAME_KEY: 'path_scenario_id'
},
'pathscenariopackageid': {
QP_NAME_KEY: 'path_scenario_package_id'
},
'pathshapegridid': {
QP_NAME_KEY: 'path_shapegrid_id'
},
'pathtreeid': {
QP_NAME_KEY: 'path_tree_id'
},
'pointmax': {
QP_NAME_KEY: 'point_max',
QP_PROCESS_KEY: int
},
'pointmin': {
QP_NAME_KEY: 'point_min',
QP_PROCESS_KEY: int
},
'projectionscenariocode': {
QP_NAME_KEY: 'projection_scenario_code'
},
'provider': {
QP_NAME_KEY: 'provider'
},
'request': {
QP_NAME_KEY: 'request'
},
'resolution': {
QP_NAME_KEY: 'resolution'
},
'scenariocode': {
QP_NAME_KEY: 'scenario_code'
},
'scenarioid': {
QP_NAME_KEY: 'scenario_id',
QP_PROCESS_KEY: int
},
'scientificname': {
QP_NAME_KEY: 'scientific_name',
QP_PROCESS_KEY: sci_name_prep
},
'searchstring': {
QP_NAME_KEY: 'search_string'
},
'service': {
QP_NAME_KEY: 'service'
},
'shapegridid': {
QP_NAME_KEY: 'shapegrid_id'
},
'sld': {
QP_NAME_KEY: 'sld'
},
'sldbody': {
QP_NAME_KEY: 'sld_body'
},
'squid': {
QP_NAME_KEY: 'squid',
# TODO: Evaluate what needs to be done to process into list
QP_PROCESS_KEY: lambda x: x
},
'srs': {
# TODO: Forward to crs for WMS 1.3.0?
QP_NAME_KEY: 'srs'
},
'status': {
QP_NAME_KEY: 'status',
QP_PROCESS_KEY: int
},
'styles': {
QP_NAME_KEY: 'styles',
# QP_PROCESS_KEY: lambda x: [i for i in x.split(',')]
},
'taxonclass': {
QP_NAME_KEY: 'class_'
},
'taxonfamily': {
QP_NAME_KEY: 'family'
},
'taxongenus': {
QP_NAME_KEY: 'genus'
},
'taxonkingdom': {
QP_NAME_KEY: 'kingdom'
},
'taxonorder': {
QP_NAME_KEY: 'order_'
},
'taxonphylum': {
QP_NAME_KEY: 'phylum'
},
'taxonspecies': {
QP_NAME_KEY: 'species'
},
'time': {
QP_NAME_KEY: 'time'
},
'transparent': {
QP_NAME_KEY: 'transparent',
# QP_PROCESS_KEY: lambda x: bool(x.lower() == 'true')
},
'treename': {
QP_NAME_KEY: 'name' # Map to 'name' for processing
},
'treeschema': {
QP_NAME_KEY: 'tree_schema'
},
'file': {
QP_NAME_KEY: 'file'
},
'uploadtype': {
QP_NAME_KEY: 'upload_type'
},
'url': {
QP_NAME_KEY: 'url'
},
'user': {
QP_NAME_KEY: 'url_user',
QP_PROCESS_KEY: lambda x: x
},
'version': {
QP_NAME_KEY: 'version'
},
'who': {
QP_NAME_KEY: 'who'
},
'why': {
QP_NAME_KEY: 'why'
},
'width': {
QP_NAME_KEY: 'width',
QP_PROCESS_KEY: int
},
# Authentication parameters
'address1': {
QP_NAME_KEY: 'address1'
},
'address2': {
QP_NAME_KEY: 'address2'
},
'address3': {
QP_NAME_KEY: 'address3'
},
'phone': {
QP_NAME_KEY: 'phone'
},
'email': {
QP_NAME_KEY: 'email'
},
'firstname': {
QP_NAME_KEY: 'first_name'
},
'institution': {
QP_NAME_KEY: 'institution'
},
'lastname': {
QP_NAME_KEY: 'last_name'
},
'pword': {
QP_NAME_KEY: 'pword'
},
'pword1': {
QP_NAME_KEY: 'pword1'
},
'userid': {
QP_NAME_KEY: 'user_id'
},
}
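# Illustrative sketch (not part of the original module): how a request
# handler might use the QUERY_PARAMETERS table to normalize raw,
# case-insensitive query parameters.  The helper name and the example
# values are hypothetical.
def _normalize_query_params_example(raw_params):
    """Translate raw query parameter names and process their values."""
    normalized = {}
    for key, value in raw_params.items():
        entry = QUERY_PARAMETERS.get(key.lower())
        if entry is None:
            continue  # unknown parameters are ignored in this sketch
        if QP_PROCESS_KEY in entry:
            value = entry[QP_PROCESS_KEY](value)
        normalized[entry[QP_NAME_KEY]] = value
    return normalized
# e.g. {'Limit': '0', 'atom': 'false'} -> {'limit': 1, 'atom': False}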
# Kml
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
KML_NS_PREFIX = None
# .............................................................................
class APIPostKeys:
"""This class contains constants for API JSON POST keys
"""
ALGORITHM = 'algorithm'
ALGORITHM_CODE = 'code'
ALGORITHM_PARAMETERS = 'parameters'
ARCHIVE_NAME = 'archive_name'
BUFFER = 'buffer'
CELL_SIDES = 'cell_sides'
DELIMITER = 'delimiter'
DO_PAM_STATS = 'compute_pam_stats'
DO_MCPA = 'compute_mcpa'
GLOBAL_PAM = 'global_pam'
HULL_REGION = 'hull_region_intersect_mask'
INTERSECT_PARAMETERS = 'intersect_parameters'
MAX_PRESENCE = 'max_presence'
MAX_X = 'maxx'
MAX_Y = 'maxy'
MCPA = 'mcpa'
MIN_PERCENT = 'min_percent'
MIN_POINTS = 'point_count_min'
MIN_PRESENCE = 'min_presence'
MIN_X = 'minx'
MIN_Y = 'miny'
MODEL_SCENARIO = 'model_scenario'
NAME = 'name'
OCCURRENCE = 'occurrence'
OCCURRENCE_IDS = 'occurrence_ids'
PACKAGE_FILENAME = 'scenario_package_filename'
PACKAGE_NAME = 'scenario_package_name'
PAM_STATS = 'pam_stats'
POINTS_FILENAME = 'points_filename'
PROJECTION_SCENARIO = 'projection_scenario'
REGION = 'region'
RESOLUTION = 'resolution'
SCENARIO_CODE = 'scenario_code'
SCENARIO_PACKAGE = 'scenario_package'
SDM = 'sdm'
SHAPEGRID = 'shapegrid'
TAXON_IDS = 'taxon_ids'
TAXON_NAMES = 'taxon_names'
TREE = 'tree'
TREE_FILENAME = 'tree_file_name'
VALUE_NAME = 'value_name'
|
lifemapper/core
|
LmWebServer/common/lmconstants.py
|
Python
|
gpl-3.0
| 13,281
|
from mrjob.job import MRJob
from mrjob.step import MRStep
def get_id_from_line(line):
if line.find('.","Message-ID: <') > 0:
start = line.find("Message-ID")+13
        stop = len(line)  # fallback: the id runs to the end of the line
        i = 0
        for char in line[start:]:
            i = i + 1
            if not (char.isdigit() or char == '.'):
                stop = i + start - 2
                break
        return line[start:stop]
class MRMultilineInput(MRJob):
def steps(self):
        # Two-step job: step 1 stitches multi-line mail records together,
        # keyed by message id; step 2 extracts and classifies the fields.
        return [
            MRStep(mapper_init=self.mapper_init_count,
                   mapper=self.mapper_count),
            MRStep(mapper=self.mapper_child)
        ]
# STEP 1
def mapper_init_count(self):
self.message_id = ''
self.in_body = False
self.body = []
self.after_key = False
self.beginning = False
self.key = False
def mapper_count(self, _, line):
line = line.strip()
if (line.find('.","Message-ID: <') > 0) and self.in_body and not self.beginning:
yield self.message_id, self.body
self.message_id = ''
self.body = []
self.in_body = False
self.after_key = False
self.beginning = False
self.key = False
if self.in_body and not self.after_key:
self.beginning = False
self.body.append(line)
if line.find('.","Message-ID: <') > 0 and not self.key:
if not self.in_body:
self.in_body = True
self.beginning = True
self.after_key = True
self.key = True
start = line.find("Message-ID")+13
            stop = len(line)  # fallback: the id runs to the end of the line
            i = 0
            for char in line[start:]:
                i = i + 1
                if not (char.isdigit() or char == '.'):
                    stop = i + start - 2
                    break
            self.message_id = line[start:stop]
self.after_key = False
# STEP 2
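    # mapper_child: collect the Date and From header fields (the message
    # body is currently stubbed out as "TEST BODY"), then classify each
    # message by its Subject line: "Re:" -> child, "Fw:" -> unclassified,
    # anything else -> parent.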
def mapper_child(self, message_id, values):
clean_body = ''
clean_date = ''
clean_from = ''
clean_to = ''
clean_values = []
start = 0
for idx, line in enumerate(values):
if "Date:" in line:
clean_date = line[5:].strip()
if line.find("From:") == 0:
clean_from = line[5:].strip()
if line.find("To:") == 0:
clean_to = line[3:].strip()
if "X-FileName:" in line:
start = idx+1
break
for i in range(start,len(values)):
if "-Original Message-" in values[i]:
break
clean_body=clean_body + values[i] + " "
clean_values.append(clean_date)
clean_values.append(clean_from)
#clean_values.append(clean_to)
#clean_values.append(clean_body.strip())
clean_values.append("TEST BODY")
        newval = values
        subject = ''  # fallback when no Subject header is found
        for element in values:
if "subject:" in element.lower():
subject = element
break
if "re:" in subject.lower():
newval.append("child")
elif "fw:" not in subject.lower():
newval.append("parent")
for element in newval:
if "Subject:" in element:
subject = element
break
relation = values[-1]
i = 0
colon = 0
if "<" not in subject:
for char in subject:
i=i+1
if char == ":":
colon = i
sub = subject[colon+1:].strip()
sub_relation = []
sub_relation.append(sub)
sub_relation.append(relation)
yield sub_relation, (message_id,clean_values)
if __name__ == '__main__':
MRMultilineInput.run()
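# Usage sketch (standard mrjob invocation; the file names are illustrative):
#     python hive_prep.py enron_mails.csv > prepared_records.txt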
|
tokamstud/enron-analysis
|
src/complex/hive_prep.py
|
Python
|
gpl-3.0
| 2,895
|
# -*- coding: utf-8 -*-
from MainWindow import Controller
|
bkolada/koalocleaner
|
uis/__init__.py
|
Python
|
gpl-3.0
| 59
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from socketio import sdjango
from web.api import EventResource, SummaryFeedResource, SummaryFeedByCountryCodeResource
from tastypie.api import Api
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
sdjango.autodiscover()
v1_api = Api(api_name='v1')
#api_test = EventResource()
v1_api.register(SummaryFeedResource())
#v1_api.register(SummaryFeedByCountryCodeResource())
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'malwarez.views.home', name='home'),
# url(r'^malwarez/', include('malwarez.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'web.views.index'),
url(r'^test/$', 'web.views.test'),
url(r'^socket\.io', include(sdjango.urls)),
url(r'^api/', include(v1_api.urls)),
url(r'^summary-country/(?P<countryCode>\w+)/$', 'web.views.getSummaryByCountry'),
    # TODO: be more specific; the country parameter should be only 3 chars long
url(r'^top/(?P<type>\w+)/(?P<country>\w+)/$', 'web.views.getTopFive'),
url(r'^top/(?P<type>\w+)/$', 'web.views.getTopFive'),
#url(r'^detail/(?P<type>\w+)/(?P<country>\w+)/(?P<data>\w+)/$', 'web.views.getDetail'),
url(r'^detail/(?P<type>\w+)/(?P<data>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/$', 'web.views.getDetail'),
url(r'^detail/(?P<type>\w+)/(?P<data>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/(?P<country>\w+)/$', 'web.views.getDetail'),
url(r'filter/malware', 'web.views.getDiversityMalware'),
url(r'filter/ip', 'web.views.getDiversityIP')
)
urlpatterns += staticfiles_urlpatterns()
|
YakindanEgitim/malwarez
|
malwarez/urls.py
|
Python
|
gpl-3.0
| 1,882
|
import numpy
import logging
import sys
from apgl.graph import *
from apgl.generator import *
from sandbox.util.ProfileUtils import ProfileUtils
from exp.sandbox.predictors.leafrank.SVMLeafRank import SVMLeafRank
from exp.sandbox.predictors.TreeRank import TreeRank
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class TreeRankProfile(object):
def __init__(self):
self.folds = 3
self.paramDict = {}
self.paramDict["setC"] = 2**numpy.arange(-5, 5, dtype=numpy.float)
self.leafRanklearner = SVMLeafRank(self.paramDict, self.folds)
def profileLearnModel(self):
treeRank = TreeRank(self.leafRanklearner)
treeRank.setMaxDepth(10)
treeRank.setMinSplit(50)
numExamples = 5000
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = numpy.array(numpy.random.rand(numExamples) < 0.1, numpy.int)*2-1
def run():
for i in range(5):
print("Iteration " + str(i))
treeRank.learnModel(X, Y)
#print(treeRank.getTreeSize())
#print(treeRank.getTreeDepth())
ProfileUtils.profile('run()', globals(), locals())
profiler = TreeRankProfile()
profiler.profileLearnModel()
|
charanpald/sandbox
|
sandbox/predictors/profile/TreeRankProfile.py
|
Python
|
gpl-3.0
| 1,271
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui,QtCore
from Ui_about_author import Ui_About
_IME = "<p>Author: Bojan Ilić</p>"
_FAKULTET = "Faculty of Electrical Engineering, University of Belgrade - ETF"
_MAIL = "https.rs@gmail.com"
_URL = '<a href="https://www.facebook.com/puzicius">Facebook link</a>'
#-------------------------------------------------------------------------------
class AboutWindow2(QtGui.QDialog):
""" Class wrapper for about window ui """
def __init__(self):
super(AboutWindow2,self).__init__()
self.setupUI()
#print sys.stdout.encoding
def setupUI(self):
#create window from ui
self.ui=Ui_About()
self.ui.setupUi(self)
self.ui.lblVersion.setText("{}".format(_IME))
self.ui.lblVersion2.setText("{}".format(_FAKULTET))
self.ui.lblVersion3.setText("E-mail: {}".format(_MAIL))
self.ui.lblURL.setText(_URL)
#-------------------------------------------------------------------------------
def main():
app = QtGui.QApplication(sys.argv)
form = AboutWindow2()
form.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
puyilio/Application-for-PyBoard
|
About_author.py
|
Python
|
gpl-3.0
| 1,254
|
# -*- coding: utf-8 -*-
# EDIS - a simple cross-platform IDE for C
#
# This file is part of Edis
# Copyright 2014-2015 - Gabriel Acosta <acostadariogabriel at gmail>
# License: GPLv3 (see http://www.gnu.org/licenses/gpl.html)
from PyQt4.Qsci import QsciLexerCPP
from PyQt4.QtGui import QColor
from src import editor_scheme
from src.core import settings
class Lexer(QsciLexerCPP):
""" Lexer class """
def __init__(self, *args, **kwargs):
super(Lexer, self).__init__(*args, **kwargs)
        # Configuration
self.setStylePreprocessor(True)
self.setFoldComments(True)
self.setFoldPreprocessor(True)
self.setFoldCompact(False)
self.load_highlighter()
def load_highlighter(self):
""" Método público: carga el resaltado de sintáxis """
scheme = editor_scheme.get_scheme(
settings.get_setting('editor/scheme'))
self.setDefaultPaper(QColor(scheme['BackgroundEditor']))
self.setPaper(self.defaultPaper(0))
self.setColor(QColor(scheme['Color']))
types = dir(self)
for _type in types:
if _type in scheme:
atr = getattr(self, _type)
self.setColor(QColor(scheme[_type]), atr)
    def keywords(self, kset):
if kset == 1:
            # Reserved words
return ('auto break case const continue default do else enum '
'extern for goto if register return short sizeof static '
'struct switch typedef union unsigned void volatile while '
'char float int long double')
        elif kset == 2:
            # Functions defined in stdio.h and stdlib.h
return ('fprintf fscanf printf scanf sprintf sscanf vfprintf '
'vprintf vsprintf fclose fflush fopen freopen remove '
'rename setbuf tmpfile tmpnam fgetc fgets fputc fputs '
'getc getchar gets putc putchar puts ungetc fread fseek '
'fsetpos ftell rewind clearerr feof ferror perror '
'abort atexit exit getenv system abs div labs ldiv '
'rand srand atof atoi atol strtod strtod strtoll '
'strtoul bsearch qsort calloc realloc malloc free '
'mblen mbtowc wctomb mbstowcs wcstombs')
        return super(Lexer, self).keywords(kset)
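# --- Hypothetical usage sketch (not part of the original module) ---
# A lexer like this is normally attached to a QScintilla editor widget; the
# sketch is kept in comments because Lexer() needs Edis' settings/scheme
# machinery at construction time. The editor creation is an assumption.
#
# from PyQt4.Qsci import QsciScintilla
#
# editor = QsciScintilla()
# editor.setLexer(Lexer(editor)) # the lexer drives highlighting and folding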
|
centaurialpha/edis
|
src/ui/editor/lexer.py
|
Python
|
gpl-3.0
| 2,435
|
def dummy_export_progress_cb(*args, **kwargs):
pass
class Exporter(object):
def __init__(self, obj, export_format):
self.obj = obj
self.export_format = str(export_format)
self.can_change_quality = False
    def set_quality(self, quality_percent):
raise NotImplementedError()
def set_postprocess_func(self, postprocess_func):
raise NotImplementedError()
    def estimate_size(self):
"""
returns the size in bytes
"""
raise NotImplementedError()
def get_img(self):
"""
Returns a Pillow Image
"""
raise NotImplementedError()
def get_mime_type(self):
raise NotImplementedError()
def get_file_extensions(self):
raise NotImplementedError()
def save(self, target_path, progress_cb=dummy_export_progress_cb):
raise NotImplementedError()
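# --- Hypothetical subclass sketch (not part of the original module) ---
# Exporter above is an abstract interface: every method raises
# NotImplementedError until a concrete exporter overrides it. A minimal,
# illustrative PNG exporter could look like the sketch below; treating
# self.obj as a Pillow Image is an assumption, not paperwork's actual API.
class PngExporter(Exporter):
    def __init__(self, obj):
        super(PngExporter, self).__init__(obj, "PNG")

    def get_img(self):
        # assumption: self.obj already is a Pillow Image
        return self.obj

    def get_mime_type(self):
        return "image/png"

    def get_file_extensions(self):
        return ["png"]

    def save(self, target_path, progress_cb=dummy_export_progress_cb):
        progress_cb(0, 1)  # report start
        self.get_img().save(target_path, self.export_format)
        progress_cb(1, 1)  # report completion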
|
jflesch/paperwork-backend
|
paperwork_backend/common/export.py
|
Python
|
gpl-3.0
| 885
|
'''a module to define a cache class for pictures'''
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import os
import shutil
import tempfile
import time
from urllib import urlretrieve

import Cache

log = logging.getLogger('e3.cache.PictureCache')  # logger name assumed
class PictureCache(Cache.Cache):
    '''a class to maintain a cache of pictures (e.g. user avatars)
    '''
def __init__(self, config_path, user):
'''constructor
config_path -- the path where the base configuration is located
user -- the user account or identifier
'''
Cache.Cache.__init__(self, os.path.join(config_path,
user.strip()), 'pictures', True)
def parse(self):
'''parse the file that contains the dir information
return a list of tuples containing (stamp, hash) in the order found
on the file
'''
lines = {}
with file(self.info_path) as handle:
for line in handle.readlines():
stamp, hash_ = line.split(' ', 1)
lines[int(stamp)] = hash_.strip()
return lines
def list(self):
'''return a list of tuples (stamp, hash) of the elements on cache
'''
return self.parse().items()
def insert(self, item):
'''insert a new item into the cache
return the information (stamp, hash) on success None otherwise
item -- a path to an image
'''
hash_ = Cache.get_file_path_hash(item)
if hash_ is None:
return None
path = os.path.join(self.path, hash_)
last_path = os.path.join(self.path, 'last')
shutil.copy2(item, path)
shutil.copy2(item, last_path)
return self.__add_entry(hash_)
def insert_url(self, url):
'''download and insert a new item into the cache
return the information (stamp, hash) on success None otherwise
item -- a path to an image
'''
path = os.path.join(tempfile.gettempdir(), "avatars")
try:
urlretrieve(url, path)
except IOError:
log.warning("Can't read url avatar")
return None
return self.insert(path)
def insert_raw(self, item):
'''insert a new item into the cache
return the information (stamp, hash) on success None otherwise
item -- a file like object containing an image
'''
if item is None:
return None
position = item.tell()
item.seek(0)
hash_ = Cache.get_file_hash(item)
if hash_ is None:
return None
path = os.path.join(self.path, hash_)
last_path = os.path.join(self.path, 'last')
self.create_file(path, item)
shutil.copy2(path, last_path)
item.seek(position)
return self.__add_entry(hash_)
def __add_entry(self, hash_):
'''add an entry to the information file with the current timestamp
and the hash_ of the file that was saved
return (stamp, hash)
'''
time_info = int(time.time())
handle = file(self.info_path, 'a')
handle.write('%s %s\n' % (str(time_info), hash_))
handle.close()
return time_info, hash_
def __remove_entry(self, hash_to_remove):
'''remove an entry from the information file
'''
entries = self.list()
handle = file(self.info_path, 'w')
for stamp, hash_ in entries:
if hash_ != hash_to_remove:
handle.write('%s %s\n' % (str(stamp), hash_))
handle.close()
def remove(self, item):
'''remove an item from cache
return True on success False otherwise
item -- the name of the image to remove
'''
if item not in self:
return False
os.remove(os.path.join(self.path, item))
self.__remove_entry(item)
return True
def __contains__(self, name):
'''return True if name is in cache, False otherwise
this method is used to do something like
if image_hash in cache: asd()
'''
return os.path.isfile(os.path.join(self.path, name))
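# --- Hypothetical usage sketch (not part of the original module) ---
# Rough illustration of the intended call sequence; the config path and
# account name are made up.
#
# cache = PictureCache('/home/me/.config/emesene2', 'user@example.com')
# stamp, hash_ = cache.insert('/tmp/avatar.png') # copies the file into the cache
# for stamp, hash_ in cache.list(): # (timestamp, hash) pairs
#     print stamp, hash_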
|
emesene/emesene
|
emesene/e3/cache/PictureCache.py
|
Python
|
gpl-3.0
| 4,793
|
# Serial Photo Merge
# Copyright (C) 2017 Simone Riva mail: simone.rva {at} gmail {dot} com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import sys
import scipy.ndimage as ndimage
from imgmerge.readImg import ReadImageBasic
from imgmerge.image import Image
from imgmerge.readImgFactory import ReadImageFarctory
#import matplotlib.pyplot as plt
def get_dtype(color_bits):
if color_bits == 8:
return np.uint8
elif color_bits == 16:
return np.uint16
class MergeProcedureVirtual(object):
def __init__(self):
self._img_list = None
self._resimg = None
self._refimage = None
self._images_iterator = None
self._read_img_factory = ReadImageFarctory()
def set_images_iterator(self, img_itr):
self._images_iterator = img_itr
self._images_iterator.read_image_factory = self.read_image_factory
def get_images_iterator(self):
return self._images_iterator
images_iterator = property(get_images_iterator, set_images_iterator)
def set_images_list(self, img_list):
self._img_list = img_list
def get_images_list(self):
return self._img_list
images_list = property(get_images_list, set_images_list)
def set_reference_image(self, file_name):
self._refimage = file_name
def get_reference_image(self):
return self._refimage
reference_image = property(get_reference_image, set_reference_image)
def get_read_image_factory(self):
return self._read_img_factory
def set_read_image_factory(self, rif):
self._read_img_factory = rif
if self.images_iterator:
self.images_iterator.read_image_factory = rif
read_image_factory = property(
get_read_image_factory, set_read_image_factory)
    def execute(self):
        raise NotImplementedError(
            " %s : is virtual and must be overridden." % sys._getframe().f_code.co_name)
def get_resulting_image(self):
return self._resimg
def set_resulting_image(self, resarr):
self._resimg = resarr
resulting_image = property(get_resulting_image, set_resulting_image)
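# --- Hypothetical subclass sketch (not part of the original module) ---
# MergeProcedureVirtual above is an abstract base: concrete procedures
# override execute() and store their output via resulting_image. The naive
# averaging merge below is a sketch; the img.array attribute and the
# iterator protocol of images_iterator are assumptions about the imgmerge
# API, not verified behaviour.
class MergeProcedureAverage(MergeProcedureVirtual):

    def execute(self):
        acc = None
        count = 0
        for img in self._images_iterator:  # assumed to yield Image objects
            # accumulate in float64 to avoid uint8/uint16 overflow
            frame = img.array.astype(np.float64)  # .array is an assumption
            acc = frame if acc is None else acc + frame
            count += 1
        result = Image()
        result.array = (acc / count).astype(get_dtype(8))
        self.resulting_image = result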
|
simon-r/SerialPhotoMerge
|
imgmerge/mergeProcedureVirtual.py
|
Python
|
gpl-3.0
| 2,752
|
from functions import logger, config
import asyncio
class MessageHandler:
def __init__(self, client, message, command, args):
        ''' Create a new message handler which handles the required parts for the commands.
        Disabling this module will break the whole bot.'''
self.client = client
self.message = message
self.command = command
self.channel = message.channel
self.access_level = 0
self.needed_level = 6
self.args = args
async def sendMessage(self, text, channel=None):
'''
Sends a text message to a channel.
Arguments:
(str) text: The message you want to send
(Optional) channel: The channel you want the message to be sent in
        Returns:
            A message object if the message was sent, None otherwise.'''
message = None
text = str(text)
if len(text)==0:
raise ValueError("The message needs at least one character.")
if len(text)>2000:
raise ValueError("The message can\'t be more than 2000 chars")
if channel is None:
message = await self.client.send_message(self.channel, "\u200B{}".format(text))
else:
message = await self.client.send_message(channel, "\u200B{}".format(text))
return message
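# --- Hypothetical usage sketch (not part of the original module) ---
# How a command module would typically drive the handler; the client and
# message objects come from discord.py and are assumptions here.
#
# handler = MessageHandler(client, message, 'ping', [])
# await handler.sendMessage('pong') # raises ValueError if empty or > 2000 chars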
|
Tsumiki-Chan/Neko-Chan
|
classes/messagehandler.py
|
Python
|
gpl-3.0
| 1,344
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2014 Andy Stewart
#
# Author: Andy Stewart <lazycat.manatee@gmail.com>
# Maintainer: Andy Stewart <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import Xlib.display
import Xlib.X
import Xlib.XK
import Xlib.protocol.event
from xutils import get_xlib_display
special_X_keysyms = {
' ' : "space",
'\t' : "Tab",
'\n' : "Return", # for some reason this needs to be cr, not lf
'\r' : "Return",
'\e' : "Escape",
'!' : "exclam",
'#' : "numbersign",
'%' : "percent",
'$' : "dollar",
'&' : "ampersand",
'"' : "quotedbl",
'\'' : "apostrophe",
'(' : "parenleft",
')' : "parenright",
'*' : "asterisk",
'=' : "equal",
'+' : "plus",
',' : "comma",
'-' : "minus",
'.' : "period",
'/' : "slash",
':' : "colon",
';' : "semicolon",
'<' : "less",
'>' : "greater",
'?' : "question",
'@' : "at",
'[' : "bracketleft",
']' : "bracketright",
'\\' : "backslash",
'^' : "asciicircum",
'_' : "underscore",
'`' : "grave",
'{' : "braceleft",
'|' : "bar",
'}' : "braceright",
'~' : "asciitilde"
}
def get_keysym(ch):
keysym = Xlib.XK.string_to_keysym(ch)
if keysym == 0:
# Unfortunately, although this works to get the correct keysym
# i.e. keysym for '#' is returned as "numbersign"
# the subsequent display.keysym_to_keycode("numbersign") is 0.
if ch in special_X_keysyms:
special = special_X_keysyms[ch]
keysym = Xlib.XK.string_to_keysym(special)
return keysym
def send_string(window, ch, modifiers, press=True):
xlib_display = get_xlib_display()
mask = 0
for modifier in modifiers:
if modifier == "Ctrl":
mask |= Xlib.X.ControlMask
elif modifier == "Alt":
mask |= Xlib.X.Mod1Mask
elif modifier == "Shift":
mask |= Xlib.X.ShiftMask
elif modifier == "Super":
mask |= Xlib.X.Mod4Mask
    keycode = xlib_display.keysym_to_keycode(get_keysym(ch))
if press:
event_type = Xlib.protocol.event.KeyPress
else:
event_type = Xlib.protocol.event.KeyRelease
event = event_type(
root=xlib_display.screen().root,
window=window,
child=Xlib.X.NONE,
same_screen=1,
root_x=0,
root_y=0,
event_x=0,
event_y=0,
state=mask,
detail=keycode,
time=Xlib.X.CurrentTime,
)
window.send_event(event, propagate=True)
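# --- Hypothetical helper sketch (not part of the original module) ---
# A full keystroke is a KeyPress followed by a KeyRelease, which is why the
# demo in __main__ below calls send_string() twice per key. A small
# convenience wrapper (name and placement are assumptions) could be:
def tap_key(window, ch, modifiers=()):
    send_string(window, ch, list(modifiers), press=True)   # KeyPress
    send_string(window, ch, list(modifiers), press=False)  # KeyRelease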
if __name__ == "__main__":
xlib_display = get_xlib_display()
xwindow = xlib_display.create_resource_object("window", 73400407)
# send_string(xwindow, "x", ["Ctrl"], False)
# send_string(xwindow, "x", ["Ctrl"], True)
# send_string(xwindow, "h", [], False)
# send_string(xwindow, "h", [], True)
send_string(xwindow, "y", ["Super"], False)
send_string(xwindow, "y", ["Super"], True)
xlib_display.sync()
|
zcwj/emacs
|
lisps/webkit/send_key.py
|
Python
|
gpl-3.0
| 3,676
|
__version__ = "0.1"
import threading
import numpy as np
import pygame
from expyriment.stimuli import Canvas
from expyriment.stimuli._visual import Visual
lock_expyriment = threading.Lock()
Numpy_array_type = type(np.array([]))
def inherit_docs(cls):
for name, func in vars(cls).items():
if not func.__doc__:
for parent in cls.__bases__:
parfunc = getattr(parent, name)
if parfunc and getattr(parfunc, '__doc__', None):
func.__doc__ = parfunc.__doc__
break
return cls
@inherit_docs
class PGSurface(Canvas):
"""PyGame Surface: Expyriment Stimulus for direct Pygame operations and
PixelArrays
In contrast to other Expyriment stimuli the class does not generate temporary
surfaces.
"""
def __init__(self, size, position=None, colour=None):
Canvas.__init__(self, size, position, colour)
self._px_array = None
@property
def surface(self):
"""todo"""
if not self.has_surface:
ok = self._set_surface(self._get_surface()) # create surface
if not ok:
raise RuntimeError(Visual._compression_exception_message.format(
"surface"))
return self._surface
@property
def pixel_array(self):
"""todo"""
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
return self._px_array
@pixel_array.setter
def pixel_array(self, value):
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
self._px_array = value
def unlock_pixel_array(self):
"""todo"""
self._px_array = None
def preload(self, inhibit_ogl_compress=False):
self.unlock_pixel_array()
return Canvas.preload(self, inhibit_ogl_compress)
def compress(self):
self.unlock_pixel_array()
return Canvas.compress(self)
def decompress(self):
self.unlock_pixel_array()
return Canvas.decompress(self)
def plot(self, stimulus):
self.unlock_pixel_array()
return Canvas.plot(self, stimulus)
def clear_surface(self):
self.unlock_pixel_array()
return Canvas.clear_surface(self)
def copy(self):
self.unlock_pixel_array()
return Canvas.copy(self)
def unload(self, keep_surface=False):
if not keep_surface:
self.unlock_pixel_array()
return Canvas.unload(self, keep_surface)
def rotate(self, degree):
self.unlock_pixel_array()
return Canvas.rotate(self, degree)
def scale(self, factors):
self.unlock_pixel_array()
return Canvas.scale(self, factors)
# expyriment 0.8.0
# def scale_to_fullscreen(self, keep_aspect_ratio=True):
# self.unlock_pixel_array()
# return Canvas.scale_to_fullscreen(self, keep_aspect_ratio)
def flip(self, booleans):
self.unlock_pixel_array()
return Canvas.flip(self, booleans)
def blur(self, level):
self.unlock_pixel_array()
return Canvas.blur(self, level)
def scramble(self, grain_size):
self.unlock_pixel_array()
return Canvas.scramble(self, grain_size)
def add_noise(self, grain_size, percentage, colour):
self.unlock_pixel_array()
return Canvas.add_noise(self, grain_size, percentage, colour)
class Plotter(PGSurface):
"""Pygame Plotter"""
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(180, 180, 180),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
self.n_data_rows = n_data_rows
self.data_row_colours = data_row_colours
self.width = width
self.y_range = y_range
self._background_colour = background_colour
self.marker_colour = marker_colour
if axis_colour is None:
self.axis_colour = background_colour
else:
self.axis_colour = axis_colour
self._previous = [None] * n_data_rows
PGSurface.__init__(self, size=(self.width, self._height),
position=position)
self.clear_area()
@property
def y_range(self):
        return self._y_range
@y_range.setter
def y_range(self, values):
"""tuple with lower and upper values"""
self._y_range = values
self._height = self._y_range[1] - self._y_range[0]
self._plot_axis = (self._y_range[0] <= 0 and \
self._y_range[1] >= 0)
@property
def data_row_colours(self):
return self._data_row_colours
@data_row_colours.setter
def data_row_colours(self, values):
"""data_row_colours: list of colour"""
try:
if not isinstance(values[0], list) and \
not isinstance(values[0], tuple): # one dimensional
values = [values]
except:
            values = [[]]  # values is not a list
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data row colour does not match the ' +
'defined number of data rows!')
self._data_row_colours = values
def clear_area(self):
self.pixel_array[:, :] = self._background_colour
if self._plot_axis:
self.pixel_array[:, self._y_range[1]:self._y_range[1] + 1] = \
self.axis_colour
def write_values(self, position, values, set_marker=False):
if set_marker:
self.pixel_array[position, :] = self.marker_colour
else:
self.pixel_array[position, :] = self._background_colour
if self._plot_axis and self.axis_colour != self._background_colour:
self.pixel_array[position, self._y_range[1]:self._y_range[1] + 1] = \
self.axis_colour
for c, plot_value in enumerate(self._y_range[1] - \
np.array(values, dtype=int)):
if plot_value >= 0 and self._previous[c] >= 0 \
and plot_value <= self._height and \
self._previous[c] <= self._height:
if self._previous[c] > plot_value:
self.pixel_array[position,
plot_value:self._previous[c] + 1] = \
self._data_row_colours[c]
else:
self.pixel_array[position,
self._previous[c]:plot_value + 1] = \
self._data_row_colours[c]
self._previous[c] = plot_value
def add_values(self, values, set_marker=False):
"""
"""
if type(values) is not Numpy_array_type and \
not isinstance(values, tuple) and \
not isinstance(values, list):
values = [values]
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data values does not match the ' +
'defined number of data rows!')
# move plot one pixel to the left
self.pixel_array[:-1, :] = self.pixel_array[1:, :]
self.write_values(position=-1, values=values, set_marker=set_marker)
class PlotterThread(threading.Thread):
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(80, 80, 80),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
super(PlotterThread, self).__init__()
self._plotter = Plotter(n_data_rows=n_data_rows,
data_row_colours=data_row_colours,
width=width, y_range=y_range,
background_colour=background_colour,
marker_colour=marker_colour,
position=position,
axis_colour=axis_colour)
self._new_values = []
self._lock_new_values = threading.Lock()
self._stop_request = threading.Event()
def get_plotter_rect(self, screen_size):
half_screen_size = (screen_size[0] / 2, screen_size[1] / 2)
pos = self._plotter.absolute_position
stim_size = self._plotter.surface_size
rect_pos = (pos[0] + half_screen_size[0] - stim_size[0] / 2,
- pos[1] + half_screen_size[1] - stim_size[1] / 2)
return pygame.Rect(rect_pos, stim_size)
def stop(self):
self.join()
def join(self, timeout=None):
self._stop_request.set()
super(PlotterThread, self).join(timeout)
def run(self):
"""the plotter thread is constantly updating the the
pixel_area"""
while not self._stop_request.is_set():
# get data
if self._lock_new_values.acquire(False):
values = self._new_values
self._new_values = []
self._lock_new_values.release() # release to receive new values
else:
values = []
n = len(values)
if n > 0:
if n > self._plotter.width:
values = values[-1 * self._plotter.width:] # only the last
n = len(values)
self._plotter.pixel_array[:-1 * n, :] = \
self._plotter.pixel_array[n:, :]
for x in range(-1 * n, 0):
self._plotter.write_values(position=x,
values=values[x][0],
set_marker=values[x][1])
# Expyriment present
lock_expyriment.acquire()
self._plotter.present(update=False, clear=False)
lock_expyriment.release()
def add_values(self, values, set_marker=False):
"""adds new values to the plotter"""
self._lock_new_values.acquire()
self._new_values.append((values, set_marker))
self._lock_new_values.release()
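# --- Hypothetical usage sketch (not part of the original module) ---
# Rough illustration of the threaded plotter; it assumes an expyriment
# screen has already been initialised by the surrounding application and
# that data_stream yields pairs of values.
#
# thread = PlotterThread(n_data_rows=2,
#                        data_row_colours=[(255, 0, 0), (0, 255, 0)])
# thread.start()
# for sample in data_stream:
#     thread.add_values(sample)
# thread.stop()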
|
lindemann09/pytrak
|
pytrak/plotter.py
|
Python
|
gpl-3.0
| 10,237
|
import falcon
import msgpack
import json
from btree import BinaryTree
import ZODB, ZODB.FileStorage
import transaction
from persistent import Persistent
import uuid
import urllib
import btree
from pprint import pprint
class Collection (object):
def on_post(self, req, resp):
# req.stream corresponds to the WSGI wsgi.input environ variable,
# and allows you to read bytes from the request body.
#
# See also: PEP 3333
if req.content_length in (None, 0):
# Nothing to do
print "nothin"
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
try:
pprint(body)
req.context['doc'] = json.loads(body.decode('utf-8'))
token = str(uuid.uuid4())
storage = ZODB.FileStorage.FileStorage('trees/'+token+'.fs')
db = ZODB.DB(storage)
connection = db.open()
root = connection.root
unordered_list = req.context['doc']['data']
root.tree = BinaryTree(unordered_list.pop())
tree = root.tree
tree.unordered_list = unordered_list
#tree.setList()
if len(unordered_list) <2:
                raise falcon.HTTPBadRequest('Not enough data', 'We need more than 2 data elements')
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753,
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.')
tree.current = tree
tree.treeroot = tree.current
tree.next = tree.unordered_list.pop()
tree.ordered = False
tree.jresp = {'remain':tree.unordered_list, 'item':tree.current.getNodeValue(), 'compare':tree.next, 'token':token, 'ordered':tree.ordered,
'links':[{"self":"/order/"},
{'order':'/order/%s'%(urllib.quote(token))},
{'lt':'/order/%s/%s/%s'%(urllib.quote(token), tree.current.getNodeValue(), tree.next)},
{'gt':'/order/%s/%s/%s'%(urllib.quote(token), tree.next, tree.current.getNodeValue())}]}
transaction.commit()
connection.close()
db.close()
storage.close()
resp.body = json.dumps(tree.jresp)
def on_get(self, req, resp, token):
storage = ZODB.FileStorage.FileStorage('trees/'+token+'.fs')
db = ZODB.DB(storage)
connection = db.open()
root = connection.root
if hasattr(root, 'tree'):
tree = root.tree
else:
resp.body = "Initialize first"
connection.close()
db.close()
storage.close()
return
lst = list(btree.inorder(tree))
tree.jresp = {'data':lst, 'item':tree.current.getNodeValue(), 'compare':tree.next, 'token':token, 'ordered':tree.ordered,
'links':[{"new":"/order/"},
{"self":"/order/%s"%(urllib.quote(token))},
{"lt":"/order/%s/%s/%s"%(urllib.quote(token), tree.current.getNodeValue(), tree.next)},
{"gt":"/order/%s/%s/%s"%(urllib.quote(token), tree.next, tree.current.getNodeValue())}]}
transaction.commit()
connection.close()
db.close()
storage.close()
resp.body = json.dumps(tree.jresp)
def on_put(self, req, resp, token):
if req.content_length in (None, 0):
# Nothing to do
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
try:
req.context['doc'] = json.loads(body.decode('utf-8'))
left = req.context['doc']['left']
right = req.context['doc']['right']
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753,
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.')
storage = ZODB.FileStorage.FileStorage('trees/'+token+'.fs')
db = ZODB.DB(storage)
connection = db.open()
root = connection.root
if hasattr(root, 'tree'):
tree = root.tree
else:
resp.body = "Initialize first"
connection.close()
db.close()
storage.close()
return
if tree.next not in [left, right]:
resp.body = json.dumps(tree.jresp)
connection.close()
db.close()
storage.close()
return
if left == tree.current.getNodeValue():
if tree.current.getRightChild() == None:
tree.current.insertRight(right)
tree.current = tree.treeroot
if len(tree.unordered_list)>0:
tree.next = tree.unordered_list.pop()
else:
tree.ordered = True
tree.next = "None"
else:
tree.current = tree.current.getRightChild()
elif right == tree.current.getNodeValue():
if tree.current.getLeftChild()== None:
tree.current.insertLeft(left)
tree.current = tree.treeroot
if len(tree.unordered_list)>0:
tree.next = tree.unordered_list.pop()
else:
tree.ordered = True
tree.next = "None"
else:
tree.current = tree.current.getLeftChild()
tree.jresp = {'remain':tree.unordered_list, 'item':tree.current.getNodeValue(), 'compare':tree.next, 'token':token, 'ordered':tree.ordered,
'links':[{"new":"/order/"},
{"order":"/order/%s"%(urllib.quote(token))},
{"lt":"/order/%s/%s/%s"%(urllib.quote(token), tree.current.getNodeValue(), tree.next)},
{"gt":"/order/%s/%s/%s"%(urllib.quote(token), tree.next, tree.current.getNodeValue())}]}
transaction.commit()
connection.close()
db.close()
storage.close()
resp.body = json.dumps(tree.jresp)
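# --- Hypothetical wiring sketch (not part of the original module) ---
# The resource above expects a route both with and without a token. The
# WSGI wiring below is an assumption about how the app is assembled,
# using the old-style falcon.API constructor this code targets.
#
# app = falcon.API()
# collection = Collection()
# app.add_route('/order', collection)         # on_post: start an ordering
# app.add_route('/order/{token}', collection) # on_get / on_put: continue it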
|
AgusRumayor/pypriorapi
|
order.py
|
Python
|
gpl-3.0
| 6,154
|
#!/usr/bin/python
# Copyright MetaCommunications, Inc. 2003-2007
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import glob
import optparse
import os
import os.path
import platform
import sys
import time
import traceback
#~ Place holder for xsl_reports/util module
utils = None
repo_root = {
'anon' : 'http://svn.boost.org/svn/boost/',
'user' : 'https://svn.boost.org/svn/boost/'
}
repo_path = {
'trunk' : 'trunk',
'release' : 'branches/release',
'build' : 'trunk/tools/build/v2',
'jam' : 'tags/tools/jam/Boost_Jam_3_1_17/src',
'regression' : 'trunk/tools/regression',
'boost-build.jam'
: 'trunk/boost-build.jam'
}
class runner:
def __init__(self,root):
commands = map(
lambda m: m[8:].replace('_','-'),
filter(
lambda m: m.startswith('command_'),
runner.__dict__.keys())
)
commands.sort()
commands = "commands: %s" % ', '.join(commands)
opt = optparse.OptionParser(
usage="%prog [options] [commands]",
description=commands)
#~ Base Options:
opt.add_option( '--runner',
help="runner ID (e.g. 'Metacomm')" )
opt.add_option( '--comment',
help="an HTML comment file to be inserted in the reports" )
opt.add_option( '--tag',
help="the tag for the results" )
opt.add_option( '--toolsets',
help="comma-separated list of toolsets to test with" )
opt.add_option( '--incremental',
help="do incremental run (do not remove previous binaries)",
action='store_true' )
opt.add_option( '--timeout',
help="specifies the timeout, in minutes, for a single test run/compilation",
type='int' )
opt.add_option( '--bjam-options',
help="options to pass to the regression test" )
opt.add_option( '--bjam-toolset',
help="bootstrap toolset for 'bjam' executable" )
opt.add_option( '--pjl-toolset',
help="bootstrap toolset for 'process_jam_log' executable" )
opt.add_option( '--platform' )
#~ Source Options:
opt.add_option( '--user',
help="Boost SVN user ID" )
opt.add_option( '--local',
help="the name of the boost tarball" )
opt.add_option( '--force-update',
help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run",
action='store_true' )
opt.add_option( '--have-source',
help="do neither a tarball download nor an SVN update; used primarily for testing script changes",
action='store_true' )
#~ Connection Options:
opt.add_option( '--ftp',
help="FTP URL to upload results to." )
opt.add_option( '--proxy',
help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
opt.add_option( '--ftp-proxy',
help="FTP proxy server (e.g. 'ftpproxy')" )
opt.add_option( '--dart-server',
help="the dart server to send results to" )
#~ Debug Options:
opt.add_option( '--debug-level',
help="debugging level; controls the amount of debugging output printed",
type='int' )
opt.add_option( '--send-bjam-log',
help="send full bjam log of the regression run",
action='store_true' )
opt.add_option( '--mail',
help="email address to send run notification to" )
opt.add_option( '--smtp-login',
help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
opt.add_option( '--skip-tests',
help="do not run bjam; used for testing script changes",
action='store_true' )
#~ Defaults
self.runner = None
self.comment='comment.html'
self.tag='trunk'
self.toolsets=None
self.incremental=False
self.timeout=5
self.bjam_options=''
self.bjam_toolset=''
self.pjl_toolset=''
self.platform=self.platform_name()
self.user='anonymous'
self.local=None
self.force_update=False
self.have_source=False
self.ftp=None
self.proxy=None
self.ftp_proxy=None
self.dart_server=None
self.debug_level=0
self.send_bjam_log=False
self.mail=None
self.smtp_login=None
self.skip_tests=False
( _opt_, self.actions ) = opt.parse_args(None,self)
if not self.actions or self.actions == []:
self.actions = [ 'regression' ]
#~ Initialize option dependent values.
self.regression_root = root
self.boost_root = os.path.join( self.regression_root, 'boost' )
self.regression_results = os.path.join( self.regression_root, 'results' )
if self.pjl_toolset != 'python':
self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
else:
self.regression_log = os.path.join( self.regression_results, 'bjam.xml' )
self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
if sys.platform == 'win32':
self.patch_boost = 'patch_boost.bat'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
elif sys.platform == 'cygwin':
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
else:
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam' }
self.process_jam_log = { 'name' : 'process_jam_log' }
self.bjam = {
'name' : self.bjam['name'],
'build_cmd' : self.bjam_build_cmd,
'path' : os.path.join(self.regression_root,self.bjam['name']),
'source_dir' : self.tools_bjam_root,
'build_dir' : self.tools_bjam_root,
'build_args' : ''
}
self.process_jam_log = {
'name' : self.process_jam_log['name'],
'build_cmd' : self.bjam_cmd,
'path' : os.path.join(self.regression_root,self.process_jam_log['name']),
'source_dir' : os.path.join(self.tools_regression_root,'build'),
'build_dir' : os.path.join(self.tools_regression_root,'build'),
'build_args' : 'process_jam_log -d2'
}
if self.debug_level > 0:
self.log('Regression root = %s'%self.regression_root)
self.log('Boost root = %s'%self.boost_root)
self.log('Regression results = %s'%self.regression_results)
self.log('Regression log = %s'%self.regression_log)
self.log('BB root = %s'%self.tools_bb_root)
self.log('Bjam root = %s'%self.tools_bjam_root)
self.log('Tools root = %s'%self.tools_regression_root)
self.log('XSL reports dir = %s'%self.xsl_reports_dir)
self.log('Timestamp = %s'%self.timestamp_path)
self.log('Patch Boost script = %s'%self.patch_boost)
self.main()
#~ The various commands that make up the testing sequence...
def command_cleanup(self,*args):
if not args or args == None or args == []: args = [ 'source', 'bin' ]
if 'source' in args:
self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
self.rmtree( self.boost_root )
if 'bin' in args:
boost_bin_dir = os.path.join( self.boost_root, 'bin' )
self.log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
self.rmtree( boost_bin_dir )
boost_binv2_dir = os.path.join( self.boost_root, 'bin.v2' )
self.log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
self.rmtree( boost_binv2_dir )
self.log( 'Cleaning up "%s" directory ...' % self.regression_results )
self.rmtree( self.regression_results )
def command_get_tools(self):
#~ Get Boost.Build v2...
self.log( 'Getting Boost.Build v2...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_bb_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['build']),
os.path.basename(self.tools_bb_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_bb_root)+".tar.bz2",
self.tarball_url(repo_path['build']) ) )
self.unpack_tarball(
self.tools_bb_root+".tar.bz2",
os.path.basename(self.tools_bb_root) )
#~ Get Boost.Jam...
self.log( 'Getting Boost.Jam...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_bjam_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['jam']),
os.path.basename(self.tools_bjam_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_bjam_root)+".tar.bz2",
self.tarball_url(repo_path['jam']) ) )
self.unpack_tarball(
self.tools_bjam_root+".tar.bz2",
os.path.basename(self.tools_bjam_root) )
#~ Get the regression tools and utilities...
        self.log( 'Getting regression tools and utilities...' )
if self.user and self.user != '':
os.chdir( os.path.dirname(self.tools_regression_root) )
self.svn_command( 'co %s %s' % (
self.svn_repository_url(repo_path['regression']),
os.path.basename(self.tools_regression_root) ) )
else:
self.retry( lambda: self.download_tarball(
os.path.basename(self.tools_regression_root)+".tar.bz2",
self.tarball_url(repo_path['regression']) ) )
self.unpack_tarball(
self.tools_regression_root+".tar.bz2",
os.path.basename(self.tools_regression_root) )
        #~ We get a boost-build.jam to make the tool build work even if there's
        #~ an existing boost-build.jam above the testing root.
self.log( 'Getting boost-build.jam...' )
self.http_get(
self.svn_repository_url(repo_path['boost-build.jam']),
os.path.join( self.regression_root, 'boost-build.jam' ) )
def command_get_source(self):
self.refresh_timestamp()
self.log( 'Getting sources (%s)...' % self.timestamp() )
if self.user and self.user != '':
self.retry( self.svn_checkout )
else:
self.retry( self.get_tarball )
pass
def command_update_source(self):
if self.user and self.user != '' \
or os.path.exists( os.path.join( self.boost_root, '.svn' ) ):
open( self.timestamp_path, 'w' ).close()
self.log( 'Updating sources from SVN (%s)...' % self.timestamp() )
self.retry( self.svn_update )
else:
self.command_get_source( )
pass
def command_patch(self):
self.import_utils()
patch_boost_path = os.path.join( self.regression_root, self.patch_boost )
if os.path.exists( patch_boost_path ):
self.log( 'Found patch file "%s". Executing it.' % patch_boost_path )
os.chdir( self.regression_root )
utils.system( [ patch_boost_path ] )
pass
def command_setup(self):
self.command_patch()
self.build_if_needed(self.bjam,self.bjam_toolset)
if self.pjl_toolset != 'python':
self.build_if_needed(self.process_jam_log,self.pjl_toolset)
def command_test(self, *args):
if not args or args == None or args == []: args = [ "test", "process" ]
self.import_utils()
self.log( 'Making "%s" directory...' % self.regression_results )
utils.makedirs( self.regression_results )
results_libs = os.path.join( self.regression_results, 'libs' )
results_status = os.path.join( self.regression_results, 'status' )
if "clean" in args:
self.command_test_clean()
if "test" in args:
self.command_test_run()
self.command_test_boost_build()
if "process" in args:
if self.pjl_toolset != 'python':
self.command_test_process()
def command_test_clean(self):
results_libs = os.path.join( self.regression_results, 'libs' )
results_status = os.path.join( self.regression_results, 'status' )
self.rmtree( results_libs )
self.rmtree( results_status )
def command_test_run(self):
self.import_utils()
if self.pjl_toolset != 'python':
test_cmd = '%s -d2 preserve-test-targets=off --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
else:
test_cmd = '%s -d1 preserve-test-targets=off --dump-tests --verbose-test %s "--build-dir=%s" "--out-xml=%s"' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
self.log( 'Starting tests (%s)...' % test_cmd )
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.system( [ test_cmd ] )
os.chdir( cd )
def command_test_boost_build(self):
self.import_utils()
self.log( 'Running Boost.Build tests' )
# Find the true names of the toolsets used for testing
toolsets = os.listdir(os.path.join(self.regression_results,
"boost/bin.v2/libs/any/test/any_test.test"));
for t in toolsets:
d = os.path.join(self.regression_results, ("boost-build-%s" % (t)))
utils.makedirs (d)
fn = os.path.join(d, "test_log.xml")
cd = os.getcwd()
try:
os.chdir (os.path.join (self.boost_root, 'tools/build/v2/test'));
bjam_path = os.path.dirname (self.tool_path( self.bjam ))
self.log( "Using bjam binary in '%s'" % (bjam_path))
os.putenv('PATH', bjam_path + os.pathsep + os.environ['PATH'])
utils.system ( [ "%s test_all.py --default-bjam --xml %s > %s" % (sys.executable, t, fn) ] )
finally:
os.chdir( cd )
def command_test_process(self):
self.import_utils()
self.log( 'Getting test case results out of "%s"...' % self.regression_log )
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.checked_system( [
'"%s" "%s" <"%s"' % (
self.tool_path(self.process_jam_log),
self.regression_results,
self.regression_log )
] )
os.chdir( cd )
def command_collect_logs(self):
self.import_utils()
comment_path = os.path.join( self.regression_root, self.comment )
if not os.path.exists( comment_path ):
self.log( 'Comment file "%s" not found; creating default comment.' % comment_path )
f = open( comment_path, 'w' )
f.write( '<p>Tests are run on %s platform.</p>' % self.platform_name() )
f.close()
source = 'tarball'
revision = ''
svn_root_file = os.path.join( self.boost_root, '.svn' )
svn_info_file = os.path.join( self.boost_root, 'svn_info.txt' )
if os.path.exists( svn_root_file ):
source = 'SVN'
self.svn_command( 'info --xml "%s" >"%s"' % (self.boost_root,svn_info_file) )
if os.path.exists( svn_info_file ):
f = open( svn_info_file, 'r' )
svn_info = f.read()
f.close()
i = svn_info.find( 'Revision:' )
if i < 0: i = svn_info.find( 'revision=' ) # --xml format
if i >= 0:
i += 10
while svn_info[i] >= '0' and svn_info[i] <= '9':
revision += svn_info[i]
i += 1
if self.pjl_toolset != 'python':
from collect_and_upload_logs import collect_logs
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
collect_logs(
self.regression_results,
self.runner, self.tag, self.platform, comment_path,
self.timestamp_path,
self.user,
source, run_type,
self.dart_server, self.proxy,
revision )
else:
from process_jam_log import BJamLog2Results
if self.incremental:
run_type = '--incremental'
else:
run_type = ''
BJamLog2Results([
'--output='+os.path.join(self.regression_results,self.runner+'.xml'),
'--runner='+self.runner,
'--comment='+comment_path,
'--tag='+self.tag,
'--platform='+self.platform,
'--source='+source,
'--revision='+revision,
run_type,
self.regression_log
])
self.compress_file(
os.path.join(self.regression_results,self.runner+'.xml'),
os.path.join(self.regression_results,self.runner+'.zip')
)
def command_upload_logs(self):
self.import_utils()
from collect_and_upload_logs import upload_logs
if self.ftp:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server,
ftp_url = self.ftp )
)
else:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server )
)
def command_regression(self):
import socket
import string
try:
mail_subject = 'Boost regression for %s on %s' % ( self.tag,
string.split(socket.gethostname(), '.')[0] )
start_time = time.localtime()
if self.mail:
self.log( 'Sending start notification to "%s"' % self.mail )
self.send_mail(
'%s started at %s.' % ( mail_subject, format_time( start_time ) )
)
self.command_get_tools()
if self.local is not None:
self.log( 'Using local file "%s"' % self.local )
b = os.path.basename( self.local )
tag = b[ 0: b.find( '.' ) ]
self.log( 'Tag: "%s"' % tag )
self.unpack_tarball( self.local, self.boost_root )
elif self.have_source:
if not self.incremental: self.command_cleanup( [ 'bin' ] )
else:
if self.incremental or self.force_update:
if not self.incremental: self.command_cleanup( [ 'bin' ] )
else:
self.command_cleanup()
self.command_get_source()
self.command_setup()
# Not specifying --toolset in command line is not enough
# that would mean to use Boost.Build default ones
            # We can skip tests only if we were explicitly
            # told to have no toolsets in command line "--toolset="
if self.toolsets != '': # --toolset=,
if not self.skip_tests:
self.command_test()
self.command_collect_logs()
self.command_upload_logs()
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
end_time = time.localtime()
self.send_mail(
'%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
)
except:
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
end_time = time.localtime()
self.send_mail(
'%s failed at %s.' % ( mail_subject, format_time( end_time ) ),
traceback_ )
raise
def command_show_revision(self):
modified = '$Date: 2010-01-13 13:03:18 -0500 (Wed, 13 Jan 2010) $'
revision = '$Revision: 58983 $'
import re
re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
#~ Utilities...
def main(self):
for action in self.actions:
action_m = "command_"+action.replace('-','_')
if hasattr(self,action_m):
getattr(self,action_m)()
def platform_name(self):
# See http://article.gmane.org/gmane.comp.lib.boost.testing/933
if sys.platform == 'win32':
return 'Windows'
elif sys.platform == 'cygwin':
return 'Windows/Cygwin'
return platform.system()
def log(self,message):
sys.stdout.flush()
sys.stderr.flush()
sys.stderr.write( '# %s\n' % message )
sys.stderr.flush()
def rmtree(self,path):
if os.path.exists( path ):
import shutil
#~ shutil.rmtree( unicode( path ) )
if sys.platform == 'win32':
os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
shutil.rmtree( unicode( path ) )
else:
os.system( 'rm -f -r "%s"' % path )
def refresh_timestamp( self ):
if os.path.exists( self.timestamp_path ):
os.unlink( self.timestamp_path )
open( self.timestamp_path, 'w' ).close()
def timestamp( self ):
return time.strftime(
'%Y-%m-%dT%H:%M:%SZ',
time.gmtime( os.stat( self.timestamp_path ).st_mtime ) )
def retry( self, f, max_attempts=5, sleep_secs=10 ):
for attempts in range( max_attempts, -1, -1 ):
try:
return f()
except Exception, msg:
self.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
if attempts == 0:
self.log( 'Giving up.' )
raise
self.log( 'Retrying (%d more attempts).' % attempts )
time.sleep( sleep_secs )
def http_get( self, source_url, destination_file ):
import urllib
proxies = None
if hasattr(self,'proxy') and self.proxy is not None:
proxies = { 'http' : self.proxy }
src = urllib.urlopen( source_url, proxies = proxies )
f = open( destination_file, 'wb' )
while True:
data = src.read( 16*1024 )
if len( data ) == 0: break
f.write( data )
f.close()
src.close()
def import_utils(self):
global utils
if utils is None:
sys.path.append( self.xsl_reports_dir )
import utils as utils_module
utils = utils_module
def build_if_needed( self, tool, toolset ):
self.import_utils()
if os.path.exists( tool[ 'path' ] ):
self.log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
return
self.log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
if toolset is None:
if self.toolsets is not None:
toolset = string.split( self.toolsets, ',' )[0]
else:
toolset = tool[ 'default_toolset' ]
self.log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
self.log( ' Using default toolset for the platform (%s).' % toolset )
if os.path.exists( tool[ 'source_dir' ] ):
self.log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
build_cmd = tool[ 'build_cmd' ]( toolset, tool['build_args'] )
self.log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
utils.system( [ 'cd "%s"' % tool[ 'source_dir' ], build_cmd ] )
else:
            raise Exception( 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
if not tool.has_key( 'build_path' ):
tool[ 'build_path' ] = self.tool_path( tool )
if not os.path.exists( tool[ 'build_path' ] ):
            raise Exception( 'Failed to find "%s" after build.' % tool[ 'build_path' ] )
        self.log( '%s successfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
def tool_path( self, name_or_spec ):
if isinstance( name_or_spec, basestring ):
return os.path.join( self.regression_root, name_or_spec )
if os.path.exists( name_or_spec[ 'path' ] ):
return name_or_spec[ 'path' ]
if name_or_spec.has_key( 'build_path' ):
return name_or_spec[ 'build_path' ]
build_dir = name_or_spec[ 'build_dir' ]
self.log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_dir ) )
for root, dirs, files in os.walk( build_dir ):
if name_or_spec[ 'name' ] in files:
return os.path.join( root, name_or_spec[ 'name' ] )
raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
name_or_spec[ 'name' ]
, '\n'.join( [ name_or_spec[ 'path' ], build_dir ] )
) )
def bjam_build_cmd( self, *rest ):
if sys.platform == 'win32':
cmd = 'build.bat %s' % self.bjam_toolset
else:
cmd = './build.sh %s' % self.bjam_toolset
env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
if os.environ.has_key( env_setup_key ):
return '%s & %s' % ( os.environ[env_setup_key], cmd )
return cmd
def bjam_cmd( self, toolsets, args = '', *rest ):
build_path = self.regression_root
if build_path[-1] == '\\': build_path += '\\'
if self.timeout > 0:
args += ' -l%s' % (self.timeout*60)
cmd = '"%(bjam)s"' +\
' "-sBOOST_BUILD_PATH=%(bbpath)s"' +\
' "-sBOOST_ROOT=%(boost)s"' +\
' "--boost=%(boost)s"' +\
' "--boost-build=%(bb)s"' +\
' "--debug-configuration"' +\
' %(arg)s'
cmd %= {
'bjam' : self.tool_path( self.bjam ),
'bbpath' : os.pathsep.join([build_path,self.tools_bb_root]),
'bb' : self.tools_bb_root,
'boost' : self.boost_root,
'arg' : args }
if toolsets:
import string
cmd += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
return cmd
def send_mail( self, subject, msg = '' ):
import smtplib
if not self.smtp_login:
            server_name = 'mail.%s' % self.mail.split( '@' )[-1]
user_name = None
password = None
else:
server_name = self.smtp_login.split( '@' )[-1]
            ( user_name, password ) = self.smtp_login.split( '@' )[0].split( ':' )
        self.log( ' Sending mail through "%s"...' % server_name )
smtp_server = smtplib.SMTP( server_name )
smtp_server.set_debuglevel( self.debug_level )
if user_name:
smtp_server.login( user_name, password )
smtp_server.sendmail( self.mail, [ self.mail ],
'Subject: %s\nTo: %s\n\n%s' % ( subject, self.mail, msg ) )
def compress_file( self, file_path, archive_path ):
self.import_utils()
utils.log( 'Compressing "%s"...' % file_path )
try:
import zipfile
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
            utils.log( 'Warning: Compressing failed (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try:
import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
#~ Dowloading source, from SVN...
def svn_checkout( self ):
os.chdir( self.regression_root )
self.svn_command( 'co %s %s' % (self.svn_repository_url(self.tag),'boost') )
def svn_update( self ):
os.chdir( self.boost_root )
self.svn_command( 'update' )
def svn_command( self, command ):
svn_anonymous_command_line = 'svn --non-interactive %(command)s'
svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
if not hasattr(self,'user') or self.user is None or self.user == 'anonymous':
cmd = svn_anonymous_command_line % { 'command': command }
else:
cmd = svn_command_line % { 'user': self.user, 'command': command }
self.log( 'Executing SVN command "%s"' % cmd )
rc = os.system( cmd )
if rc != 0:
raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
def svn_repository_url( self, path ):
if self.user != 'anonymous' and self.user != '':
return '%s%s' % (repo_root['user'],path)
else:
return '%s%s' % (repo_root['anon'],path)
#~ Downloading and extracting source archives, from tarballs or zipballs...
def get_tarball( self, *args ):
if not args or args == []:
args = [ 'download', 'unpack' ]
tarball_path = None
if hasattr(self,'local') and self.local is not None:
tarball_path = self.local
elif 'download' in args:
tarball_path = self.download_tarball(self.boost_tarball_name(),self.boost_tarball_url())
if not tarball_path:
tarball_path = os.path.join( self.regression_root, self.boost_tarball_url() )
if 'unpack' in args:
self.unpack_tarball( tarball_path, self.boost_root )
pass
def download_tarball( self, tarball_name, tarball_url ):
tarball_path = os.path.join( self.regression_root, tarball_name )
self.log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
if os.path.exists( tarball_path ):
os.unlink( tarball_path )
self.http_get( tarball_url, tarball_path )
return tarball_path
def tarball_url( self, path ):
return 'http://beta.boost.org/development/snapshot.php/%s' % path
def boost_tarball_name( self ):
return 'boost-%s.tar.bz2' % self.tag.split( '/' )[-1]
def boost_tarball_url( self ):
return self.tarball_url( self.tag )
def unpack_tarball( self, tarball_path, target_path ):
self.log( 'Looking for old unpacked archives...' )
old_boost_dirs = self.find_boost_dirs( )
for old_boost_dir in old_boost_dirs:
if old_boost_dir != tarball_path:
self.log( 'Deleting old directory %s.' % old_boost_dir )
self.rmtree( old_boost_dir )
self.log( 'Unpacking boost tarball ("%s")...' % tarball_path )
tarball_name = os.path.basename( tarball_path )
extension = tarball_name[ tarball_name.find( '.' ) : ]
if extension in ( ".tar.gz", ".tar.bz2" ):
import tarfile
import stat
mode = os.path.splitext( extension )[1][1:]
tar = tarfile.open( tarball_path, 'r:%s' % mode )
for tarinfo in tar:
tar.extract( tarinfo, self.regression_root )
if sys.platform == 'win32' and not tarinfo.isdir():
# workaround what appears to be a Win32-specific bug in 'tarfile'
# (modification times for extracted files are not set properly)
f = os.path.join( self.regression_root, tarinfo.name )
os.chmod( f, stat.S_IWRITE )
os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
tar.close()
        elif extension == ".zip":
import zipfile
z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
for f in z.infolist():
destination_file_path = os.path.join( self.regression_root, f.filename )
if destination_file_path[-1] == "/": # directory
if not os.path.exists( destination_file_path ):
os.makedirs( destination_file_path )
else: # file
result = open( destination_file_path, 'wb' )
result.write( z.read( f.filename ) )
result.close()
z.close()
else:
            raise Exception( 'Do not know how to unpack archives with extension "%s"' % extension )
boost_dir = self.find_boost_dirs()[0]
self.log( ' Unpacked into directory "%s"' % boost_dir )
if os.path.exists( target_path ):
self.log( 'Deleting "%s" directory...' % target_path )
self.rmtree( target_path )
self.log( 'Renaming "%s" into "%s"' % ( boost_dir, target_path ) )
os.rename( boost_dir, target_path )
def find_boost_dirs( self ):
return [
x for x in
glob.glob( os.path.join( self.regression_root, 'boost[-_]*' ) )
if os.path.isdir( x )
]
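#~ --- Hypothetical invocation sketch (not part of the original script) ---
#~ The runner class parses sys.argv itself in __init__, so a driver only
#~ needs to supply the regression root directory, e.g.:
#~
#~ if __name__ == '__main__':
#~     runner( os.path.abspath( 'regression_root' ) )
#~
#~ and is typically launched as:
#~ python regression.py --runner=MyRunner --toolsets=gcc --tag=trunk regression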
|
kelvindk/Video-Stabilization
|
boost_1_42_0/tools/regression/src/regression.py
|
Python
|
gpl-3.0
| 36,064
|
#!/usr/bin/python
""" wrapper to make simple calls to raxml """
import os
import sys
import glob
import subprocess
from ipyrad.analysis.utils import Params
from ipyrad.assemble.utils import IPyradError
# alias
OPJ = os.path.join
class Raxml(object):
"""
RAxML analysis utility function. This tool makes it easy to build a
raxml command line string and submit it as a job. It also makes it easy
to access the resulting tree files. Set params on the raxml object and
print(<object>.command) to see raxml command string. Call .run() to
submit the job running in background, or .run(block=True) to wait until
it finishes.
Parameters:
-----------
data: str
        The phylip formatted sequence file (.phy from ipyrad). An alias for '-s'.
name: str
The name for this run. An alias for '-n'.
workdir: str
The output directory for results. An alias for '-w'.
Additional optional parameters
-------------------------------
f: str
(-f a) The raxml function. Default is 'a'.
T: str
(-T 4) The number of threads. Default is 4.
m: str
(-m GTRGAMMA) The model to use.
N: str
(-N 100) The number of distinct starting trees from which to run full
search, or number of bootstrap replicates to run if using -f a.
x: str
(-x 12345) The bootstrap random seed.
p: str
(-p 54321) The parsimony random seed.
n: str
(-n test) The prefix name for output files
w: str
(-w outdir) The output directory
s: str
(-s seq.phy) The .phy formatted sequence file.
o: str or list
(-o tax1,tax2) A list of outgroup sample names or a string.
Attributes:
-----------
params: dict
parameters for this raxml run
command:
returns the command string to run raxml
Functions:
----------
run()
submits a raxml job to locally or on an ipyparallel client cluster.
"""
# init object for params
def __init__(
self,
data,
name="test",
workdir="analysis-raxml",
*args,
**kwargs):
# path attributes
self._kwargs = {
"f": "a",
"T": 4, # <- change to zero !?
"m": "GTRGAMMA",
"N": 100,
"x": 12345,
"p": 54321,
"o": None,
"binary": "",
}
# update kwargs for user args and drop key if value is None
self._kwargs.update(kwargs)
self._kwargs = {i: j for (i, j) in self._kwargs.items() if j is not None}
# check workdir
if workdir:
workdir = os.path.abspath(os.path.expanduser(workdir))
else:
workdir = os.path.abspath(os.path.curdir)
if not os.path.exists(workdir):
os.makedirs(workdir)
# store entered args in params object
self.params = Params()
self.params.n = name
self.params.w = workdir
self.params.s = os.path.abspath(os.path.expanduser(data))
# if arg append kwargs to top of list of binaries to search for
binaries = _get_binary_paths()
if self._kwargs["binary"]:
binaries = [self._kwargs["binary"]] + binaries
        # find a binary from the list
self.params.binary = _check_binaries(binaries)
# set params
notparams = set(["workdir", "name", "data", "binary"])
for key in set(self._kwargs.keys()) - notparams:
self.params[key] = self._kwargs[key]
        # attributes
self.rasync = None
self.stdout = None
self.stderr = None
# results files
self.trees = Params()
self.trees.bestTree = OPJ(workdir, "RAxML_bestTree." + name)
self.trees.bipartitionsBranchLabels = OPJ(workdir, "RAxML_bipartitionsBranchLabels." + name)
self.trees.bipartitions = OPJ(workdir, "RAxML_bipartitions." + name)
self.trees.bootstrap = OPJ(workdir, "RAxML_bootstrap." + name)
self.trees.info = OPJ(workdir, "RAxML_info." + name)
@property
def _command_list(self):
""" build the command list """
cmd = [
self.params.binary,
"-f", str(self.params.f),
"-T", str(self.params.T),
"-m", str(self.params.m),
"-n", str(self.params.n),
"-w", str(self.params.w),
"-s", str(self.params.s),
"-p", str(self.params.p),
]
if 'N' in self.params:
cmd += ["-N", str(self.params.N)]
if "x" in self.params:
cmd += ["-x", str(self.params.x)]
        # ultrafast bootstrap and mapping with -f d
# If no bootstraps then run -f D not -f a, and drop -x and -N
# if "-f D":
# add ougroups
if 'o' in self.params:
cmd += ["-o"]
cmd += [",".join(self.params.o)]
return cmd
@property
def command(self):
""" returns command as a string """
return " ".join(self._command_list)
def run(
self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
"""
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an
ipyclient is provided then the job is sent to a remote engine and an
asynchronous result object is returned which can be queried or awaited
until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
"""
# force removes old files, a bit risky here if names are subsets
if force:
opath = os.path.join(
self.params.w, "RAxML_*.{}".format(self.params.n))
oldfiles = glob.glob(opath)
for oldfile in oldfiles:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error Files Exist: set a new name or use Force flag.\n{}"
.format(self.trees.info))
return
## TODO: add a progress bar tracker here. It could even read it from
## the info file that is being written.
## submit it
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
            # submit the job to the cluster through a load-balanced view
lbview = ipyclient.load_balanced_view()
self.rasync = lbview.apply(_call_raxml, self._command_list)
        # report status unless quiet
if not quiet:
if not ipyclient:
# look for errors
if "Overall execution time" not in self.stdout.decode():
print("Error in raxml run\n" + self.stdout.decode())
else:
print("job {} finished successfully".format(self.params.n))
else:
if block:
print("job {} running".format(self.params.n))
ipyclient.wait()
if self.rasync.successful():
print(
"job {} finished successfully"
.format(self.params.n))
else:
raise IPyradError(self.rasync.get())
else:
print("job {} submitted to cluster".format(self.params.n))
def _get_binary_paths():
# check for binary
list_binaries = [
"raxmlHPC-PTHREADS-AVX2",
"raxmlHPC-PTHREADS-AVX",
"raxmlHPC-PTHREADS-SSE3",
"raxmlHPC-PTHREADS",
]
# expand for env path
list_binaries = [os.path.join(sys.prefix, "bin", i) for i in list_binaries]
return list_binaries
def _check_binaries(binaries):
""" find and return a working binary"""
# check user binary first, then backups
for binary in binaries:
# call which to find
proc = subprocess.Popen(
["which", binary],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).communicate()
# if it exists then update the binary
if proc[0]:
return binary
# if you get here then no binaries were found
raise NameError(BINARY_ERROR)
def _call_raxml(command_list):
""" call the command as sps """
proc = subprocess.Popen(
command_list,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE
)
comm = proc.communicate()
return comm
BINARY_ERROR = """
RAxML binary not found.
Check that you have raxml installed. For example, with conda:
'conda install raxml -c bioconda'
If you have a different binary installed you can select it using
the argument 'binary'. For example:
rax = ipa.raxml(name='test', data='test.phy', binary='raxmlHPC')
"""
|
dereneaton/ipyrad
|
ipyrad/analysis/raxml.py
|
Python
|
gpl-3.0
| 9,380
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import psycopg2.extras
import os
import subprocess
import sys
sys.path.append("C:\\Program Files\\FME\\fmeobjects\\python27")
import fmeobjects
# ----------------------##### VARIABLES #####---------------------- #
#
# formatList : list of formats defined by the admin in the database
# srsList : list of SRS defined by the admin in the database
# mdataList : list of eligible datasets
# cheminTelevers : path to the extracted-data server, defined by the admin in the database
#
# ###---------------------- FORMAT RETRIEVAL ----------------------### #
con = None
try:
con = psycopg2.connect("dbname='chimn2' user='postgres' port='5433'")
cur = con.cursor()
cur.execute("SELECT nameformat FROM preferenceformat WHERE activateformat = 'True'")
rows = cur.fetchmany(7)
resultFormat=[]
formatList = ""
for row in rows:
resultFormat.append(row[0])
for i in range(0, len(resultFormat)):
formatList = formatList + resultFormat[i]
    print formatList # published format parameter
    # ###---------------------- SRS RETRIEVAL ----------------------### #
cur = con.cursor()
cur.execute("SELECT epsg FROM preferencesrs WHERE activatesrs='True'")
rows = cur.fetchmany(3)
resultSRS=[]
srsList = ""
for row in rows:
srsList = srsList + str(row[0])
print srsList
    # #---------------------- ELIGIBLE DATA RETRIEVAL ----------------------# #
cur = con.cursor()
cur.execute("SELECT title FROM metadata WHERE metadata.changed ='True' AND metadata.asked = 'True'")
    rows = cur.fetchmany(3) # limit to be revisited
resultData=[]
for row in rows:
resultData.append(row[0])
mdataList = ""
for z in range(0, len(resultData)):
mdataList = mdataList + " " + resultData[z]
# print resultData[z]
# resultData[z].split('.',2)
# for partie in resultData[z].split('.',2) :
# print partie
    print mdataList # titles of the eligible datasets
    # #---------------------- RETRIEVE THE DATA-EXTRACTION SERVER PATH ----------------------# #
cur = con.cursor()
cur.execute("SELECT tlurl FROM parameters")
    rows = cur.fetchmany(3) # limit to be revisited
resultchemin=[]
for row in rows:
resultchemin.append(row[0])
cheminTelevers = ""
for j in range(0, len(resultchemin)):
cheminTelevers = resultchemin[j]
    print cheminTelevers # path to the extracted-data server
    # #---------------------- ERROR HANDLING ----------------------# #
except psycopg2.DatabaseError, e:
print 'Error %s' % e
sys.exit(1)
finally:
if con:
con.close()
#os.system("fme.exe C:\\Users\\ndrissi\\Downloads\\dynamic_format_list.fmw --FORMATS_LIST " + formatList + " --SRS_LIST " + srsList + " --POSTGIS_TABLE " + mdataList + " --DEST C:\\Users\\ndrissi\\Downloads --DestDataset_SHAPE C:\\Users\\ndrissi\\Downloads")
for p in range (0, len(resultData)):
print resultData[p]
#os.system("fme.exe C:\\Users\\ndrissi\\Downloads\\workspaceFME_testv1.fmw --FORMATS_LIST " + formatList + " --SRS_LIST " + srsList + " --POSTGIS_TABLE " + resultData[p] + " --DEST " + cheminTelevers + " --LOG C:\\Users\\ndrissi\\Downloads\\log-" + str(p) + "")
os.system("fme.exe C:\\Users\\ndrissi\\Downloads\\workspaceFME_testv2.fmw --FORMATS_LIST " + formatList + " --SRS_LIST " + srsList + " --POSTGIS_TABLE " + resultData[p] + " --DEST " + cheminTelevers + " --LOG " + cheminTelevers + "\\log-" + str(p) + "")
|
TSIG15/CHIMN
|
src/televersement_regulier/DOUBLONnewBDD.py
|
Python
|
gpl-3.0
| 3,679
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et sw=4 fenc=utf-8:
#
# Copyright 2016~2018 INVITE Communications Co., Ltd. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
""" ivr - A set of IVR functions for Asterisk.
The pyst project includes several python modules to assist in programming
asterisk with python:
ivr - The IVR Template
"""
#from ivr.ivr import *
__all__ = ['ivr','test','connection', 'odbc']
__version__ = '0.0.8'
#from .test import joke
|
invitecomm/asterisk-ivr
|
ivr/__init__.py
|
Python
|
gpl-3.0
| 1,089
|
helppage = "https://github.com/JorisPLA7/Super-D-mineur"
githubpage = "https://github.com/JorisPLA7/Super-D-mineur/"
rulepage = "http://demineur.hugames.fr/help.php"
import webbrowser
def help():
webbrowser.open(helppage)
def github():
webbrowser.open(githubpage)
def rules():
webbrowser.open(rulepage)
if __name__ == '__main__':
help()
|
JorisPLA7/Super-D-mineur
|
lib/web.py
|
Python
|
gpl-3.0
| 355
|
# TODO: make this run in under 1 min.
# SOLVED
MAX_P = 1000
best_p = 120
best_num_sides = 3
for p in range(2, MAX_P+1):
num_sides = 0
if p % 30 == 0:
print(p)
    for a in range(1, MAX_P // 2 + 2):
        for b in range(1, MAX_P // 2 + 2):
c = p - a - b
if a > b and b > c and c**2 + b**2 == a**2 and a + b + c == p and c > 0:
# print("sides {} {} {}".format(a,b,c))
# print("P={}".format(p))
num_sides += 1
if num_sides > best_num_sides:
# print("Change to p={}".format(p))
# import pdb; pdb.set_trace()
best_num_sides = num_sides
best_p = p
print("Done")
print(best_p)
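# A faster sketch for the TODO above (assumption: only the best perimeter is
# needed). From a + b + c = p and a^2 + b^2 = c^2, b is determined by a:
# b = p*(p - 2a) / (2*(p - a)), so each perimeter is checked in O(p) instead
# of O(p^2). The perimeter of an integer right triangle is always even.
#
# def count_solutions(p):
#     count = 0
#     for a in range(1, p // 3):  # the shortest side is always < p/3
#         num, den = p * (p - 2 * a), 2 * (p - a)
#         if num % den == 0 and num // den > a:  # integer b, a < b avoids double counts
#             count += 1
#     return count
#
# print(max(range(2, MAX_P + 1, 2), key=count_solutions))  # 840 for MAX_P = 1000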
|
Daphron/project-euler
|
p39.py
|
Python
|
gpl-3.0
| 697
|
# Python 3: ArchiveNM.py
# Function:
# This will collect the files in /home/postgres that
# need to be sent to a new Natural Message machine
# that is being initialized. This currently grabs
# directory server and shard server files.
# It can also be used as an archiver.
import datetime
import tarfile
import os
import sys
# For the version code, enter the format used
# in the naturalmsg_svr_#_#_#.py files
test_or_prod = 'prod'
version = '0_0_5'
DSTAMP = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# (do not add a trailing slash on directory names)
pgm_dir = '/var/natmsg'
sql_dir = '/home/postgres/shard/sql/' + test_or_prod
function_dir = '/home/postgres/shard/sql/' + test_or_prod + '/functions'
pgm_files = ('naturalmsg-svr' + version + '.py',
'shardfunc_cp' + version + '.py')
sql_files = ( \
'0001create_db.sh',
'0002create_tables.sql',
'0005shardserver.sql',
'0007shardbig.sql',
'0020payment.sql',
'0500sysmon.sql',
'blog01.sql' \
)
function_files = ( \
'nm_blog_entry_newest.sql',
'read_inbasket_stage010.sql',
'read_inbasket_stage020.sql',
'read_inbasket_stage030.sql',
'scan_shard_delete.sql',
'shard_burn.sql',
'shard_delete_db_entries.sql',
'shard_delete.sql',
'shard_expire_big.sql',
'shard_expire.sql',
'shard_id_exists.sql',
'smd_create0010.sql',
'sysmon001.sql' \
)
tar_fname_base = 'NatMsgSQLArchive' + version
tar_fname = tar_fname_base + '.tar'
if os.path.isfile(tar_fname):
# The tar file already exists, rename it
try:
os.renames(tar_fname, tar_fname_base + '-' + DSTAMP + '.tar')
except:
print('Error renaming an existing tar file: ' + tar_fname)
print('Maybe you do not have permission.')
sys.exit(12)
t = tarfile.TarFile(tar_fname, mode='w')
for f in pgm_files:
# the full path is already specified in the file list.
t.add(os.path.normpath(pgm_dir + '/' + f))
for f in sql_files:
t.add(os.path.normpath(sql_dir + '/' + f))
for f in function_files:
t.add(os.path.normpath(function_dir + '/' + f))
t.close()
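# A small verification sketch (optional): list what actually went into
# the archive after it has been written.
#
# with tarfile.open(tar_fname) as archive:
#     for member in archive.getnames():
#         print(member)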
|
naturalmessage/natmsgshardbig
|
sql/ArchiveNM.py
|
Python
|
gpl-3.0
| 2,018
|
#!/usr/bin/python
"""
VM starter - start your virtual hosts based on config file
--------------------------------------------------------------------------------
@author Petr Juhanak, http://www.hackerlab.cz
@release 1.0, 20130808
@licence GPLv3 http:://www.gnu.org/licenses/gpl-3.0.html
--------------------------------------------------------------------------------
"""
import re
import sys
import os.path
import subprocess
import time
import operator
START_SYMBOL = [ 'x', 'X', '1','i','I','s','S']
VBOX_MANAGE = r'C:\Program Files\Oracle\VirtualBox\VBoxManage.exe'  # raw string avoids accidental escape sequences
if os.name == 'nt':
vbox_manage = os.path.normpath('"'+VBOX_MANAGE+'"')
# functions
def show_banner():
print "VM starter 1.0 - Author Petr Juhanak | http://www.hackerlab.cz (GPLv3)"
print ""
def report_error(message):
print "Usage: vm.py <lab.config> [START|stop|reset]"
print ""
print " [!]", message
def print_vmtable():
    file = open(CONFIG_FILE)
for line in file:
print line.strip()
def vmtable(filename):
vmt = []
file = open(filename)
for i,line in enumerate(file):
if not line.startswith(";"):
match = re.search('(.*)\|(.*)\|(.*)\|(.*)', line)
if match:
name = match.group(1).strip()
execute = match.group(2).strip()
delay = match.group(3).strip()
comment = match.group(4).strip()
try:
int(delay)
except ValueError:
delay = '0'
vmt.append([name, execute, delay, comment])
return vmt
# start
show_banner()
if len(sys.argv) > 1:
CONFIG_FILE = sys.argv[1]
else:
report_error("Error: missing config file")
sys.exit(1)
if len(sys.argv) > 2:
ACTION = sys.argv[2].upper().strip()
else:
ACTION = "start".upper().strip()
# start VMs according to lab.config
vmt = vmtable(CONFIG_FILE)
vmt.sort(key=lambda x: int(x[2]))
for vm in vmt:
name = vm[0]
execute = vm[1]
delay = vm[2]
comment = vm[3]
if execute in START_SYMBOL:
if ACTION == "START":
cmd = vbox_manage + ' startvm ' + '"' + name + '"'
if ACTION == "RESET":
cmd = vbox_manage + ' controlvm ' + '"' + name + '"' + ' reset'
if ACTION == "STOP":
cmd = vbox_manage + ' controlvm ' + '"' + name + '"' + ' poweroff'
print ""
        if (int(delay) > 0) and ACTION in ["START", "RESET"]:
print '[*] ' + ACTION + ' VM ' + name + ' after ' + str(delay) + ' sec.'
time.sleep(float(delay))
else:
print '[*] ' + ACTION + ' VM ' + name
p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
while True:
out = p.stderr.read(1)
            if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
"""
Example LAB.CONFIG:
===================
;
;
; Start VM when start symbol is found [S, X, 1, I]
; with a delay: integer [sec] - default zero
; VM name has to match with your VirtualBOX VM name
;-------------------------------------------------------------------------------
; VM name |S| Delay | Comment
;---------------+---------+-----------------------------------------------------
PFSense VLAN1 |S| 0 | DHCP server/FW - has to start first
WinXP.SP3 |S| 25 | HTTP Apache 1.3 (started with 25sec delay)
bt5 VLAN1 |S| 24 | Backtrack (started with 24sec delay)
VM007 |-| | This VM will not start due to missing start symbol
"""
|
pjuhanak/vm-starter
|
vm.py
|
Python
|
gpl-3.0
| 3,827
|
#!/usr/bin/env python
import time
import sys
if len(sys.argv) > 1:
INTERFACE = sys.argv[1]
else:
INTERFACE = 'eth1'
STATS = []
print 'Interface:',INTERFACE
def rx():
ifstat = open('/proc/net/dev').readlines()
for interface in ifstat:
#print '----', interface, '-----'
if INTERFACE in interface:
stat = float(interface.split()[1])
STATS[0:] = [stat]
def tx():
ifstat = open('/proc/net/dev').readlines()
for interface in ifstat:
if INTERFACE in interface:
stat = float(interface.split()[9])
STATS[1:] = [stat]
print 'In Out'
rx()
tx()
while True:
time.sleep(1)
rxstat_o = list(STATS)
rx()
tx()
RX = float(STATS[0])
RX_O = rxstat_o[0]
TX = float(STATS[1])
TX_O = rxstat_o[1]
RX_RATE = round((RX - RX_O)/1024/1024,3)
TX_RATE = round((TX - TX_O)/1024/1024,3)
print RX_RATE ,'MB ',TX_RATE ,'MB'
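# An equivalent, more portable sketch using psutil (assumption: psutil is
# installed; it is not used elsewhere in this script):
#
# import psutil
# counters = psutil.net_io_counters(pernic=True)[INTERFACE]
# rx_bytes, tx_bytes = counters.bytes_recv, counters.bytes_sent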
|
soarpenguin/python-scripts
|
net.py
|
Python
|
gpl-3.0
| 954
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import drm_swig as drm
class qa_cell_mapping_cc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.tp = drm.transm_params(1, 3, False, 0, 1, 0, 1, 1, 0, False, 24000, "station label", "text message")
vlen_msc = self.tp.msc().N_MUX() * self.tp.ofdm().M_TF()
vlen_sdc = self.tp.sdc().N()
vlen_fac = self.tp.fac().N() * self.tp.ofdm().M_TF()
self.cell_mapping = drm.cell_mapping_cc(self.tp, (vlen_msc, vlen_sdc, vlen_fac))
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_cell_mapping_cc, "qa_cell_mapping_cc.xml")
|
kit-cel/gr-drm
|
python/qa_cell_mapping_cc.py
|
Python
|
gpl-3.0
| 1,593
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 17 17:19:10 2016
@author: Michael
"""
from PyQt5 import QtWidgets
class AboutWindow(QtWidgets.QTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
self.setReadOnly(True)
self.setHtml(
"""
<h1 id="kano">Kano</h1>
<p>Copyright (c) 2017, Michael Schreier <br>
All rights reserved.</p>
<p>This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program. If not, see <a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a></p>
<hr>
<p>Kano has been built using the following libraries:</p>
<h3 id="entypo">Entypo+</h3>
<blockquote>
<p>All icons used by Kano are taken from the “Entypo+” library by Daniel Bruce, available under the Creative Commons license CC BY-SA 4.0.</p>
</blockquote>
<h3 id="pyqt5">PyQt5</h3>
<blockquote>
<p>Copyright (c) 2017, Riverbank Computing Limited <br>
All rights reserved.</p>
<p>This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by >the Free Software Foundation, either version 3 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program. If not, see <a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a></p>
</blockquote>
<h3 id="fuzzywuzzy">FuzzyWuzzy</h3>
<blockquote>
            <p>Copyright (c) 2017, SeatGeek <br>
All rights reserved.</p>
<p>This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA</p>
</blockquote>
<h3 id="pyyaml">PyYAML</h3>
<blockquote>
<p>Copyright (c) 2006, Kirill Simonov <br>
All rights reserved.</p>
<p>Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:</p>
<p>The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.</p>
<p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>
</blockquote>
"""
)
|
MichaelSchreier/Kano
|
class_UI_aboutWindow.py
|
Python
|
gpl-3.0
| 4,429
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time, sys, socket, os
import threading
import urllib2
import json
import Queue
import sqlite3
import electrum_doge as electrum
electrum.set_verbosity(False)
import ConfigParser
config = ConfigParser.ConfigParser()
config.read("merchant.conf")
my_password = config.get('main','password')
my_host = config.get('main','host')
my_port = config.getint('main','port')
database = config.get('sqlite3','database')
received_url = config.get('callback','received')
expired_url = config.get('callback','expired')
cb_password = config.get('callback','password')
wallet_path = config.get('electrum','wallet_path')
xpub = config.get('electrum','xpub')
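# Sketch of a merchant.conf matching the options read above (all values are
# placeholders, not taken from the project):
#
# [main]
# password = changeme
# host = localhost
# port = 8080
#
# [sqlite3]
# database = merchant.db
#
# [callback]
# received = https://example.com/received
# expired = https://example.com/expired
# password = changeme
#
# [electrum]
# wallet_path = /path/to/wallet
# xpub = xpub...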
pending_requests = {}
num = 0
def check_create_table(conn):
global num
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='electrum_payments';")
data = c.fetchall()
if not data:
c.execute("""CREATE TABLE electrum_payments (address VARCHAR(40), amount FLOAT, confirmations INT(8), received_at TIMESTAMP, expires_at TIMESTAMP, paid INT(1), processed INT(1));""")
conn.commit()
c.execute("SELECT Count(address) FROM 'electrum_payments'")
num = c.fetchone()[0]
print "num rows", num
def row_to_dict(x):
return {
'id':x[0],
'address':x[1],
'amount':x[2],
'confirmations':x[3],
'received_at':x[4],
'expires_at':x[5],
'paid':x[6],
'processed':x[7]
}
# this process detects when addresses have received payments
def on_wallet_update():
for addr, v in pending_requests.items():
h = wallet.history.get(addr, [])
requested_amount = v.get('requested')
requested_confs = v.get('confirmations')
value = 0
for tx_hash, tx_height in h:
tx = wallet.transactions.get(tx_hash)
if not tx: continue
if wallet.verifier.get_confirmations(tx_hash) < requested_confs: continue
for o in tx.outputs:
o_address, o_value = o
if o_address == addr:
value += o_value
s = (value)/1.e8
print "balance for %s:"%addr, s, requested_amount
if s>= requested_amount:
print "payment accepted", addr
out_queue.put( ('payment', addr))
stopping = False
def do_stop(password):
global stopping
if password != my_password:
return "wrong password"
stopping = True
return "ok"
def process_request(amount, confirmations, expires_in, password):
global num
if password != my_password:
return "wrong password"
try:
amount = float(amount)
confirmations = int(confirmations)
expires_in = float(expires_in)
except Exception:
return "incorrect parameters"
account = wallet.default_account()
addr = account.get_address(0, num)
num += 1
out_queue.put( ('request', (addr, amount, confirmations, expires_in) ))
return addr
def do_dump(password):
if password != my_password:
return "wrong password"
    conn = sqlite3.connect(database)
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments;")
data = cur.fetchall()
return map(row_to_dict, data)
def getrequest(oid, password):
oid = int(oid)
    conn = sqlite3.connect(database)
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments WHERE oid=%d;"%(oid))
data = cur.fetchone()
return row_to_dict(data)
def send_command(cmd, params):
import jsonrpclib
server = jsonrpclib.Server('http://%s:%d'%(my_host, my_port))
try:
f = getattr(server, cmd)
except socket.error:
print "Server not running"
return 1
try:
out = f(*params)
except socket.error:
print "Server not running"
return 1
print json.dumps(out, indent=4)
return 0
def db_thread():
    conn = sqlite3.connect(database)
# create table if needed
check_create_table(conn)
while not stopping:
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT address, amount, confirmations FROM electrum_payments WHERE paid IS NULL;")
data = cur.fetchall()
# add pending requests to the wallet
for item in data:
addr, amount, confirmations = item
if addr in pending_requests:
continue
else:
with wallet.lock:
print "subscribing to %s"%addr
pending_requests[addr] = {'requested':float(amount), 'confirmations':int(confirmations)}
wallet.synchronizer.subscribe_to_addresses([addr])
wallet.up_to_date = False
try:
cmd, params = out_queue.get(True, 10)
except Queue.Empty:
cmd = ''
if cmd == 'payment':
addr = params
# set paid=1 for received payments
print "received payment from", addr
cur.execute("update electrum_payments set paid=1 where address='%s'"%addr)
elif cmd == 'request':
# add a new request to the table.
addr, amount, confs, minutes = params
sql = "INSERT INTO electrum_payments (address, amount, confirmations, received_at, expires_at, paid, processed)"\
+ " VALUES ('%s', %f, %d, datetime('now'), datetime('now', '+%d Minutes'), NULL, NULL);"%(addr, amount, confs, minutes)
print sql
cur.execute(sql)
# set paid=0 for expired requests
cur.execute("""UPDATE electrum_payments set paid=0 WHERE expires_at < CURRENT_TIMESTAMP and paid is NULL;""")
# do callback for addresses that received payment or expired
cur.execute("""SELECT oid, address, paid from electrum_payments WHERE paid is not NULL and processed is NULL;""")
data = cur.fetchall()
for item in data:
oid, address, paid = item
paid = bool(paid)
headers = {'content-type':'application/json'}
data_json = { 'address':address, 'password':cb_password, 'paid':paid }
data_json = json.dumps(data_json)
url = received_url if paid else expired_url
if not url:
continue
req = urllib2.Request(url, data_json, headers)
try:
response_stream = urllib2.urlopen(req)
print 'Got Response for %s' % address
cur.execute("UPDATE electrum_payments SET processed=1 WHERE oid=%d;"%(oid))
except urllib2.HTTPError:
print "cannot do callback", data_json
except ValueError, e:
print e
print "cannot do callback", data_json
conn.commit()
conn.close()
print "database closed"
if __name__ == '__main__':
if len(sys.argv) > 1:
cmd = sys.argv[1]
params = sys.argv[2:] + [my_password]
ret = send_command(cmd, params)
sys.exit(ret)
# start network
c = electrum.SimpleConfig({'wallet_path':wallet_path})
daemon_socket = electrum.daemon.get_daemon(c,True)
network = electrum.NetworkProxy(daemon_socket,config)
network.start()
# wait until connected
while network.is_connecting():
time.sleep(0.1)
if not network.is_connected():
        print "daemon is not connected"
sys.exit(1)
# create watching_only wallet
storage = electrum.WalletStorage(c)
if not storage.file_exists:
print "creating wallet file"
wallet = electrum.wallet.Wallet.from_xpub(xpub, storage)
else:
wallet = electrum.wallet.Wallet(storage)
wallet.synchronize = lambda: None # prevent address creation by the wallet
wallet.start_threads(network)
network.register_callback('updated', on_wallet_update)
threading.Thread(target=db_thread, args=()).start()
out_queue = Queue.Queue()
# server thread
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
server = SimpleJSONRPCServer(( my_host, my_port))
server.register_function(process_request, 'request')
server.register_function(do_dump, 'dump')
server.register_function(getrequest, 'getrequest')
server.register_function(do_stop, 'stop')
server.socket.settimeout(1)
while not stopping:
try:
server.handle_request()
except socket.timeout:
continue
|
electrumalt/electrum-doge
|
scripts/merchant/merchant.py
|
Python
|
gpl-3.0
| 9,336
|
# public class Solution {
# public char[][] updateBoard(char[][] board, int[] click) {
# int m = board.length, n = board[0].length;
# int row = click[0], col = click[1];
#
# if (board[row][col] == 'M') { // Mine
# board[row][col] = 'X';
# }
# else { // Empty
# // Get number of mines first.
# int count = 0;
# for (int i = -1; i < 2; i++) {
# for (int j = -1; j < 2; j++) {
# if (i == 0 && j == 0) continue;
# int r = row + i, c = col + j;
# if (r < 0 || r >= m || c < 0 || c >= n) continue;
# if (board[r][c] == 'M' || board[r][c] == 'X') count++;
# }
# }
#
# if (count > 0) { // If it is not a 'B', stop further DFS.
# board[row][col] = (char)(count + '0');
# }
# else { // Continue DFS to adjacent cells.
# board[row][col] = 'B';
# for (int i = -1; i < 2; i++) {
# for (int j = -1; j < 2; j++) {
# if (i == 0 && j == 0) continue;
# int r = row + i, c = col + j;
# if (r < 0 || r >= m || c < 0 || c >= n) continue;
# if (board[r][c] == 'E') updateBoard(board, new int[] {r, c});
# }
# }
# }
# }
#
# return board;
# }
# }
import collections


class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
if not board:
return
m, n = len(board), len(board[0])
queue = collections.deque()
queue.append((click[0], click[1]))
valid_neighbours = lambda (i, j): 0<=i<m and 0<=j<n
while queue:
x, y = queue.pop()
if board[x][y] == 'M':
board[x][y] = 'X'
else:
# Filter out the valid neighbours
neighbours = filter(valid_neighbours, [(x-1, y), (x+1, y),
(x, y-1), (x, y+1), (x-1, y-1), (x+1, y-1), (x-1, y+1), (x+1, y+1)])
# Count the number of mines amongst the neighbours
mine_count = sum([board[i][j]=='M' for i, j in neighbours])
# If at least one neighbour is a potential mine, store the mine count.
if mine_count > 0:
board[x][y] = str(mine_count)
# If no neighbour is a mine, then add all unvisited neighbours
# to the queue for future processing
else:
board[x][y] = 'B'
queue.extend([(i, j) for (i, j) in neighbours if board[i][j]=='E'])
return board
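# Usage sketch (the example from the LeetCode problem statement):
#
# board = [['E', 'E', 'E', 'E', 'E'],
#          ['E', 'E', 'M', 'E', 'E'],
#          ['E', 'E', 'E', 'E', 'E'],
#          ['E', 'E', 'E', 'E', 'E']]
# Solution().updateBoard(board, [3, 0])
# # -> [['B', '1', 'E', '1', 'B'],
# #     ['B', '1', 'M', '1', 'B'],
# #     ['B', '1', '1', '1', 'B'],
# #     ['B', 'B', 'B', 'B', 'B']]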
|
sadad111/leetcodebox
|
Minesweeper.py
|
Python
|
gpl-3.0
| 2,887
|
#!coding: utf-8
import base64
import calendar
import os
import re
import paramiko
from io import StringIO
import hashlib
import threading
import time
import pyte
from email.utils import formatdate  # needed by http_date() below
def ssh_key_string_to_obj(text):
key_f = StringIO(text)
key = None
try:
key = paramiko.RSAKey.from_private_key(key_f)
except paramiko.SSHException:
pass
try:
key = paramiko.DSSKey.from_private_key(key_f)
except paramiko.SSHException:
pass
return key
def ssh_pubkey_gen(private_key=None, username='jumpserver', hostname='localhost'):
if isinstance(private_key, str):
private_key = ssh_key_string_to_obj(private_key)
if not isinstance(private_key, (paramiko.RSAKey, paramiko.DSSKey)):
raise IOError('Invalid private key')
public_key = "%(key_type)s %(key_content)s %(username)s@%(hostname)s" % {
'key_type': private_key.get_name(),
'key_content': private_key.get_base64(),
'username': username,
'hostname': hostname,
}
return public_key
def ssh_key_gen(length=2048, type='rsa', password=None,
username='jumpserver', hostname=None):
"""Generate user ssh private and public key
Use paramiko RSAKey generate it.
:return private key str and public key str
"""
if hostname is None:
hostname = os.uname()[1]
f = StringIO()
try:
if type == 'rsa':
private_key_obj = paramiko.RSAKey.generate(length)
elif type == 'dsa':
private_key_obj = paramiko.DSSKey.generate(length)
else:
raise IOError('SSH private key must be `rsa` or `dsa`')
private_key_obj.write_private_key(f, password=password)
private_key = f.getvalue()
public_key = ssh_pubkey_gen(private_key_obj, username=username, hostname=hostname)
return private_key, public_key
except IOError:
        raise IOError('There was an error generating the ssh key.')
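# Usage sketch for the key helpers above (username/hostname are illustrative):
#
# private_key, public_key = ssh_key_gen(length=2048, type='rsa',
#                                       username='jumpserver', hostname='demo')
# key_obj = ssh_key_string_to_obj(private_key)  # parse back to a paramiko key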
def content_md5(data):
    """Compute the MD5 of data, Base64-encode it, and return it as str.

    The return value can be used directly as the value of an HTTP Content-MD5 header.
    """
if isinstance(data, str):
data = hashlib.md5(data.encode('utf-8'))
return base64.b64encode(data.digest())
_STRPTIME_LOCK = threading.Lock()
_GMT_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
_ISO8601_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z"
def to_unixtime(time_string, format_string):
with _STRPTIME_LOCK:
return int(calendar.timegm(time.strptime(str(time_string), format_string)))
def http_date(timeval=None):
    """Return an HTTP-standard GMT time string, i.e. strftime format
    "%a, %d %b %Y %H:%M:%S GMT".

    strftime itself cannot be used because its output is locale-dependent.
    """
    return formatdate(timeval, usegmt=True)
def http_to_unixtime(time_string):
    """Convert an HTTP Date string to UNIX time (seconds since 1970-01-01 UTC).

    An HTTP Date looks like `Sat, 05 Dec 2015 11:10:29 GMT`.
    """
    return to_unixtime(time_string, _GMT_FORMAT)
def iso8601_to_unixtime(time_string):
    """Convert an ISO8601 time string (e.g. 2012-02-24T06:07:48.000Z) to UNIX time, with second precision."""
    return to_unixtime(time_string, _ISO8601_FORMAT)
def make_signature(access_key_secret, date=None):
if isinstance(date, bytes):
date = date.decode("utf-8")
if isinstance(date, int):
date_gmt = http_date(date)
elif date is None:
date_gmt = http_date(int(time.time()))
else:
date_gmt = date
data = str(access_key_secret) + "\n" + date_gmt
return content_md5(data)
class TtyIOParser(object):
def __init__(self, width=80, height=24):
self.screen = pyte.Screen(width, height)
self.stream = pyte.ByteStream()
self.stream.attach(self.screen)
self.ps1_pattern = re.compile(r'^\[?.*@.*\]?[\$#]\s|mysql>\s')
def clean_ps1_etc(self, command):
return self.ps1_pattern.sub('', command)
def parse_output(self, data, sep='\n'):
"""
Parse user command output
:param data: output data list like, [b'data', b'data']
:param sep: line separator
:return: output unicode data
"""
output = []
for d in data:
self.stream.feed(d)
for line in self.screen.display:
if line.strip():
output.append(line)
self.screen.reset()
return sep.join(output[0:-1])
def parse_input(self, data):
"""
Parse user input command
:param data: input data list, like [b'data', b'data']
:return: command unicode
"""
command = []
for d in data:
self.stream.feed(d)
for line in self.screen.display:
line = line.strip()
if line:
command.append(line)
if command:
command = command[-1]
else:
command = ''
self.screen.reset()
command = self.clean_ps1_etc(command)
return command
def wrap_with_line_feed(s, before=0, after=1):
if isinstance(s, bytes):
return b'\r\n' * before + s + b'\r\n' * after
return '\r\n' * before + s + '\r\n' * after
def wrap_with_color(text, color='white', background=None,
bolder=False, underline=False):
bolder_ = '1'
underline_ = '4'
color_map = {
'black': '30',
'red': '31',
'green': '32',
'brown': '33',
'blue': '34',
'purple': '35',
'cyan': '36',
'white': '37',
}
background_map = {
'black': '40',
'red': '41',
'green': '42',
'brown': '43',
'blue': '44',
'purple': '45',
'cyan': '46',
'white': '47',
}
wrap_with = []
if bolder:
wrap_with.append(bolder_)
if underline:
wrap_with.append(underline_)
if background:
wrap_with.append(background_map.get(background, ''))
wrap_with.append(color_map.get(color, ''))
data = '\033[' + ';'.join(wrap_with) + 'm' + text + '\033[0m'
if isinstance(text, bytes):
return data.encode('utf-8')
return data
def wrap_with_warning(text, bolder=False):
return wrap_with_color(text, color='red', bolder=bolder)
def wrap_with_info(text, bolder=False):
return wrap_with_color(text, color='brown', bolder=bolder)
def wrap_with_primary(text, bolder=False):
return wrap_with_color(text, color='green', bolder=bolder)
def wrap_with_title(text):
return wrap_with_color(text, color='black', background='green')
|
ibuler/coco
|
coco/utils.py
|
Python
|
gpl-3.0
| 6,619
|
import os
def remove_fname_extension(fname):
return os.path.splitext(fname)[0]
def change_fname_extension(fname, extension):
return remove_fname_extension(fname) + '.' + extension
def concat(path, fname):
return path + '/' + fname
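# Usage sketch:
#
# change_fname_extension('data/report.txt', 'md')  # -> 'data/report.md'
# concat('data', 'report.md')                      # -> 'data/report.md'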
|
matt77hias/FileUtils
|
src/name.py
|
Python
|
gpl-3.0
| 249
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
"""
These are the views that control the spaces, meetings and documents.
"""
import datetime
import itertools
import hashlib
# Generic class-based views
from django.views.generic.base import TemplateView, RedirectView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.views.generic import FormView
# Decorators. the first is a wrapper to convert function-based decorators
# to method decorators that can be put in subclass methods.
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.contrib.admin.views.decorators import staff_member_required
# Response types
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404, redirect
# Some extras
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.template import RequestContext
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.db import connection
from django.core.mail import send_mail
# Function-based views
from django.views.generic.list_detail import object_list, object_detail
from django.views.generic.create_update import create_object, update_object
from django.views.generic.create_update import delete_object
# e-cidadania data models
from core.spaces.models import Space, Entity, Document, Event, Intent
from apps.ecidadania.news.models import Post
from core.spaces.forms import SpaceForm, DocForm, EventForm, \
EntityFormSet, UserRoleForm
from apps.ecidadania.proposals.models import Proposal, ProposalSet
from apps.ecidadania.staticpages.models import StaticPage
from apps.ecidadania.debate.models import Debate
from django.conf import settings
#thirdparty
from apps.thirdparty.userroles import roles
from apps.thirdparty.userroles.models import set_user_role
#
# RSS FEED
#
class SpaceFeed(Feed):
"""
Returns a space feed with the content of various applciations. In the future
this function must detect applications and returns their own feeds.
"""
def get_object(self, request, space_url):
current_space = get_object_or_404(Space, url=space_url)
return current_space
def title(self, obj):
return _("%s feed") % obj.name
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return _("All the recent activity in %s ") % obj.name
def items(self, obj):
results = itertools.chain(
Post.objects.all().filter(space=obj).order_by('-pub_date')[:10],
Proposal.objects.all().filter(space=obj).order_by('-pub_date')[:10],
Event.objects.all().filter(space=obj).order_by('-pub_date')[:10],
)
return sorted(results, key=lambda x: x.pub_date, reverse=True)
#
# INTENT VIEWS
#
@login_required
def add_intent(request, space_url):
"""
Returns a page where the logged in user can click on a "I want to
participate" button, which after sends an email to the administrator of
the space with a link to approve the user to use the space.
:attributes: space, intent, token
:rtype: Multiple entity objects.
:context: space_url, heading
"""
space = get_object_or_404(Space, url=space_url)
try:
intent = Intent.objects.get(user=request.user, space=space)
heading = _("Access has been already authorized")
except Intent.DoesNotExist:
token = hashlib.md5("%s%s%s" % (request.user, space,
datetime.datetime.now())).hexdigest()
intent = Intent(user=request.user, space=space, token=token)
intent.save()
subject = _("New participation request")
body = _("User {0} wants to participate in space {1}.\n \
Plese click on the link below to approve.\n {2}"\
.format(request.user.username, space.name, intent.get_approve_url()))
heading = _("Your request is being processed.")
send_mail(subject=subject, message=body,
from_email="noreply@ecidadania.org",
recipient_list=[space.author.email])
return render_to_response('space_intent.html', \
{'space_name': space.name, 'heading': heading}, \
context_instance=RequestContext(request))
class ValidateIntent(DetailView):
context_object_name = 'space_name'
template_name = 'spaces/validate_intent.html'
heading = _("The requested intent does not exist!")
def get_object(self):
space_object = get_object_or_404(Space, url=self.kwargs['space_url'])
if self.request.user.is_staff:
intent = get_object_or_404(Intent, token=self.kwargs['token'])
intent.user.profile.spaces.add(space_object)
self.heading = _("The user has been authorized to participate in space \"%s\"." % space_object.name)
messages.info(self.request, _("Authorization successful"))
self.template_name = 'validate_intent.html'
return space_object
def get_context_data(self, **kwargs):
context = super(ValidateIntent, self).get_context_data(**kwargs)
context['heading'] = self.heading
return context
#
# User roles.
#
@user_passes_test(lambda u: u.is_superuser)
def add_role(request):
"""
This function will allow the site admin to assign roles to the users.
"""
userrole_form = UserRoleForm(request.POST or None)
if request.method == 'POST':
if userrole_form.is_valid():
userrole_uncommitted = userrole_form.save(commit=False)
set_user_role(userrole_uncommitted.user, userrole_uncommitted.name)
return redirect('/spaces/')
else:
return render_to_response('spaces/space_roles.html', {'form':userrole_form}, context_instance = RequestContext(request))
else:
return render_to_response('spaces/space_roles.html', {'form':userrole_form}, context_instance = RequestContext(request))
# SPACE VIEWS
#
# Please bear in mind that the create_space view can't be replaced by a CBV
# (class-based view) since it manipulates two forms at the same time. Apparently
# that creates some trouble in the django API. See this ticket:
# https://code.djangoproject.com/ticket/16256
@permission_required('spaces.add_space')
def create_space(request):
"""
Returns a SpaceForm form to fill with data to create a new space. There
is an attached EntityFormset to save the entities related to the space. Only
site administrators are allowed to create spaces.
:attributes: - space_form: empty SpaceForm instance
- entity_forms: empty EntityFormSet
:rtype: Space object, multiple entity objects.
:context: form, entityformset
"""
space_form = SpaceForm(request.POST or None, request.FILES or None)
entity_forms = EntityFormSet(request.POST or None, request.FILES or None,
queryset=Entity.objects.none())
if request.user.is_staff:
if request.method == 'POST':
if space_form.is_valid() and entity_forms.is_valid():
space_form_uncommited = space_form.save(commit=False)
space_form_uncommited.author = request.user
new_space = space_form_uncommited.save()
space = get_object_or_404(Space, name=space_form_uncommited.name)
ef_uncommited = entity_forms.save(commit=False)
for ef in ef_uncommited:
ef.space = space
ef.save()
# We add the created spaces to the user allowed spaces
request.user.profile.spaces.add(space)
#messages.success(request, _('Space %s created successfully.') % space.name)
return redirect('/spaces/' + space.url)
return render_to_response('spaces/space_form.html',
{'form': space_form,
'entityformset': entity_forms},
context_instance=RequestContext(request))
else:
return render_to_response('not_allowed.html',
context_instance=RequestContext(request))
class ViewSpaceIndex(DetailView):
"""
Returns the index page for a space. The access to spaces is restricted and
filtered in the get_object method. This view gathers information from all
the configured modules in the space.
:attributes: space_object, place
:rtype: Object
:context: get_place, entities, documents, proposals, publication
"""
context_object_name = 'get_place'
template_name = 'spaces/space_index.html'
def get_object(self):
space_url = self.kwargs['space_url']
space_object = get_object_or_404(Space, url=space_url)
if space_object.public == True or self.request.user.is_staff:
if self.request.user.is_anonymous():
messages.info(self.request, _("Hello anonymous user. Remember \
that this space is public to view, but \
you must <a href=\"/accounts/register\">register</a> \
or <a href=\"/accounts/login\">login</a> to participate."))
return space_object
if self.request.user.is_anonymous():
messages.info(self.request, _("You're an anonymous user. \
You must <a href=\"/accounts/register\">register</a> \
or <a href=\"/accounts/login\">login</a> to access here."))
self.template_name = 'not_allowed.html'
return space_object
for i in self.request.user.profile.spaces.all():
if i.url == space_url:
return space_object
messages.warning(self.request, _("You're not registered to this space."))
self.template_name = 'not_allowed.html'
return space_object
# Get extra context data
def get_context_data(self, **kwargs):
context = super(ViewSpaceIndex, self).get_context_data(**kwargs)
place = get_object_or_404(Space, url=self.kwargs['space_url'])
context['entities'] = Entity.objects.filter(space=place.id)
context['documents'] = Document.objects.filter(space=place.id)
context['proposalsets'] = ProposalSet.objects.filter(space=place.id)
context['proposals'] = Proposal.objects.filter(space=place.id).order_by('-pub_date')
context['publication'] = Post.objects.filter(space=place.id).order_by('-pub_date')[:10]
context['page'] = StaticPage.objects.filter(show_footer=True).order_by('-order')
context['messages'] = messages.get_messages(self.request)
context['debates'] = Debate.objects.filter(space=place.id).order_by('-date')
context['event'] = Event.objects.filter(space=place.id).order_by('-event_date')
return context
# Please bear in mind that the edit_space view can't be replaced by a CBV
# (class-based view) since it manipulates two forms at the same time. Apparently
# that creates some trouble in the django API. See this ticket:
# https://code.djangoproject.com/ticket/16256
@permission_required('spaces.edit_space')
def edit_space(request, space_url):
"""
Returns a form filled with the current space data to edit. Access to
this view is restricted only to site and space administrators. The filter
for space administrators is given by the edit_space permission and their
belonging to that space.
:attributes: - place: current space intance.
- form: SpaceForm instance.
- form_uncommited: form instance before commiting to the DB,
so we can modify the data.
:param space_url: Space URL
:rtype: HTML Form
:context: form, get_place
"""
place = get_object_or_404(Space, url=space_url)
form = SpaceForm(request.POST or None, request.FILES or None, instance=place)
entity_forms = EntityFormSet(request.POST or None, request.FILES or None,
queryset=Entity.objects.all().filter(space=place))
if request.method == 'POST':
if form.is_valid() and entity_forms.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.author = request.user
new_space = form_uncommited.save()
space = get_object_or_404(Space, name=form_uncommited.name)
ef_uncommited = entity_forms.save(commit=False)
for ef in ef_uncommited:
ef.space = space
ef.save()
messages.success(request, _('Space edited successfully'))
return redirect('/spaces/' + space.url + '/')
for i in request.user.profile.spaces.all():
if i.url == space_url or request.user.is_staff:
return render_to_response('spaces/space_form.html',
{'form': form, 'get_place': place,
'entityformset': entity_forms},
context_instance=RequestContext(request))
return render_to_response('not_allowed.html', context_instance=RequestContext(request))
class DeleteSpace(DeleteView):
"""
Returns a confirmation page before deleting the space object completely.
This does not delete the space related content. Only the site administrators
can delete a space.
:rtype: Confirmation
"""
context_object_name = 'get_place'
success_url = '/'
@method_decorator(permission_required('spaces.delete_space'))
def dispatch(self, *args, **kwargs):
return super(DeleteSpace, self).dispatch(*args, **kwargs)
def get_object(self):
return get_object_or_404(Space, url = self.kwargs['space_url'])
class GoToSpace(RedirectView):
"""
Sends the user to the selected space. This view only accepts GET petitions.
GoToSpace is a django generic :class:`RedirectView`.
:Attributes: **self.place** - Selected space object
:rtype: Redirect
"""
def get_redirect_url(self, **kwargs):
self.place = get_object_or_404(Space, name = self.request.GET['spaces'])
return '/spaces/%s' % self.place.url
class ListSpaces(ListView):
"""
Return a list of spaces in the system (except private ones) using a generic view.
The users associated to a private spaces will see it, but not the other private
spaces. ListSpaces is a django generic :class:`ListView`.
:rtype: Object list
:contexts: object_list
"""
paginate_by = 10
def get_queryset(self):
public_spaces = Space.objects.all().filter(public=True)
if not self.request.user.is_anonymous():
user_spaces = self.request.user.profile.spaces.all()
return public_spaces | user_spaces
return public_spaces
#
# DOCUMENTS VIEWS
#
class AddDocument(FormView):
"""
Upload a new document and attach it to the current space.
:rtype: Object
:context: form, get_place
"""
form_class = DocForm
template_name = 'spaces/document_form.html'
def get_success_url(self):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
return '/spaces/' + self.space.url
def form_valid(self, form):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
form_uncommited = form.save(commit=False)
form_uncommited.space = self.space
form_uncommited.author = self.request.user
form_uncommited.save()
#print form.cleaned_data
return super(AddDocument, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(AddDocument, self).get_context_data(**kwargs)
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
context['get_place'] = self.space
return context
@method_decorator(permission_required('spaces.add_document'))
def dispatch(self, *args, **kwargs):
return super(AddDocument, self).dispatch(*args, **kwargs)
class EditDocument(UpdateView):
"""
Returns a DocForm filled with the current document data.
:rtype: HTML Form
:context: doc, get_place
"""
model = Document
template_name = 'spaces/document_form.html'
def get_success_url(self):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
        return '/spaces/' + self.space.url
def get_object(self):
cur_doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])
return cur_doc
def get_context_data(self, **kwargs):
context = super(EditDocument, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
@method_decorator(permission_required('spaces.edit_document'))
def dispatch(self, *args, **kwargs):
return super(EditDocument, self).dispatch(*args, **kwargs)
class DeleteDocument(DeleteView):
"""
Returns a confirmation page before deleting the current document.
:rtype: Confirmation
:context: get_place
"""
def get_object(self):
return get_object_or_404(Document, pk = self.kwargs['doc_id'])
def get_success_url(self):
current_space = self.kwargs['space_url']
return '/spaces/{0}'.format(current_space)
def get_context_data(self, **kwargs):
context = super(DeleteDocument, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
class ListDocs(ListView):
"""
Returns a list of documents attached to the current space.
:rtype: Object list
:context: object_list, get_place
"""
paginate_by = 25
context_object_name = 'document_list'
def get_queryset(self):
place = get_object_or_404(Space, url=self.kwargs['space_url'])
objects = Document.objects.all().filter(space=place.id).order_by('pub_date')
if self.request.user.is_staff:
return objects
if self.request.user.is_anonymous():
self.template_name = 'not_allowed.html'
return objects
for i in self.request.user.profile.spaces.all():
            if i.url == place.url:
return objects
self.template_name = 'not_allowed.html'
return objects
def get_context_data(self, **kwargs):
context = super(ListDocs, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
#
# EVENT VIEWS
#
class AddEvent(FormView):
"""
Returns an empty MeetingForm to create a new Meeting. Space and author fields
are automatically filled with the request data.
:rtype: HTML Form
:context: form, get_place
"""
form_class = EventForm
template_name = 'spaces/event_form.html'
def get_success_url(self):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
return '/spaces/' + self.space.url + '/'
def form_valid(self, form):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
form_uncommited = form.save(commit=False)
form_uncommited.event_author = self.request.user
form_uncommited.space = self.space
form_uncommited.save()
return super(AddEvent, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(AddEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
class ViewEvent(DetailView):
"""
View the content of a event.
:rtype: Object
:context: event, get_place
"""
context_object_name = 'event'
template_name = 'spaces/event_detail.html'
def get_object(self):
space_url = self.kwargs['space_url']
if self.request.user.is_anonymous():
self.template_name = 'not_allowed.html'
return get_object_or_404(Space, url = space_url)
return get_object_or_404(Event, pk = self.kwargs['event_id'])
def get_context_data(self, **kwargs):
context = super(ViewEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
class EditEvent(UpdateView):
"""
Returns a MeetingForm filled with the current Meeting data to be edited.
:rtype: HTML Form
:context: event, get_place
"""
model = Event
template_name = 'spaces/event_form.html'
def get_object(self):
cur_event = get_object_or_404(Event, pk=self.kwargs['event_id'])
return cur_event
def get_success_url(self):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
        return '/spaces/' + self.space.url
def get_context_data(self, **kwargs):
context = super(EditEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
@method_decorator(permission_required('spaces.edit_event'))
def dispatch(self, *args, **kwargs):
return super(EditEvent, self).dispatch(*args, **kwargs)
class DeleteEvent(DeleteView):
"""
Returns a confirmation page before deleting the Meeting object.
:rtype: Confirmation
:context: get_place
"""
def get_object(self):
return get_object_or_404(Event, pk = self.kwargs['event_id'])
def get_success_url(self):
current_space = self.kwargs['space_url']
return '/spaces/{0}'.format(current_space)
def get_context_data(self, **kwargs):
context = super(DeleteEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
class ListEvents(ListView):
"""
List all the events attached to a space.
:rtype: Object list
:context: event_list, get_place
"""
paginate_by = 25
context_object_name = 'event_list'
def get_queryset(self):
place = get_object_or_404(Space, url=self.kwargs['space_url'])
        objects = Event.objects.all().filter(space=place.id).order_by('event_date')
return objects
def get_context_data(self, **kwargs):
context = super(ListEvents, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
return context
#
# NEWS RELATED
#
class ListPosts(ListView):
"""
Returns a list with all the posts attached to that space. It's similar to
an archive, but without classification or filtering.
:rtype: Object list
:context: post_list
"""
paginate_by = 10
context_object_name = 'post_list'
template_name = 'news/news_list.html'
def get_queryset(self):
place = get_object_or_404(Space, url=self.kwargs['space_url'])
if settings.DEBUG:
messages.set_level(self.request, messages.DEBUG)
            messages.debug(self.request, "Successful query.")
        return Post.objects.filter(space=place).order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(ListPosts, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])
context['messages'] = messages.get_messages(self.request)
return context
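# --- Editor's note: a minimal sketch of how these views could be wired up in
# a urls.py. The patterns and module path below are illustrative assumptions,
# not taken from this project. ---
#
# from django.conf.urls import url
# from core.spaces.views import (AddEvent, ViewEvent, EditEvent, DeleteEvent,
#                                ListEvents, ListPosts)
#
# urlpatterns = [
#     url(r'^spaces/(?P<space_url>[\w-]+)/event/add/$', AddEvent.as_view()),
#     url(r'^spaces/(?P<space_url>[\w-]+)/event/(?P<event_id>\d+)/$', ViewEvent.as_view()),
#     url(r'^spaces/(?P<space_url>[\w-]+)/event/(?P<event_id>\d+)/edit/$', EditEvent.as_view()),
#     url(r'^spaces/(?P<space_url>[\w-]+)/event/(?P<event_id>\d+)/delete/$', DeleteEvent.as_view()),
#     url(r'^spaces/(?P<space_url>[\w-]+)/events/$', ListEvents.as_view()),
#     url(r'^spaces/(?P<space_url>[\w-]+)/news/$', ListPosts.as_view()),
# ]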
|
bithinalangot/ecidadania-dev
|
src/core/spaces/views.py
|
Python
|
gpl-3.0
| 25,152
|
import time
import numpy as np
"""
Module for prime related computations
Includes:
Infinite prime iterator
Thresholded prime iterator
Primality test
Prime Factorization
Divisor Computation
Divisor Cardinality
"""
class EratosthenesSieve(object):
"""
Dynamic Sieve of Eratosthenes for prime related computations
Attributes:
threshold: current size of the sieve
primes: list with known prime numbers
table: sieve table. 0:Composite 1:Prime
rate: rate of expansion of the sieve table
lastnumber: last checked number in the sieve
"""
def __init__(self,init_thr=1000,rate=10,sqrt_thr=False):
"""
Dynamic Eratosthenes Sieve for prime generation and factorization
Args:
init_thr: initial size of the sieve table
rate: rate of expansion of the sieve table
sqrt_thr: if True square root of init_thr is used instead
"""
if not sqrt_thr:
self.threshold = int(init_thr)
else:
self.threshold = int(init_thr**0.5)+1
self.primes = []
self.table = np.ones(self.threshold,dtype=np.int64)
self.table[[0,1]] = 0
self.rate=rate
self.lastnumber=1
def _resize_table(self):
"""
        Create a new sieve table self.rate times bigger than the
        previous one and re-mark the known composites in it
"""
self.threshold *= self.rate
self.table = np.ones(self.threshold,dtype=np.int64)
for p in self.primes:
self.table[p*p:self.threshold:p] = 0
def _next_prime(self,limit=0):
"""
Computes the next prime greater than the limit
Resizes the sieve table as needed
Args:
limit: stopping criteria for prime generation
Returns:
The next prime larger than the limit
"""
        while True:
for p in range(self.lastnumber,self.threshold):
if self.table[p] == 1:
self.primes.append(p)
self.lastnumber = p+1
self.table[p*p:self.threshold:p] = 0
if p > limit:
return p
else:
self.lastnumber = self.threshold
self._resize_table()
def __next__(self,limit=0):
return self._next_prime()
def __iter__(self):
return self
def below(self,threshold):
"""
Iterator of primes p < threshold
Args:
threshold: stopping criteria for prime generation
Returns:
generator object for primes p < threshold
"""
threshold = int(threshold)
if threshold > self.lastnumber:
self._next_prime(threshold)
        # Yield only cached primes strictly below the threshold; the cache may
        # hold larger primes generated by earlier, bigger queries.
        for p in self.primes:
            if p < threshold:
                yield p
            else:
                break
def is_prime(self,num):
"""
Primality check for an integer number
Args:
num: number to be checked
Returns:
True if prime, False otherwise
"""
if num < self.lastnumber:
return self.table[num] == 1
for p in self.below(int(num**0.5)+1):
if num % p == 0:
return False
else:
return True
def factor(self,num):
"""
Prime factorization of num
Args:
num: number to be factored
Returns:
list of tuples (prime,exponent)
"""
num = int(num)
F = []
max_factor = int(num**0.5+1)
for p in self.below(max_factor):
if num > 1:
i=0
while num%p == 0:
i+=1
num //= p
if i > 0:
F.append((p,i))
else:
break
if num > 1:
F += [(num,1)]
return F
def num_divisors(self,num):
"""
Cardinality of the divisor set of num
Does not compute the divisors so it is faster
Args:
num: number from which we get the divisors
Returns:
integer representing the number of divisors
"""
d = self.factor(int(num))
        return np.prod([e + 1 for p, e in d])
def divisors(self,n):
"""
Iterator over the divisors of num
Args:
            n: number whose divisors we want to compute
Returns:
generator object over the divisors
"""
def divisorsRecurrence(L):
if L == []:
yield 1
else:
(a,b) = L[0]
for d in divisorsRecurrence(L[1:]):
acc = 1
for i in range(b+1):
yield d*acc
acc *= a
F = self.factor(n)
return divisorsRecurrence(F)
def sum_proper_divisors(self,num):
"""
Sum of the proper divisor set of num
Does not compute the divisors since it uses
the multiplicativity property of the sigma
function
Args:
num: number from which we get the divisors
        Returns:
            integer representing the sum of the proper divisors of num
"""
factors = self.factor(int(num))
div_sum = np.prod([ (p**(e+1)-1) // (p-1) for p,e in factors ])
return div_sum-num
def primes_below(limit):
limit = int(limit)
primes = np.ones(limit,dtype=np.int64)
primes[[0,1]] = 0
for (i, isprime) in enumerate(primes):
if isprime == 1:
yield i
primes[i*i:limit:i] = 0
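# --- Editor's note: a short, runnable usage sketch of EratosthenesSieve
# (added for illustration; not part of the original module). ---
if __name__ == '__main__':
    sieve = EratosthenesSieve(init_thr=100)
    print(list(sieve.below(30)))          # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(sieve.is_prime(97))             # True
    print(sieve.factor(360))              # [(2, 3), (3, 2), (5, 1)]
    print(sieve.num_divisors(360))        # 24 = (3+1)*(2+1)*(1+1)
    print(sieve.sum_proper_divisors(28))  # 28 (a perfect number)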
|
JJGO/ProjectEuler
|
euler/primes.py
|
Python
|
gpl-3.0
| 5,689
|
from collections import namedtuple
from uuid import uuid4
from GEMEditor.model.classes.annotation import Annotation
class Reference:
""" ReferenceItem contains the information a pubmed or similar literature reference
Authors are saved as author instances """
def __init__(self, id=None, pmid="", pmc="", doi="", url="",
authors=None, year="", title="", journal="", abstract=""):
super(Reference, self).__init__()
self._linked_items = set()
self.id = id or str(uuid4())
self.pmid = pmid
self.pmc = pmc
self.doi = doi
self.url = url
if authors is None:
self.authors = []
else:
self.authors = authors
self.year = year
self.title = title
self.journal = journal
self.abstract = abstract
@property
def linked_items(self):
return self._linked_items.copy()
@property
def annotation(self):
result = set()
if self.pmid:
result.add(Annotation("pubmed", self.pmid))
if self.pmc:
result.add(Annotation("pmc", self.pmc))
if self.doi:
result.add(Annotation("doi", self.doi))
return result
def add_link(self, item, reciprocal=True):
""" Remove reference link from item
All items that inherit from this class
should be able to link to each other.
Parameters
----------
reference: GEMEditor.model.classes.base.ReferenceLink
reciprocal: bool
"""
self._linked_items.add(item)
if reciprocal:
item.add_reference(self, reciprocal=False)
def remove_link(self, item, reciprocal=True):
""" Remove reference link from item
Parameters
----------
item: GEMEditor.model.classes.base.ReferenceLink
reciprocal: bool
"""
self._linked_items.discard(item)
if reciprocal:
item.remove_reference(self, reciprocal=False)
def remove_all_links(self):
""" Remove all reference links """
for item in self.linked_items:
self.remove_link(item, reciprocal=True)
def reference_string(self):
""" Get the authors part of the usual citation of scientific literature i.e.:
Lastname F et al., YYYY if there are more than 2 authors
Lastname1 F1 and Lastname2 F2, YYYY if there are 2 authors
Lastname F, YYYY if there is only one author
Input tuple with (lastname, firstname, initials)
"""
# If there are more than 2 authors return a string
if len(self.authors) > 2:
return "{0} et al., {1}".format(self.authors[0].display_str, self.year)
elif len(self.authors) == 2:
return "{0} and {1}, {2}".format(self.authors[0].display_str,
self.authors[1].display_str,
self.year)
elif self.authors:
return "{0}, {1}".format(self.authors[0].display_str, self.year)
else:
return ""
def __str__(self):
id_strings = []
for attrib in ("pmid", "pmc", "doi"):
if getattr(self, attrib):
id_strings.append("{0}: {1}".format(attrib.upper(), getattr(self, attrib)))
return "ID: {id}\n" \
"Authors: {authors}\n" \
"Title: {title}\n" \
"{id_strings}".format(id=self.id,
authors=self.reference_string(),
title=self.title,
id_strings="; ".join(id_strings))
class Author(namedtuple("Author", ["lastname", "firstname", "initials"])):
__slots__ = ()
def __new__(cls, lastname="", firstname="", initials=""):
self = super(Author, cls).__new__(cls,
lastname=lastname,
firstname=firstname,
initials=initials)
return self
@property
def display_str(self):
if self.initials:
return "{0} {1}".format(self.lastname, self.initials)
else:
return self.lastname
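# --- Editor's note: an illustrative use of Reference and Author (added for
# clarity; the values are made up and not part of the original module). ---
if __name__ == '__main__':
    ref = Reference(pmid="123456", year="2015", title="An example title",
                    authors=[Author(lastname="Smith", initials="J"),
                             Author(lastname="Doe", initials="J")])
    print(ref.reference_string())  # Smith J and Doe J, 2015
    print(ref.annotation)          # set containing Annotation("pubmed", "123456")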
|
JuBra/GEMEditor
|
GEMEditor/model/classes/reference.py
|
Python
|
gpl-3.0
| 4,310
|
import unittest
from datetime import datetime
import tempfile
import os
from due.agent import Agent
from due.episode import Episode
from due.event import Event
from due.persistence import serialize, deserialize
from due.models.tfidf import TfIdfAgent
from due.models.dummy import DummyAgent
class TestTfIdfAgent(unittest.TestCase):
def test_save_load(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
saved_agent = agent.save()
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'serialized_tfidf_agent.due')
serialize(saved_agent, path)
loaded_agent = Agent.load(deserialize(path))
assert agent.parameters == loaded_agent.parameters
assert agent._normalized_past_utterances == loaded_agent._normalized_past_utterances
assert [e.save() for e in loaded_agent._past_episodes] == [e.save() for e in agent._past_episodes]
expected_utterance = agent._process_utterance('aaa bbb ccc mario')
loaded_utterance = loaded_agent._process_utterance('aaa bbb ccc mario')
assert (agent._vectorizer.transform([expected_utterance]) != loaded_agent._vectorizer.transform([loaded_utterance])).nnz == 0
assert (agent._vectorized_past_utterances != loaded_agent._vectorized_past_utterances).nnz == 0
        assert agent.utterance_callback(_get_test_episode())[0].payload == loaded_agent.utterance_callback(_get_test_episode())[0].payload
def test_utterance_callback(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
result = agent.utterance_callback(_get_test_episode())
self.assertEqual(result[0].payload, 'bbb')
def test_tfidf_agent(self):
cb = TfIdfAgent()
# Learn sample episode
sample_episode, alice, bob = _sample_episode()
cb.learn_episodes([sample_episode])
# Predict answer
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def test_agent_load(self):
sample_episode, alice, bob = _sample_episode()
cb = TfIdfAgent()
cb.learn_episodes([sample_episode])
test_dir = tempfile.mkdtemp()
test_path = os.path.join(test_dir, 'test_agent_load.pkl')
serialize(cb.save(), test_path)
loaded_cb = Agent.load(deserialize(test_path))
self.assertIsInstance(loaded_cb, TfIdfAgent)
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = loaded_cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def _get_train_episodes():
result = []
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'bbb'),
Event(Event.Type.Utterance, datetime.now(), 'a', 'ccc'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'ddd')
]
result.append(e)
e = Episode('1', '2')
e.events = [
Event(Event.Type.Utterance, datetime.now(), '1', '111'),
Event(Event.Type.Utterance, datetime.now(), '2', '222'),
Event(Event.Type.Utterance, datetime.now(), '1', '333'),
Event(Event.Type.Utterance, datetime.now(), '2', '444')
]
result.append(e)
return result
def _get_test_episode():
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
]
return e
def _sample_episode():
alice = DummyAgent('alice')
bob = DummyAgent('bob')
result = alice.start_episode(bob)
alice.say("Hi!", result)
bob.say("Hello", result)
alice.say("How are you?", result)
bob.say("Good thanks, and you?", result)
alice.say("All good", result)
return result, alice, bob
|
dario-chiappetta/Due
|
due/models/test_tfidf.py
|
Python
|
gpl-3.0
| 3,631
|
import numpy as np
import warnings
import subprocess
import pogoFunctions as pF
from PolyInterface import poly
class PogoInput:
def __init__(self,
fileName,
elementTypes,
signals,
historyMeasurement,
nodes = None,
elements = None,
geometryFile = None,
precision=8,
targetMeshSize = 5e-5,
nDims=2,
nDofPerNode = None,
notes = None,
runName = 'pogoJob',
nt = 100,
dt = 1e-8,
elementTypeRefs = None,
materialTypeRefs = None,
orientationRefs = None,
elementParameters = None,
materials = [[0,7e10,0.34,2700],],
orientations = None,
boundaryConditions = None,
historyMeasurementFrequency = 20,
fieldStoreIncrements = None,
folderIn = None,
totalForce = False,
version = 1.03,
writeFile = True):
self.fileName = fileName
### Header
self.header = np.array(['']*20, dtype='str')
if version not in [1.03, 1.04]:
raise ValueError('Input file version must be 1.03 or 1.04.')
headerString = '%pogo-inp{}'.format(version)
for c1 in range(0, len(headerString)):
self.header[c1] = headerString[c1]
### Precision
if precision not in [4,8]:
raise ValueError('Precision must be 4 or 8.')
self.precision = np.array([precision,],dtype='int32')
self.nDims = np.array([nDims,],dtype='int32')
### Number of degrees of freedom per node
        if nDofPerNode is None:
nDofPerNode = self.nDims
if nDofPerNode not in [1,2,3]:
raise ValueError('Number of degrees of freedom must be 1, 2 or 3')
self.nDofPerNode = np.array([nDofPerNode,],dtype='int32')
### Set notes
self.notes = np.array(['']*1024, dtype='str')
        if notes is not None:
if len(notes) > 1024:
notes = notes[:1024]
for character in range(len(notes)):
self.notes[character] = notes[character]
### Set runname
self.runName = np.array(['']*80, dtype='str')
if len(runName) > 80:
runName = runName[:80]
for character in range(0, len(runName)):
self.runName[character] = runName[character]
### Set time step and run time
self.nt = np.array([nt,],dtype='int32')
self.dt = np.array([dt,],dtype=self.getPrecString())
### Node generation if necessary
if not np.any(nodes) and not geometryFile:
raise ValueError('Either a poly file or node/element definitions are required')
elif geometryFile and targetMeshSize and not np.any(elements) and not np.any(nodes):
if geometryFile.split('.')[-1] == 'dxf':
print 'Creating poly file from {}'.format(geometryFile)
poly.poly(geometryFile,elementSize = targetMeshSize,writeFile=True)
if geometryFile.split('.')[-1] == 'poly':
geometryFile = geometryFile[:-5]
if self.nDims == 2:
targetMeshArea = targetMeshSize*targetMeshSize
subprocess.call('triangle -q -j -a{:.12}F {}.poly'.format(targetMeshArea,geometryFile))
elif self.nDims == 3:
targetMeshVolume = targetMeshSize*targetMeshSize*targetMeshSize
### Add cwd
subprocess.call('tetgen {:.12}F {}.poly'.format(targetMeshVolume,geometryFile))
nodes = pF.loadNodeFile(geometryFile+'.1.node')
elements = pF.loadElementFile(geometryFile+'.1.ele')
### Number of nodes and node positions
if np.shape(nodes)[0] != nDims:
raise ValueError('nodes must be in shape (nDims, nNodes).')
self.nNodes = np.array([np.shape(nodes)[1],],dtype = 'int32')
self.nodes = nodes.astype(self.getPrecString()).T
### Number of elements and nodes per element
self.nElements = np.array([np.shape(elements)[1],],dtype='int32')
self.nNodesPerElement = np.array([np.shape(elements)[0],],dtype='int32')
### Element type refs
        if elementTypeRefs is None:  # 'is None' avoids ambiguous element-wise comparison on numpy arrays
            elementTypeRefs = np.zeros(self.nElements)
        if len(elementTypeRefs) != self.nElements:
            raise ValueError('elementTypeRefs must be of length nElements.')
        #if min(elementTypeRefs) != 0:
        #    raise ValueError('elementTypeRefs must be 1 indexed.')
        self.elementTypeRefs = elementTypeRefs.astype('int32')# - 1
### Material type refs
        if materialTypeRefs is None:
            materialTypeRefs = np.zeros(self.nElements)
        if len(materialTypeRefs) != self.nElements:
            raise ValueError('materialTypeRefs must be of length nElements.')
        #if min(materialTypeRefs) != 1:
        #    raise ValueError('materialTypeRefs must be 1 indexed.')
        self.materialTypeRefs = materialTypeRefs.astype('int32') #- 1
### Element orientations
        if orientationRefs is None:
            orientationRefs = np.zeros(self.nElements,dtype = 'int32')
        if len(orientationRefs) != self.nElements:
            raise ValueError('orientationRefs must be of length nElements.')
        if min(orientationRefs) < 0: # fixed: checked elementTypeRefs; unused values are set to 0 so -1 in zero indexing
            raise ValueError('orientationRefs must be 1 indexed.')
        self.orientationRefs = orientationRefs.astype('int32')# - 1
### Elements
if np.max(elements) > self.nNodes:
raise ValueError('elements points to nodes which are greater than nNodes.')
if np.min(elements) < 0:
raise ValueError('elements must be 1 indexed.')
self.elements = elements.astype('int32') - 1 #convert to zero indexing
self.elements = self.elements.T
### PML sets
self.nPmlSets = np.array([0,],dtype = 'int32')
self.pmlParams = np.array([0,],dtype = 'int32')
### Element types
self.nElementTypes = np.array([len(elementTypes),],dtype = 'int32')
        if elementParameters is None:
elementParameters = np.array([0,]*len(elementTypes), dtype = 'int32')
if np.max(self.elementTypeRefs) > self.nElementTypes - 1:
raise ValueError('elementTypeRefs points to element types greater than the number of types of element.')
self.elementTypes = []
for ii,elementType in enumerate(elementTypes):
self.elementTypes.append(ElementType(elementType,elementParameters[ii],self.getPrecString()))
### Material types
self.nMaterials = np.array([len(materials),], dtype = 'int32')
self.materials = []
for material in materials:
self.materials.append(Material(material,self.getPrecString()))
### Orientations
        if orientations is None:
self.nOr = np.array([0,],dtype ='int32')
self.orientations = None
else:
self.orientations = []
self.nOr = np.array([len(orientations),],dtype = 'int32')
for orientation in orientations:
self.orientations.append(Orientation(orientation,self.getPrecString()))
### Boundary conditions
        if boundaryConditions is None:
self.nFixDof = np.array([0,],dtype ='int32')
self.boundaryConditions = None
else:
nSets = len(boundaryConditions) / 2
self.nFixDof = np.array([sum([len(boundaryConditions[c1*2]) for c1 in range(nSets)]),],dtype = 'int32')
self.boundaryConditions = []
for c1 in range(0,nSets):
#self.boundaryConditions.append(BoundaryCondition(boundaryConditions[c1]))
self.boundaryConditions.append(np.array([(boundaryConditions[c1*2]-1)*4 + boundaryConditions[c1*2+1]-1,],dtype='int32'))
### Input signals
self.nInputSignals = np.array([len(signals),],dtype = 'int32')
self.signals = []
for signal in signals:
self.signals.append(Signal(signal,totalForce,self.getPrecString(),dt))
### History measurements
        if historyMeasurement is None:
warnings.warn('Warning : No history measurements requested.')
self.nMeas = 0
self.historyMeasurement = 0
else:
self.nMeas = np.array([len(historyMeasurement),],dtype = 'int32')
self.historyMeasurement = HistoryMeasurement(historyMeasurement,historyMeasurementFrequency)
### Field measurements
        if fieldStoreIncrements is None:
self.nFieldStore = np.array([0,],dtype='int32')
self.fieldStoreIncrements = np.array([0,],dtype ='int32')
else:
self.nFieldStore = np.array([len(fieldStoreIncrements),],dtype = 'int32')
if np.max(fieldStoreIncrements) > nt or np.min(fieldStoreIncrements) < 1:
raise ValueError('fieldStoreIncrements out of range [1, nt].')
self.fieldStoreIncrements = np.array([fieldStoreIncrements-1,],dtype = 'int32')
### Write to file
if writeFile:
self.writeFile()
    def getPrecString(self):
        precString = 'float64'
        if self.precision == 4:
            precString = 'float32'  # fixed: was assigned to self.precString, so 'float64' was always returned
        return precString
def writeFile(self):
with open(self.fileName + '.pogo-inp','wb') as f:
self.header.tofile(f)
self.precision.tofile(f)
self.nDims.tofile(f)
self.nDofPerNode.tofile(f)
self.notes.tofile(f)
self.runName.tofile(f)
self.nt.tofile(f)
self.dt.tofile(f)
self.nNodes.tofile(f)
self.nodes.tofile(f)
self.nElements.tofile(f)
self.nNodesPerElement.tofile(f)
self.elementTypeRefs.tofile(f)
self.materialTypeRefs.tofile(f)
self.orientationRefs.tofile(f)
self.elements.tofile(f)
self.nPmlSets.tofile(f)
self.pmlParams.tofile(f)
self.nElementTypes.tofile(f)
for elementType in self.elementTypes:
elementType.writeElementType(f)
self.nMaterials.tofile(f)
for material in self.materials:
material.writeMaterial(f)
self.nOr.tofile(f)
            if self.orientations is not None:
for orientation in self.orientations:
orientation.writeOrientation(f)
self.nFixDof.tofile(f)
            if self.boundaryConditions is not None:
for bc in self.boundaryConditions:
bc.tofile(f)
self.nInputSignals.tofile(f)
self.signals[0].nt.tofile(f)
self.signals[0].dt.tofile(f)
for signal in self.signals:
signal.writeSignal(f)
if self.nMeas>0:
self.historyMeasurement.writeHistory(f)
else:
np.array([0,], dtype='int32').tofile(f)
np.array([0,], dtype='int32').tofile(f)
self.nFieldStore.tofile(f)
self.fieldStoreIncrements.tofile(f)
class Material:
def __init__(self,materialInfo,precString):
self.matType = np.array([materialInfo[0],],dtype='int32')
self.matProps = np.array([materialInfo[1:],],dtype=precString)
self.nMatParams = np.array([len(materialInfo[1:]),],dtype='int32')
def writeMaterial(self,fileId):
self.matType.tofile(fileId)
self.nMatParams.tofile(fileId)
self.matProps.tofile(fileId)
class ElementType:
def __init__(self,elementType,elementParams,precString):
self.elTypeSave = np.array(['']*20,dtype='str')
for character in range(len(elementType)):
self.elTypeSave[character] = elementType[character]
if elementParams:
            self.nParams = np.array([len(elementParams),],dtype='int32')  # fixed: 'elementParameters' was undefined here
self.params = np.array(elementParams,dtype = precString)
else:
self.params = np.array([0,],dtype='int32')
self.nParams = np.array([0,],dtype='int32')
def writeElementType(self,fileId):
self.elTypeSave.tofile(fileId)
self.nParams.tofile(fileId)
self.params.tofile(fileId)
class Orientation:
def __init__(self,orInfo,precString):
self.paramType = np.array([orInfo[0],], dtype='int32')
self.nOrParams = np.array([len(orInfo[1:]),],dtype='int32')
self.paramValues = np.array([orInfo[1:],],dtype = precString)
def writeOrientation(self,fileId):
self.paramType.tofile(fileId)
self.nOrParams.tofile(fileId)
self.paramValues.tofile(fileId)
class BoundaryCondition:
def __init__(self,BCs):
self.nodes = np.array(BCs[0])
self.dof = np.array(BCs[1])
def writeBoundaryCondition(self,fileId):
dofOut = np.array([(self.nodes-1)*4 + self.dof-1,],dtype='int32')
dofOut.tofile(fileId)
class HistoryMeasurement:
nodes = np.array([],dtype='int32')
dofs = np.array([],dtype='int32')
def __init__(self,histInfo,frequency):
###Add Input checking
for history in histInfo:
self.nodes = np.hstack((self.nodes,history[0]))
self.dofs = np.hstack((self.dofs,history[1]))
self.frequency = np.array([frequency,],dtype = 'int32')
self.nMeas = np.array([len(self.nodes),],dtype = 'int32')
###Must Add Version 1.04 support
def writeHistory(self,fileId):
self.nMeas.tofile(fileId)
self.frequency.tofile(fileId)
outHist = (self.nodes*4 + self.dofs).astype('int32')# - 1
outHist.tofile(fileId)
class FieldMeasurement:
def __init__(self,increments=0):
###Add input checking
self.increments = np.array([increments - 1],dtype='int32')
class Signal:
def __init__(self, signalInfo, totalForce, precString,dt):
if signalInfo:
nNodes = len(signalInfo[0])
self.type = np.array([signalInfo[3],],dtype = 'int32')
# if len(np.unique(signalInfo[0])) != nNodes:
# errStr = 'Duplicate nodes cannot be specified for a signal'
# raise ValueError(errStr)
if np.size(signalInfo[1]) != 1 and len(signalInfo[1]) != nNodes:
raise ValueError('Signal amplitude must be a scalar or a vector of amplitudes for each node signal applied to.')
if signalInfo[3] not in [0,1]:
                raise ValueError('Signal type must be 0 or 1, got {}.'.format(signalInfo[3]))
self.nNodes = np.array([len(signalInfo[0]),],dtype='int32')
self.nodes = np.array(signalInfo[0],dtype = 'int32')
if type(signalInfo[1]) is float:
if totalForce == True:
                    if signalInfo[3] == 1:  # fixed: 'sigType' was undefined
raise ValueError('totalForce not supported for displacement load.')
else:
ampVal = signalInfo[1]/nNodes
else:
ampVal = signalInfo[1]
amp = np.array(np.ones(nNodes)*ampVal, dtype=precString)
elif type(signalInfo[1]) is np.ndarray:
if len(signalInfo[1]) != self.nNodes:
raise ValueError('If signal amplitude is an array, a value must be specified for each node in the transducer.')
if totalForce == True:
raise Warning('totalForce is not supported for loads specified for individual nodes.')
amp = np.array([signalInfo[1],], dtype=precString)
else:
raise ValueError('Signal amplitude not recognised')
self.amplitude = amp
self.dof = np.array(signalInfo[2],dtype ='int32')
self.shape = np.array(signalInfo[4],dtype = precString)
self.dt = np.array(dt,dtype=precString)
self.nt = np.array(len(signalInfo[4]),dtype = 'int32')
def writeSignal(self,fileId):
self.nNodes.tofile(fileId)
self.type.tofile(fileId)
dof = self.nodes*4 + self.dof-1
dof.tofile(fileId)
self.amplitude.tofile(fileId)
self.shape.tofile(fileId)
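# --- Editor's note: a minimal, hypothetical example of building an input file
# with PogoInput. Every value below (element type name, node layout, signal
# definition) is an illustrative assumption; consult the Pogo documentation
# for real models. ---
#
# nodes = np.array([[0.0, 1.0, 0.0],    # x coordinates, shape (nDims, nNodes)
#                   [0.0, 0.0, 1.0]])   # y coordinates
# elements = np.array([[1], [2], [3]])  # 1-indexed, shape (nNodesPerElement, nElements)
# shape = np.sin(np.linspace(0.0, 2.0*np.pi, 100))
# signal = ([1], 1.0, 1, 0, shape)      # (nodes, amplitude, dof, type, time trace)
# history = [([3], 1)]                  # record dof 1 at node 3
# job = PogoInput('demoJob', ['CPE3'], [signal], history,
#                 nodes=nodes, elements=elements, nt=100, dt=1e-8)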
|
ab9621/PogoLibrary
|
pogoInput.py
|
Python
|
gpl-3.0
| 17,129
|
###############################################################################
# Name: php.py #
# Purpose: Define PHP syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: php.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for PHP.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _php.py 67413 2011-04-07 14:39:39Z CJP $"
__revision__ = "$Revision: 67413 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
import _html
from _cpp import AutoIndenter
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# PHP Keywords
PHP_KEYWORDS = ("__LINE__ __FILE__ __FUNCTION__ __CLASS__ __METHOD__ declare "
"else enddeclare endswitch elseif endif if switch as do endfor "
"endforeach endwhile for foreach while case default switch "
"break continue var bool boolean int integer real "
"double float string array NULL extends global static "
"new true false function "
"class object self final public private protected try catch "
"throw abstract parent interface implements "
# Language Constructs
"die echo empty exit eval include include_once isset list "
"require require_once return print unset")
# PHP Standard Functions/Methods
# (roughly based off of PHP Pocket Reference by O'Reilly)
PHP_FUNC = ("__construct __autoload __destruct __get __set __isset __unset "
"__call __sleep __wakeup __toString __set_state __clone "
"apache_child_terminate apache_lookup_uri apache_note "
"apache_request_headers apache_response_headers apache_setenv "
"ascii2ebcdic ebcdic2ascii getallheaders virtual jewishtojd "
"array_change_key_case array_chunk array_count_values "
"array_diff_assoc array_diff array_fill array_filter array_flip "
"array_intersect_assoc array_intersect array_key_exists array_keys "
"array_map array_merge_recursive array_merge array_multisort "
"array_pad array_pop array_push array_rand array_reduce array "
"array_reverse array_search array_shift array_slice array_splice "
"array_sum array_unique array_unshift array_values array_walk "
"arsort asort compact count current each end extract in_array key "
"krsort ksort natcasesort natsort next pos prev range reset "
"rsort shuffle sizeof sort uasort uksort usort aspell_check "
"aspell_new aspell_suggest bcadd bccomp bcdiv bcmod bcmul bcpow "
"bcpowmod bcscale bcsqrt bcsub bzclose bzcompress bzdecompress "
"bzerrno bzerror bzerrstr bzflush bzopen bzread bzwrite "
"cal_days_in_month cal_from_jd cal_info cal_to_jd easter_date "
"easter_days frenchtojd gregoriantojd jddayofweek jdmonthname "
"jdtofrench jdtogregorian jdtojewish jdtojulian jdtounix "
"juliantojd unixtojd ccvs_add ccvs_auth ccvs_command ccvs_count "
"ccvs_delete ccvs_done ccvs_init ccvs_lookup ccvs_new ccvs_report "
"ccvs_return ccvs_reverse ccvs_sale ccvs_status ccvs_textvalue "
"ccvs_void call_user_method_array call_user_method class_exists "
"get_class_methods get_class_vars get_class get_declared_classes "
"get_object_vars get_parent_class is_a is_subclass_of com_load "
"com_addref com_get com_invoke com_isenum com_load_typelib "
"com_propget com_propput com_propset com_release com_set "
"cpdf_add_annotation cpdf_add_outline cpdf_arc cpdf_begin_text "
"cpdf_circle cpdf_clip cpdf_close cpdf_closepath_fill_stroke "
"cpdf_closepath_stroke cpdf_closepath cpdf_continue_text "
"cpdf_end_text cpdf_fill_stroke cpdf_fill cpdf_finalize_page "
"cpdf_finalize cpdf_global_set_document_limits cpdf_import_jpeg "
"cpdf_lineto cpdf_moveto cpdf_newpath cpdf_open cpdf_output_buffer "
"cpdf_page_init cpdf_place_inline_image cpdf_rect cpdf_restore "
"cpdf_rlineto cpdf_rmoveto cpdf_rotate_text cpdf_rotate "
"cpdf_save_to_file cpdf_save cpdf_scale cpdf_set_action_url "
"cpdf_set_char_spacing cpdf_set_creator cpdf_set_current_page "
"cpdf_set_font_directories cpdf_set_font_map_file cpdf_set_font "
"cpdf_set_horiz_scaling cpdf_set_keywords cpdf_set_leading "
"cpdf_set_page_animation cpdf_set_subject cpdf_set_text_matrix "
"cpdf_set_text_pos cpdf_set_text_rendering cpdf_set_text_rise "
"cpdf_set_title cpdf_set_viewer_preferences cpdf_set_word_spacing "
"cpdf_setdash cpdf_setflat cpdf_setgray_fill cpdf_setgray_stroke "
"cpdf_setgray cpdf_setlinecap cpdf_setlinejoin cpdf_setlinewidth "
"cpdf_setmiterlimit cpdf_setrgbcolor_fill cpdf_setrgbcolor_stroke "
"cpdf_setrgbcolor cpdf_show_xy cpdf_show cpdf_stringwidth "
"cpdf_text cpdf_translate crack_check crack_closedict cpdf_curveto "
"crack_getlastmessage crack_opendict ctype_alnum ctype_alpha "
"ctype_cntrl ctype_digit ctype_graph ctype_lower ctype_print "
"ctype_punct ctype_space ctype_upper ctype_xdigit curl_close "
"curl_errno curl_error curl_exec curl_getinfo curl_init chgrp "
"curl_version checkdate date getdate gettimeofday gmdate gmmktime "
"gmstrftime localtime microtime mktime strftime strtotime time "
"dba_close dba_delete dba_exists dba_fetch dba_firstkey filetype "
"dba_insert dba_list dba_nextkey dba_open dba_optimize dba_popen "
"dba_replace dba_sync dbase_add_record dbase_close dbase_create "
"dbase_delete_record dbase_get_record_with_names dbase_get_record "
"dbase_numfields dbase_numrecords dbase_open dbase_pack filectime "
"dbase_replace_record dblist dbmclose dbmdelete dbmexists dbmfetch "
"dbmfirstkey dbminsert dbmnextkey dbmopen dbmreplace basename "
"chmod chown clearstatcache copy delete dirname disk_free_space "
"disk_total_space diskfreespace fclose feof fflush fgetc fgetcsv "
"fgets fgetss file_exists file_get_contents file fileatime ftell "
"filegroup fileinode filemtime fileowner fileperms filesize popen "
"flock fnmatch fopen fpassthru fputs fread fscanf fseek fstat stat "
"ftruncate fwrite glob is_dir is_executable is_file is_link "
"is_readable is_uploaded_file is_writable is_writeable link "
"lstat mkdir move_uploaded_file parse_ini_file pathinfo pclose "
"readfile readlink realpath rename rewind rmdir set_file_buffer "
"symlink tempnam tmpfile touch umask unlink ftp_cdup ftp_chdir "
"ftp_close ftp_connect ftp_delete ftp_exec ftp_fget ftp_fput "
"ftp_get_option ftp_get ftp_login ftp_mdtm ftp_mkdir textdomain "
"ftp_nb_fget ftp_nb_fput ftp_nb_get ftp_nb_put ftp_nlist ftp_pasv "
"ftp_put ftp_pwd ftp_quit ftp_rawlist ftp_rename ftp_rmdir checkin "
"ftp_set_option ftp_site ftp_size ftp_ssl_connect ftp_systype "
"call_user_func_array call_user_func create_function func_get_arg "
"func_get_args func_num_args function_exists get_defined_functions "
"register_shutdown_function register_tick_function method_exists "
"unregister_tick_function bind_textdomain_codeset bindtextdomain "
"dcgettext dcngettext dgettext dngettext gettext ngettext "
"gmp_abs gmp_add gmp_and gmp_clrbit gmp_cmp gmp_com gmp_div_q "
"gmp_div_qr gmp_div_r gmp_div gmp_divexact gmp_fact gmp_gcd "
"gmp_hamdist gmp_init gmp_intval gmp_invert gmp_jacobi gmp_gcdext "
"gmp_mod gmp_mul gmp_neg gmp_or gmp_perfect_square gmp_popcount "
"gmp_pow gmp_powm gmp_prob_prime gmp_random gmp_scan0 gmp_scan1 "
"gmp_setbit gmp_sign gmp_sqrt gmp_sqrtrm gmp_strval gmp_sub "
"header headers_sent setcookie hw_api_attribute hwapi_hgcsp "
"hw_api_content hw_api_object key langdepvalue value values insert "
"checkout children mimetype read content copy dbstat dcstat "
"dstofsrcanchors count reason find ftstat hwstat identify info "
"insertanchor insertcollection insertdocument link lock move "
"attreditable count insert remove title value object dstanchors "
"parents description type remove replace setcommitedversion assign "
"srcanchors srcsofdst unlock user userlist iconv_get_encoding "
"iconv_set_encoding iconv ob_iconv_handler exif_imagetype gmp_xor "
"exif_read_data exif_thumbnail gd_info getimagesize cpdf_stroke "
"image_type_to_mime_type image2wbmp imagealphablending imagearc "
"imagechar imagecharup imagecolorallocate imagecolorallocatealpha "
"imagecolorat imagecolorclosest imagecolorclosestalpha curl_setopt "
"imagecolorclosesthwb imagecolordeallocate imagecolorexact "
"imagecolorexactalpha imagecolorresolve imagecolorresolvealpha "
"imagecolorset imagecolorsforindex imagecolorstotal dba_handlers "
"imagecolortransparent imagecopy imagecopymerge imagecopymergegray "
"imagecopyresampled imagecopyresized imagecreate objectbyanchor "
"imagecreatefromgd2part imagecreatefromgd imagecreatefromgif "
"imagecreatefromjpeg imagecreatefrompng imagecreatefromstring "
"imagecreatefromwbmp imagecreatefromxbm imagecreatefromxpm "
"imagecreatetruecolor imagedashedline imagedestroy imageellipse "
"imagefill imagefilledarc imagefilledellipse imagefilledpolygon "
"imagefilledrectangle imagefilltoborder imagefontheight "
"imageftbbox imagefttext imagegammacorrect imagegd2 imagegd "
"imageinterlace imagejpeg imageline imageloadfont imagepalettecopy "
"imagepng imagepolygon imagepsbbox imagepscopyfont imagefontwidth "
"imagepsextendfont imagepsfreefont imagepsloadfont gmp_legendre "
"imagepstext imagerectangle imagerotate imagesetbrush imagegif "
"imagesetstyle imagesetthickness imagesettile imagestring "
"imagestringup imagesx imagesy imagetruecolortopalette "
"imagettftext imagetypes imagewbmp iptcembed iptcparse jpeg2wbmp "
"png2wbmp read_exif_data imap_8bit imap_alerts imap_append "
"imap_binary imap_body imap_bodystruct imap_check imap_base64 "
"imap_close imap_createmailbox imap_delete imap_deletemailbox "
"imap_errors imap_expunge imap_fetch_overview imap_fetchbody "
"imap_fetchheader imap_fetchstructure imap_get_quota imagettfbbox "
"imap_getmailboxes imap_getsubscribed imap_header imap_headerinfo "
"imap_headers imap_last_error imap_list imap_listmailbox "
"imap_listsubscribed imap_lsub imap_mail_compose imap_mail_copy "
"imap_mail_move imap_mail imap_mailboxmsginfo imap_listscan "
"imap_msgno imap_num_msg imap_num_recent imap_open imap_ping "
"imap_renamemailbox imap_reopen imap_rfc822_parse_adrlist linkinfo "
"imap_rfc822_parse_headers imap_rfc822_write_address imap_qprint "
"imap_search imap_set_quota imap_setacl imap_setflag_full "
"imap_status imap_subscribe imap_thread imap_uid imap_undelete "
"imap_unsubscribe imap_utf7_decode imap_utf7_encode imap_utf8 "
"assert_options assert dl extension_loaded get_cfg_var imap_sort "
"get_defined_constants get_extension_funcs get_included_files "
"get_loaded_extensions get_magic_quotes_gpc get_current_user "
"get_required_files getenv getlastmod getmygid getmyinode getmypid "
"getmyuid getopt getrusage ini_alter ini_get_all ini_get "
"ini_set php_ini_scanned_files php_logo_guid php_sapi_name "
"phpcredits phpinfo phpversion putenv set_magic_quotes_runtime "
"set_time_limit version_compare zend_logo_guid zend_version "
"ldap_8859_to_t61 ldap_add ldap_bind ldap_close ldap_compare "
"ldap_connect ldap_count_entries ldap_delete ldap_dn2ufn php_uname "
"ldap_errno ldap_error ldap_explode_dn ldap_first_attribute "
"ldap_first_entry ldap_first_reference ldap_free_result "
"ldap_get_attributes ldap_get_dn ldap_get_entries ldap_get_option "
"ldap_get_values_len ldap_get_values ldap_list ldap_mod_add "
"ldap_mod_del ldap_mod_replace ldap_modify ldap_next_attribute "
"ldap_next_entry ldap_next_reference ldap_parse_reference hypot "
"ldap_parse_result ldap_read ldap_rename ldap_search ldap_err2str "
"ldap_set_option ldap_set_rebind_proc ldap_sort ldap_start_tls "
"ldap_t61_to_8859 ldap_unbind ezmlm_hash mail abs acos acosh asin "
"asinh atan2 atan atanh base_convert bindec ceil cos cosh decbin "
"dechex decoct deg2rad exp expm1 floor fmod getrandmax hexdec "
"is_finite is_infinite is_nan lcg_value log10 log1p log max min "
"mt_getrandmax mt_rand mt_srand octdec pi pow rad2deg rand round "
"sin sinh sqrt srand tan tanh mb_convert_case mb_convert_encoding "
"mb_convert_kana mb_convert_variables mb_decode_mimeheader "
"mb_decode_numericentity mb_detect_encoding mb_detect_order "
"mb_encode_mimeheader mb_encode_numericentity mb_ereg_match "
"mb_ereg_replace mb_ereg_search_getpos mb_ereg_search_getregs "
"mb_ereg_search_init mb_ereg_search_pos mb_ereg_search_regs "
"mb_ereg_search_setpos mb_ereg_search mb_ereg mb_eregi_replace "
"mb_eregi mb_get_info mb_http_input mb_http_output ini_restore "
"mb_internal_encoding mb_language mb_output_handler mb_parse_str "
"mb_preferred_mime_name mb_regex_encoding mb_regex_set_options "
"mb_send_mail mb_split mb_strcut mb_strimwidth mb_strlen mb_strpos "
"mb_strrpos mb_strtolower mb_strtoupper mb_strwidth imagesetpixel "
"mb_substitute_character mb_substr_count mb_substr mcrypt_cbc "
"mcrypt_cfb mcrypt_create_iv mcrypt_decrypt mcrypt_ecb "
"mcrypt_enc_get_algorithms_name mcrypt_enc_get_block_size "
"mcrypt_enc_get_iv_size mcrypt_enc_get_key_size ftp_nb_continue "
"mcrypt_enc_get_modes_name mcrypt_enc_get_supported_key_sizes "
"mcrypt_enc_is_block_algorithm_mode mcrypt_enc_is_block_algorithm "
"mcrypt_enc_is_block_mode mcrypt_enc_self_test mcrypt_encrypt "
"mcrypt_generic_deinit mcrypt_generic_end mcrypt_generic_init "
"mcrypt_generic mcrypt_get_block_size mcrypt_get_cipher_name "
"mcrypt_get_iv_size mcrypt_get_key_size mcrypt_list_algorithms "
"mcrypt_list_modes mcrypt_module_close imap_scanmailbox "
"mcrypt_module_get_algo_key_size imap_get_quotaroot "
"mcrypt_module_is_block_algorithm_mode imap_mime_header_decode "
"mcrypt_module_is_block_mode mcrypt_module_open imagecreatefromgd2 "
"mcrypt_ofb mdecrypt_generic mhash_count mhash_get_block_size "
"mhash_get_hash_name mhash_keygen_s2k mhash mime_content_type "
"connection_aborted connection_status connection_timeout constant "
"defined get_browser highlight_file highlight_string "
"ignore_user_abort pack show_source sleep uniqid unpack usleep "
"msql_affected_rows msql_close msql_connect msql_create_db define "
"msql_data_seek msql_dbname msql_drop_db msql_dropdb msql_error "
"msql_fetch_array msql_fetch_field msql_fetch_object msql_createdb "
"msql_field_seek msql_fieldflags msql_fieldlen msql_fieldname "
"msql_fieldtable msql_fieldtype msql_free_result msql_freeresult "
"msql_list_dbs msql_list_fields msql_list_tables msql_listdbs "
"msql_listfields msql_listtables msql_num_fields msql_num_rows "
"msql_numfields msql_numrows msql_pconnect msql_query msql_regcase "
"msql_result msql_select_db msql_selectdb msql_tablename msql "
"checkdnsrr closelog debugger_off debugger_on gethostbyaddr "
"dns_check_record dns_get_mx dns_get_record fsockopen "
"gethostbyname gethostbynamel getmxrr getprotobyname "
"getservbyname getservbyport ip2long long2ip openlog pfsockopen "
"socket_get_status socket_set_blocking socket_set_timeout syslog "
"ocibindbyname ocicancel OCICollAppend ocicollassign "
"ocicollgetelem ocicollmax ocicollsize ocicolltrim ocicolumnisnull "
"ocicolumnname ocicolumnprecision ocicolumnscale ocicolumnsize "
"ocicolumntype ocicolumntyperaw ocicommit ocidefinebyname ocierror "
"ociexecute ocifetch ocifetchinto ocifetchstatement msql_fetch_row "
"ocifreecursor OCIFreeDesc ocifreestatement ociinternaldebug "
"ocilogoff ocilogon ocinewcollection ocinewcursor ocinewdescriptor "
"ocinlogon ocinumcols ociparse ociplogon ociresult ocirollback "
"ocirowcount ocisavelob ocisavelobfile ociserverversion ociloadlob "
"ocisetprefetch ocistatementtype ociwritelobtofile flush ob_clean "
"ob_end_clean ob_end_flush ob_flush ob_get_contents ob_get_length "
"ob_get_level ob_get_status ob_gzhandler ob_implicit_flush "
"overload pcntl_exec pcntl_fork pcntl_signal pcntl_waitpid "
"pcntl_wexitstatus pcntl_wifexited pcntl_wifsignaled ob_start "
"pcntl_wstopsig pcntl_wtermsig preg_grep preg_match_all preg_match "
"preg_quote preg_replace_callback preg_replace preg_split "
"pdf_add_annotation pdf_add_bookmark pdf_add_launchlink "
"pdf_add_note pdf_add_outline pdf_add_pdflink pdf_add_thumbnail "
"pdf_add_weblink pdf_arc pdf_arcn pdf_attach_file pdf_begin_page "
"pdf_begin_pattern pdf_begin_template pdf_circle pdf_add_locallink "
"pdf_close_pdi_page pdf_close_pdi pdf_close pcntl_wifstopped "
"pdf_closepath_stroke pdf_closepath pdf_concat pdf_continue_text "
"pdf_curveto pdf_delete pdf_end_page pdf_end_pattern "
"pdf_endpath pdf_fill_stroke pdf_fill pdf_findfont pdf_get_buffer "
"pdf_get_font pdf_get_fontname pdf_get_fontsize pdf_open_pdi_page "
"pdf_get_image_width pdf_get_majorversion pdf_get_minorversion "
"pdf_get_parameter pdf_get_pdi_parameter pdf_get_pdi_value "
"pdf_initgraphics pdf_lineto pdf_makespotcolor pdf_moveto pdf_new "
"pdf_open_CCITT pdf_open_file pdf_open_gif pdf_open_image_file "
"pdf_open_image pdf_open_jpeg pdf_open_memory_image "
"pdf_open_pdi pdf_open_png pdf_open_tiff pdf_open pdf_place_image "
"pdf_place_pdi_page pdf_rect pdf_restore pdf_rotate pdf_get_value "
"pdf_set_border_color pdf_set_border_dash pdf_set_border_style "
"pdf_set_char_spacing pdf_set_duration pdf_set_font "
"pdf_set_info_author pdf_set_info_creator pdf_set_info_keywords "
"pdf_set_info_subject pdf_set_info_title pdf_set_info "
"pdf_set_parameter pdf_set_text_matrix pdf_set_text_pos "
"pdf_set_text_rendering pdf_set_text_rise pdf_set_value "
"pdf_set_word_spacing pdf_setcolor pdf_setdash pdf_setflat "
"pdf_setgray_fill pdf_setgray_stroke pdf_setgray pdf_setlinecap "
"pdf_setlinejoin pdf_setlinewidth pdf_setmatrix pdf_setmiterlimit "
"pdf_setpolydash pdf_setrgbcolor_fill pdf_setrgbcolor_stroke "
"pdf_setrgbcolor pdf_show_boxed pdf_show_xy pdf_show pdf_skew "
"pdf_stringwidth pdf_stroke pdf_translate pg_affected_rows "
"pg_cancel_query pg_client_encoding pg_close pg_connect "
"pg_connection_busy pg_connection_reset pg_connection_status "
"pg_copy_from pg_copy_to pg_dbname pg_delete pg_end_copy "
"pg_escape_string pg_fetch_all pg_fetch_array pg_fetch_assoc "
"pg_fetch_object pg_fetch_result pg_fetch_row pg_field_is_null "
"pg_field_name pg_field_num pg_field_prtlen pg_field_size "
"pg_free_result pg_get_notify pg_get_pid pg_get_result pg_host "
"pg_last_error pg_last_notice pg_last_oid pg_lo_close pg_lo_create "
"pg_lo_export pg_lo_import pg_lo_open pg_lo_read_all pg_lo_read "
"pg_lo_seek pg_lo_tell pg_lo_unlink pg_lo_write pg_meta_data "
"pg_num_fields pg_num_rows pg_options pg_pconnect pg_ping pg_port "
"pg_put_line pg_query pg_result_error pg_result_seek pg_field_type "
"pg_select pg_send_query pg_set_client_encoding pg_trace pg_tty "
"pg_unescape_bytea pg_untrace pg_update posix_ctermid posix_getcwd "
"posix_getegid posix_geteuid posix_getgid posix_getgrgid pg_insert "
"posix_getgroups posix_getlogin posix_getpgid posix_getpgrp "
"posix_getppid posix_getpwnam posix_getpwuid posix_getrlimit "
"posix_getuid posix_isatty posix_kill posix_mkfifo posix_setegid "
"posix_seteuid posix_setgid posix_setpgid posix_setsid pdf_setfont "
"posix_times posix_ttyname posix_uname pspell_add_to_personal "
"pspell_add_to_session pspell_check pspell_clear_session "
"pspell_config_create pspell_config_ignore pspell_config_mode "
"pspell_config_personal pspell_config_repl posix_setuid "
"pspell_config_save_repl pspell_new_config pspell_new_personal "
"pspell_new pspell_save_wordlist pspell_store_replacement "
"recode_file recode_string recode ereg_replace ereg eregi_replace "
"split spliti sql_regcase ftok msg_get_queue msg_receive "
"msg_send msg_set_queue msg_stat_queue sem_acquire sem_get "
"sem_remove shm_attach shm_detach shm_get_var shm_put_var "
"shm_remove session_cache_expire session_cache_limiter sem_release "
"session_destroy session_encode session_get_cookie_params eregi "
"session_is_registered session_module_name session_name session_id "
"session_register session_save_path session_set_cookie_params "
"session_set_save_handler session_start session_unregister "
"session_write_close snmp_get_quick_print snmp_set_quick_print "
"snmprealwalk snmpset snmpwalk snmpwalkoid socket_accept snmpget "
"socket_clear_error socket_close socket_connect session_unset "
"socket_create_pair socket_create socket_get_option socket_bind "
"socket_getsockname socket_iovec_add socket_iovec_alloc "
"socket_iovec_delete socket_iovec_fetch socket_iovec_free "
"socket_iovec_set socket_last_error socket_listen socket_read "
"socket_readv socket_recv socket_recvfrom socket_recvmsg "
"socket_send socket_sendmsg socket_sendto socket_set_nonblock "
"socket_set_option socket_shutdown socket_strerror socket_write "
"socket_writev stream_context_create stream_context_get_options "
"stream_context_set_option stream_context_set_params socket_select "
"stream_filter_prepend stream_get_filters stream_get_meta_data "
"stream_get_wrappers stream_register_filter stream_filter_append "
"stream_select stream_set_blocking stream_set_timeout posix_getpid "
"stream_set_write_buffer addcslashes addslashes bin2hex chop chr "
"chunk_split convert_cyr_string count_chars crc32 crypt "
"fprintf get_html_translation_table hebrev hebrevc explode "
"htmlentities htmlspecialchars implode join levenshtein localeconv "
"md5_file md5 metaphone money_format nl_langinfo nl2br pg_convert "
"parse_str printf quoted_printable_decode quotemeta rtrim "
"setlocale sha1_file sha1 similar_text soundex sprintf sscanf ord "
"str_repeat str_replace str_rot13 str_shuffle str_word_count ltrim "
"strchr strcmp strcoll strcspn strip_tags stripcslashes strcasecmp "
"stristr strlen strnatcasecmp strnatcmp strncasecmp strncmp strpos "
"strrchr strrev strrpos strspn strstr strtok strtolower strtoupper "
"substr_count substr_replace substr trim ucfirst ucwords vprintf "
"wordwrap base64_decode base64_encode get_meta_tags parse_url "
"rawurldecode rawurlencode urldecode urlencode doubleval "
"get_defined_vars get_resource_type gettype stripslashes str_pad "
"intval is_array is_bool is_callable is_double is_float is_int "
"is_long is_null is_numeric is_object is_real is_resource floatval "
"is_string print_r serialize settype strval unserialize "
"var_dump var_export utf8_decode utf8_encode xml_error_string "
"xml_get_current_byte_index xml_get_current_column_number "
"xml_get_current_line_number xml_get_error_code is_scalar vsprintf "
"xml_parse xml_parser_create_ns xml_parser_create xml_parser_free "
"xml_parser_get_option xml_parser_set_option number_format "
"xml_set_default_handler xml_set_element_handler is_integer "
"xml_set_end_namespace_decl_handler xml_parse_into_struct strtr "
"xml_set_notation_decl_handler xml_set_object html_entity_decode "
"xml_set_processing_instruction_handler stream_register_wrapper "
"xml_set_unparsed_entity_decl_handler xslt_create xslt_errno "
"xslt_free xslt_output_process xslt_set_base xslt_set_encoding "
"xslt_set_error_handler xslt_set_log xslt_set_sax_handler "
"xslt_set_sax_handlers xslt_set_scheme_handler socket_getpeername "
"zip_close zip_entry_close zip_entry_compressedsize xslt_error "
"zip_entry_compressionmethod zip_entry_filesize zip_entry_name "
"zip_entry_open zip_entry_read zip_open zip_read session_decode "
"get_magic_quotes_runtime xslt_set_scheme_handlers pspell_suggest "
"xml_set_start_namespace_decl_handler import_request_variables "
"xml_set_external_entity_ref_handler socket_create_listen "
"xml_set_character_data_handler session_readonly shm_remove_var "
"msg_remove_queue pspell_config_runtogether posix_getsid "
"posix_getgrnam pg_result_status pg_escape_bytea pdf_set_leading "
"pdf_set_horiz_scaling pdf_save pdf_scale pdf_get_image_height "
"pdf_end_template pdf_closepath_fill_stroke ocicollassignelem "
"pdf_clip pdf_close_image ocifreecollection getprotobynumber "
"mcrypt_module_self_test define_syslog_variables "
"mcrypt_module_get_supported_key_sizes imap_clearflag_full "
"mcrypt_module_is_block_algorithm imagepsencodefont "
"mcrypt_module_get_algo_block_size imagepsslantfont count ")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_HPHP_DEFAULT, 'default_style'),
(stc.STC_HPHP_COMMENT, 'comment_style'),
(stc.STC_HPHP_COMMENTLINE, 'comment_style'),
(stc.STC_HPHP_COMPLEX_VARIABLE, 'pre_style'), #STYLE ME
(stc.STC_HPHP_HSTRING, 'string_style'),
(stc.STC_HPHP_HSTRING_VARIABLE, 'scalar_style'), # STYLE ME
(stc.STC_HPHP_NUMBER, 'number_style'),
(stc.STC_HPHP_OPERATOR, 'operator_style'),
(stc.STC_HPHP_SIMPLESTRING, 'string_style'),
(stc.STC_HPHP_VARIABLE, 'pre2_style'),
(stc.STC_HPHP_WORD, 'keyword_style') ]
#------------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Php"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_HTML)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List """
# Support Embedded HTML highlighting
html = _html.SyntaxData(synglob.ID_LANG_HTML)
keywords = html.GetKeywords()
keywords.append((4, PHP_KEYWORDS))
return keywords
def GetSyntaxSpec(self):
"""Syntax Specifications """
return _html.SYNTAX_ITEMS + SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [_html.FOLD, _html.FLD_HTML]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code
@note: assuming pure php code for comment character(s)
"""
return [u'//']
#---- Syntax Modules Internal Functions ----#
def KeywordString(option=0):
"""Returns the specified Keyword String
@note: not used by most modules
"""
return PHP_KEYWORDS
#---- End Syntax Modules Internal Functions ----#
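# --- Editor's note: how this module is typically consumed by Editra's syntax
# framework (an illustrative sketch; the exact calls and the ID_LANG_PHP
# constant are assumptions based on the class above). ---
#
# data = SyntaxData(synglob.ID_LANG_PHP)
# data.GetKeywords()        # HTML keyword sets plus (4, PHP_KEYWORDS)
# data.GetSyntaxSpec()      # _html.SYNTAX_ITEMS + SYNTAX_ITEMS
# data.GetCommentPattern()  # [u'//']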
|
beiko-lab/gengis
|
bin/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_php.py
|
Python
|
gpl-3.0
| 30,237
|
"""
Creates fasta file from orfs
"""
import argparse
import os
import sys
import site
import re
import numpy as np
import numpy.random
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
base_path="%s/src"%base_path
for directory_name in os.listdir(base_path):
site.addsitedir(os.path.join(base_path, directory_name))
import fasta
import faa
import gff
import cPickle
if __name__=="__main__":
parser = argparse.ArgumentParser(description=\
'Extracts orfs and creates fasta files')
parser.add_argument('--gff', type=str, required=False,
help='Input gff file')
parser.add_argument('--fasta', type=str, required=False,
help='Input fasta file')
parser.add_argument('--faidx', type=str, required=False,
help='Input fasta file index')
parser.add_argument('--faa', type=str, required=False,
help='Input fasta file for proteins')
parser.add_argument('--pickle', type=str, required=False,default=None,
help='Pickle file containing all clusters')
parser.add_argument('--out', type=str, required=False,
help='Output file for translated orfs')
args = parser.parse_args()
queries = []
print "Started"
if not os.path.exists("faa.pickle"):
print "Creating pickle"
faaindex = faa.FAA(args.faa)
faaindex.index()
print len(faaindex.indexer)
assert 'AAD07798.1' in faaindex.indexer,"len=%d"%len(faaindex.indexer)
faaindex.dump("faa.pickle")
else:
print "Loading pickle"
faaindex = faa.FAA(args.faa)
faaindex.load("faa.pickle")
    if args.pickle is None:
        # NOTE: `clique_filter`, `all_hits` and `faidx` are not defined in this
        # script; this branch appears to depend on an earlier version of the code.
        clusters = clique_filter.findContextGeneClusters(all_hits, faidx, backtrans=False,
                                                         functions=["toxin", "transport"])
        cPickle.dump(clusters, open(args.pickle, 'wb'))  # fixed: dump() needs the object as first argument
    else:
        clusters, _ = cPickle.load(open(args.pickle, 'rb'))
    gff_obj = gff.GFF(args.gff, fasta_file=args.fasta, fasta_index=args.faidx)  # renamed to avoid shadowing the gff module
    for cluster in clusters:
        for node in cluster:
            acc, clrname, full_evalue, hmm_st, hmm_end, env_st, env_end, description = node.split('|')
            function = clrname.split('.')[0]
            if function == 'toxin':
                queries.append((acc, clrname, full_evalue, hmm_st, hmm_end, env_st, env_end, description))
    gff_obj.translate_orfs(queries, faaindex, args.out)
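# --- Editor's note: example invocation of this script (all file paths are
# illustrative assumptions). ---
#
# python orffasta.py --gff genes.gff --fasta genome.fna --faidx genome.fna.fai \
#                    --faa proteins.faa --pickle clusters.pickle --out orfs.faa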
|
idoerg/BOA
|
scripts/orffasta.py
|
Python
|
gpl-3.0
| 2,576
|
from django.db import models
class Group(models.Model):
BASE_URL = "https://www.facebook.com/groups/%s"
def __unicode__(self):
return self.name
id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length = 100)
school = models.CharField(max_length = 100)
class User(models.Model):
def __unicode__(self):
return self.name
id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length = 100)
class Listing(models.Model):
BASE_URL = "https://www.facebook.com/%s/posts/%s"
def __unicode__(self):
return self.message or u'No Text'
id = models.BigIntegerField(primary_key=True)
created_time = models.DateTimeField(null=False)
updated_time = models.DateTimeField(null=False)
type = models.CharField(max_length = 6)
message = models.TextField(null=False, blank=True, default="")
picture = models.TextField(null=False, blank=True, default="")
parsed = models.BooleanField(default=False)
user = models.ForeignKey(User)
approved = models.BooleanField(default=False)
buy_or_sell = models.CharField(max_length = 4, null=True)
category = models.CharField(max_length = 15, null=True)
object_id = models.BigIntegerField(null=True)
group = models.ForeignKey(Group)
likers = models.ManyToManyField(User, related_name="listings")
sold = models.BooleanField(default=False)
def url(self): return self.BASE_URL % (self.group.id, self.id)
class Comment(models.Model):
def __unicode__(self):
return self.message or u'No Text'
id = models.BigIntegerField(primary_key=True)
message = models.TextField(null=False, blank=True, default="")
created_time = models.DateTimeField('date published')
user = models.ForeignKey(User)
listing = models.ForeignKey(Listing, related_name="comments")
|
AlJohri/nucraigslist
|
listings/models.py
|
Python
|
gpl-3.0
| 1,757
|
# Copyright (C) 2014 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
import logging
import requests
from base64 import b64decode, b64encode
from bottle import request, abort
from future.moves.urllib.parse import urlparse
from ycmd import hmac_utils
from ycmd.utils import ToBytes
from ycmd.bottle_utils import SetResponseHeader
_HMAC_HEADER = 'x-ycm-hmac'
_HOST_HEADER = 'host'
# This class implements the Bottle plugin API:
# http://bottlepy.org/docs/dev/plugindev.html
#
# We want to ensure that every request coming in has a valid HMAC set in the
# x-ycm-hmac header and that every response coming out sets such a valid header.
# This is to prevent security issues with possible remote code execution.
# The x-ycm-hmac value is encoded as base64 during transport instead of sent raw
# because https://tools.ietf.org/html/rfc5987 says header values must be in the
# ISO-8859-1 character set.
class HmacPlugin( object ):
name = 'hmac'
api = 2
def __init__( self, hmac_secret ):
self._hmac_secret = hmac_secret
self._logger = logging.getLogger( __name__ )
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
if not HostHeaderCorrect( request ):
self._logger.info( 'Dropping request with bad Host header.' )
abort( requests.codes.unauthorized,
'Unauthorized, received bad Host header.' )
return
body = ToBytes( request.body.read() )
if not RequestAuthenticated( request.method, request.path, body,
self._hmac_secret ):
self._logger.info( 'Dropping request with bad HMAC.' )
abort( requests.codes.unauthorized, 'Unauthorized, received bad HMAC.' )
return
body = callback( *args, **kwargs )
SetHmacHeader( body, self._hmac_secret )
return body
return wrapper
def HostHeaderCorrect( request ):
host = urlparse( 'http://' + request.headers[ _HOST_HEADER ] ).hostname
return host == '127.0.0.1' or host == 'localhost'
def RequestAuthenticated( method, path, body, hmac_secret ):
if _HMAC_HEADER not in request.headers:
return False
return hmac_utils.SecureBytesEqual(
hmac_utils.CreateRequestHmac(
ToBytes( method ),
ToBytes( path ),
ToBytes( body ),
ToBytes( hmac_secret ) ),
ToBytes( b64decode( request.headers[ _HMAC_HEADER ] ) ) )
def SetHmacHeader( body, hmac_secret ):
value = b64encode( hmac_utils.CreateHmac( ToBytes( body ),
ToBytes( hmac_secret ) ) )
SetResponseHeader( _HMAC_HEADER, value )
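# --- Editor's note: how a Bottle plugin like this one is installed (an
# illustrative sketch; the app object and secret value are assumptions). ---
#
# import bottle
# app = bottle.Bottle()
# app.install( HmacPlugin( 'shared-hmac-secret' ) )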
|
volkhin/ycmd
|
ycmd/hmac_plugin.py
|
Python
|
gpl-3.0
| 3,445
|
from __future__ import print_function, division, unicode_literals
from pprint import pprint
from itertools import groupby
from functools import wraps
from collections import namedtuple, deque
# OrderedDict was added in 2.7. ibm6 still uses python2.6
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
def group_entries_bylocus(entries):
d = {}
for e in entries:
if e.locus not in d:
d[e.locus] = [e]
else:
d[e.locus].append(e)
return d
class Entry(namedtuple("Entry", "vname, ptr, action, size, file, func, line, tot_memory, sidx")):
@classmethod
def from_line(cls, line, sidx):
args = line.split()
args.append(sidx)
return cls(*args)
def __new__(cls, *args):
"""Extends the base class adding type conversion of arguments."""
# write(logunt,'(a,t60,a,1x,2(i0,1x),2(a,1x),2(i0,1x))')&
# trim(vname), trim(act), addr, isize, trim(basename(file)), trim(func), line, memtot_abi%memory
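        # so a raw log line looks roughly like this (hypothetical values):
        #   foo%bar  A 139700000 800 m_xc.F90 xc_init 42 123456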
return super(cls, Entry).__new__(cls,
vname=args[0],
action=args[1],
ptr=int(args[2]),
size=int(args[3]),
file=args[4],
func=args[5],
line=int(args[6]),
tot_memory=int(args[7]),
sidx=args[8],
)
def __repr__(self):
return self.as_repr(with_addr=True)
def as_repr(self, with_addr=True):
if with_addr:
return "<var=%s, %s@%s:%s:%s, addr=%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, hex(self.ptr), self.size, self.sidx)
else:
return "<var=%s, %s@%s:%s:%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, self.size, self.sidx)
@property
def basename(self):
return self.vname.split("%")[-1]
@property
def isalloc(self):
"""True if entry represents an allocation."""
return self.action == "A"
@property
def isfree(self):
"""True if entry represents a deallocation."""
return self.action == "D"
@property
def iszerosized(self):
"""True if this is a zero-sized alloc/free."""
return self.size == 0
@property
def locus(self):
"""This is almost unique"""
return self.func + "@" + self.file
def frees_onheap(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
return True
def frees_onstack(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
if self.locus != other.locus: return False
return True
class Heap(dict):
def show(self):
print("=== HEAP OF LEN %s ===" % len(self))
if not self: return
# for p, elist in self.items():
pprint(self, indent=4)
print("")
def pop_alloc(self, entry):
if not entry.isfree: return 0
        elist = self.get(entry.ptr)
        if elist is None: return 0
        for i, olde in enumerate(elist):
            if entry.size + olde.size == 0:  # the free cancels this allocation
elist.pop(i)
return 1
return 0
class Stack(dict):
def show(self):
print("=== STACK OF LEN %s ===)" % len(self))
if not self: return
pprint(self)
print("")
def catchall(method):
@wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
try:
return method(*args, **kwargs)
except Exception as exc:
# Add info on file and re-raise.
msg = "Exception while parsing file: %s\n" % self.path
raise exc.__class__(msg + str(exc))
return wrapper
class AbimemParser(object):
def __init__(self, path):
self.path = path
#def __str__(self):
# lines = []
# app = lines.append
# return "\n".join(lines)
@catchall
def summarize(self):
with open(self.path, "rt") as fh:
l = fh.read()
print(l)
@catchall
def find_small_allocs(self, nbytes=160):
"""Zero sized allocations are not counted."""
smalles = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
if not e.isalloc: continue
if 0 < e.size <= nbytes: smalles.append(e)
pprint(smalles)
return smalles
@catchall
def find_intensive(self, threshold=2000):
d = {}
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
loc = e.locus
if loc not in d:
d[loc] = [e]
else:
d[loc].append(e)
# Remove entries below the threshold and perform DSU sort
dsu_list = [(elist, len(elist)) for _, elist in d.items() if len(elist) >= threshold]
intensive = [t[0] for t in sorted(dsu_list, key=lambda x: x[1], reverse=True)]
for elist in intensive:
loc = elist[0].locus
# assert all(e.locus == loc for e in elist)
print("[%s] has %s allocations/frees" % (loc, len(elist)))
return intensive
#def show_peaks(self):
@catchall
def find_zerosized(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.size == 0: eapp(e)
if elist:
print("Found %d zero-sized entries:" % len(elist))
pprint(elist)
else:
print("No zero-sized found")
return elist
@catchall
def find_weird_ptrs(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.ptr <= 0: eapp(e)
if elist:
print("Found %d weird entries:" % len(elist))
pprint(elist)
else:
print("No weird entries found")
return elist
def yield_all_entries(self):
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
yield Entry.from_line(line, lineno)
@catchall
def find_peaks(self, maxlen=20):
# the deque is bounded to the specified maximum length. Once a bounded length deque is full,
# when new items are added, a corresponding number of items are discarded from the opposite end.
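        # For instance: d = deque([1, 2, 3], maxlen=3); d.append(4)  ->  deque([2, 3, 4])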
peaks = deque(maxlen=maxlen)
for e in self.yield_all_entries():
size = e.size
if size == 0 or not e.isalloc: continue
if len(peaks) == 0:
peaks.append(e); continue
            # TODO: Should remove redundant entries.
if size > peaks[0].size:
peaks.append(e)
        peaks = deque(sorted(peaks, key=lambda x: x.size, reverse=True), maxlen=maxlen)
for peak in peaks:
print(peak)
return peaks
@catchall
def plot_memory_usage(self, show=True):
memory = [e.tot_memory for e in self.yield_all_entries()]
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(memory)
if show: plt.show()
return fig
#def get_dataframe(self):
# import pandas as pd
# frame = pd.DataFrame()
# return frame
@catchall
def find_memleaks(self):
heap, stack = Heap(), Stack()
reallocs = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
newe = Entry.from_line(line, lineno)
p = newe.ptr
if newe.size == 0: continue
                # Store the new entry if its ptr is not yet in the heap,
                # else check whether this entry is a deallocation that matches
                # a previous allocation (zero-sized arrays are excluded above),
                # else there's a possible memory leak or some undetected problem.
if p not in heap:
if newe.isalloc:
heap[p] = [newe]
else:
# Likely reallocation
reallocs.append(newe)
else:
if newe.isfree and len(heap[p]) == 1 and heap[p][0].size + newe.size == 0:
heap.pop(p)
else:
# In principle this should never happen but there are exceptions:
#
# 1) The compiler could decide to put the allocatable on the stack
# In this case the ptr reported by gfortran is 0.
#
# 2) The allocatable variable is "reallocated" by the compiler (F2003).
# Example:
#
# allocate(foo(2,1)) ! p0 = &foo
# foo = reshape([0,0], [2,1]) ! p1 = &foo. Reallocation of the LHS.
# ! Use foo(:) to avoid that
# deallocate(foo) ! p2 = &foo
#
# In this case, p2 != p0
#print("WARN:", newe.ptr, newe, "ptr already on the heap")
#print("HEAP:", heap[newe.ptr])
locus = newe.locus
if locus not in stack:
stack[locus] = [newe]
else:
#if newe.ptr != 0: print(newe)
stack_loc = stack[locus]
ifind = -1
for i, olde in enumerate(stack_loc):
if newe.frees_onstack(olde):
ifind = i
break
if ifind != -1:
stack_loc.pop(ifind)
#else:
# print(newe)
#if p == 0:
# stack[p] = newe
#else:
# print("varname", newe.vname, "in heap with size ",newe.size)
# for weirde in heap[p]:
# print("\tweird entry:", weirde)
# heap[p].append(newe)
if False and heap:
# Possible memory leaks.
count = -1
keyfunc = lambda e: abs(e.size)
for a, entries in heap.items():
count += 1
entries = [e for e in entries if e.size != 0]
entries = sorted(entries, key=keyfunc)
#if any(int(e.size) != 0 for e in l):
#msizes = []
for key, group in groupby(entries, keyfunc):
group = list(group)
#print([e.name for e in g])
pos_size = [e for e in group if e.size >0]
neg_size = [e for e in group if e.size <0]
if len(pos_size) != len(neg_size):
print("key", key)
for e in group:
print(e)
#print(list(g))
#for i, e in enumerate(entries):
# print("\t[%d]" % i, e)
#print("Count=%d" % count, 60 * "=")
if heap: heap.show()
if stack: stack.show()
if reallocs:
print("Possible reallocations:")
pprint(reallocs)
return len(heap) + len(stack) + len(reallocs)
|
jmbeuken/abinit
|
tests/pymods/memprof.py
|
Python
|
gpl-3.0
| 12,121
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Font handling wrapper.
'''
from weblate import appsettings
from PIL import ImageFont
import os.path
# List of chars in base DejaVu font, otherwise we use DroidSansFallback
BASE_CHARS = frozenset((
0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe,
0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a,
0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32,
0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e,
0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a,
0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56,
0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62,
0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a,
0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86,
0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92,
0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e,
0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa,
0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2,
0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce,
0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6,
0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2,
0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe,
0xff, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107, 0x108, 0x109,
0x10a, 0x10b, 0x10c, 0x10d, 0x10e, 0x10f, 0x110, 0x111, 0x112, 0x113,
0x114, 0x115, 0x116, 0x117, 0x118, 0x119, 0x11a, 0x11b, 0x11c, 0x11d,
0x11e, 0x11f, 0x120, 0x121, 0x122, 0x123, 0x124, 0x125, 0x126, 0x127,
0x128, 0x129, 0x12a, 0x12b, 0x12c, 0x12d, 0x12e, 0x12f, 0x130, 0x131,
0x132, 0x133, 0x134, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13a, 0x13b,
0x13c, 0x13d, 0x13e, 0x13f, 0x140, 0x141, 0x142, 0x143, 0x144, 0x145,
0x146, 0x147, 0x148, 0x149, 0x14a, 0x14b, 0x14c, 0x14d, 0x14e, 0x14f,
0x150, 0x151, 0x152, 0x153, 0x154, 0x155, 0x156, 0x157, 0x158, 0x159,
0x15a, 0x15b, 0x15c, 0x15d, 0x15e, 0x15f, 0x160, 0x161, 0x162, 0x163,
0x164, 0x165, 0x166, 0x167, 0x168, 0x169, 0x16a, 0x16b, 0x16c, 0x16d,
0x16e, 0x16f, 0x170, 0x171, 0x172, 0x173, 0x174, 0x175, 0x176, 0x177,
0x178, 0x179, 0x17a, 0x17b, 0x17c, 0x17d, 0x17e, 0x17f, 0x180, 0x181,
0x182, 0x183, 0x184, 0x185, 0x186, 0x187, 0x188, 0x189, 0x18a, 0x18b,
0x18c, 0x18d, 0x18e, 0x18f, 0x190, 0x191, 0x192, 0x193, 0x194, 0x195,
0x196, 0x197, 0x198, 0x199, 0x19a, 0x19b, 0x19c, 0x19d, 0x19e, 0x19f,
0x1a0, 0x1a1, 0x1a2, 0x1a3, 0x1a4, 0x1a5, 0x1a6, 0x1a7, 0x1a8, 0x1a9,
0x1aa, 0x1ab, 0x1ac, 0x1ad, 0x1ae, 0x1af, 0x1b0, 0x1b1, 0x1b2, 0x1b3,
0x1b4, 0x1b5, 0x1b6, 0x1b7, 0x1b8, 0x1b9, 0x1ba, 0x1bb, 0x1bc, 0x1bd,
0x1be, 0x1bf, 0x1c0, 0x1c1, 0x1c2, 0x1c3, 0x1c4, 0x1c5, 0x1c6, 0x1c7,
0x1c8, 0x1c9, 0x1ca, 0x1cb, 0x1cc, 0x1cd, 0x1ce, 0x1cf, 0x1d0, 0x1d1,
0x1d2, 0x1d3, 0x1d4, 0x1d5, 0x1d6, 0x1d7, 0x1d8, 0x1d9, 0x1da, 0x1db,
0x1dc, 0x1dd, 0x1de, 0x1df, 0x1e0, 0x1e1, 0x1e2, 0x1e3, 0x1e4, 0x1e5,
0x1e6, 0x1e7, 0x1e8, 0x1e9, 0x1ea, 0x1eb, 0x1ec, 0x1ed, 0x1ee, 0x1ef,
0x1f0, 0x1f1, 0x1f2, 0x1f3, 0x1f4, 0x1f5, 0x1f6, 0x1f7, 0x1f8, 0x1f9,
0x1fa, 0x1fb, 0x1fc, 0x1fd, 0x1fe, 0x1ff, 0x200, 0x201, 0x202, 0x203,
0x204, 0x205, 0x206, 0x207, 0x208, 0x209, 0x20a, 0x20b, 0x20c, 0x20d,
0x20e, 0x20f, 0x210, 0x211, 0x212, 0x213, 0x214, 0x215, 0x216, 0x217,
0x218, 0x219, 0x21a, 0x21b, 0x21c, 0x21d, 0x21e, 0x21f, 0x220, 0x221,
0x222, 0x223, 0x224, 0x225, 0x226, 0x227, 0x228, 0x229, 0x22a, 0x22b,
0x22c, 0x22d, 0x22e, 0x22f, 0x230, 0x231, 0x232, 0x233, 0x234, 0x235,
0x236, 0x237, 0x238, 0x239, 0x23a, 0x23b, 0x23c, 0x23d, 0x23e, 0x23f,
0x240, 0x241, 0x242, 0x243, 0x244, 0x245, 0x246, 0x247, 0x248, 0x249,
0x24a, 0x24b, 0x24c, 0x24d, 0x24e, 0x24f, 0x250, 0x251, 0x252, 0x253,
0x254, 0x255, 0x256, 0x257, 0x258, 0x259, 0x25a, 0x25b, 0x25c, 0x25d,
0x25e, 0x25f, 0x260, 0x261, 0x262, 0x263, 0x264, 0x265, 0x266, 0x267,
0x268, 0x269, 0x26a, 0x26b, 0x26c, 0x26d, 0x26e, 0x26f, 0x270, 0x271,
0x272, 0x273, 0x274, 0x275, 0x276, 0x277, 0x278, 0x279, 0x27a, 0x27b,
0x27c, 0x27d, 0x27e, 0x27f, 0x280, 0x281, 0x282, 0x283, 0x284, 0x285,
0x286, 0x287, 0x288, 0x289, 0x28a, 0x28b, 0x28c, 0x28d, 0x28e, 0x28f,
0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297, 0x298, 0x299,
0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f, 0x2a0, 0x2a1, 0x2a2, 0x2a3,
0x2a4, 0x2a5, 0x2a6, 0x2a7, 0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad,
0x2ae, 0x2af, 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7,
0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf, 0x2c0, 0x2c1,
0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7, 0x2c8, 0x2c9, 0x2ca, 0x2cb,
0x2cc, 0x2cd, 0x2ce, 0x2cf, 0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5,
0x2d6, 0x2d7, 0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df,
0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7, 0x2e8, 0x2e9,
0x2ec, 0x2ed, 0x2ee, 0x2f3, 0x2f7, 0x300, 0x301, 0x302, 0x303, 0x304,
0x305, 0x306, 0x307, 0x308, 0x309, 0x30a, 0x30b, 0x30c, 0x30d, 0x30e,
0x30f, 0x310, 0x311, 0x312, 0x313, 0x314, 0x315, 0x316, 0x317, 0x318,
0x319, 0x31a, 0x31b, 0x31c, 0x31d, 0x31e, 0x31f, 0x320, 0x321, 0x322,
0x323, 0x324, 0x325, 0x326, 0x327, 0x328, 0x329, 0x32a, 0x32b, 0x32c,
0x32d, 0x32e, 0x32f, 0x330, 0x331, 0x332, 0x333, 0x334, 0x335, 0x336,
0x337, 0x338, 0x339, 0x33a, 0x33b, 0x33c, 0x33d, 0x33e, 0x33f, 0x340,
0x341, 0x342, 0x343, 0x344, 0x345, 0x346, 0x347, 0x348, 0x349, 0x34a,
0x34b, 0x34c, 0x34d, 0x34e, 0x34f, 0x351, 0x352, 0x353, 0x357, 0x358,
0x35a, 0x35c, 0x35d, 0x35e, 0x35f, 0x360, 0x361, 0x362, 0x370, 0x371,
0x372, 0x373, 0x374, 0x375, 0x376, 0x377, 0x37a, 0x37b, 0x37c, 0x37d,
0x37e, 0x384, 0x385, 0x386, 0x387, 0x388, 0x389, 0x38a, 0x38c, 0x38e,
0x38f, 0x390, 0x391, 0x392, 0x393, 0x394, 0x395, 0x396, 0x397, 0x398,
0x399, 0x39a, 0x39b, 0x39c, 0x39d, 0x39e, 0x39f, 0x3a0, 0x3a1, 0x3a3,
0x3a4, 0x3a5, 0x3a6, 0x3a7, 0x3a8, 0x3a9, 0x3aa, 0x3ab, 0x3ac, 0x3ad,
0x3ae, 0x3af, 0x3b0, 0x3b1, 0x3b2, 0x3b3, 0x3b4, 0x3b5, 0x3b6, 0x3b7,
0x3b8, 0x3b9, 0x3ba, 0x3bb, 0x3bc, 0x3bd, 0x3be, 0x3bf, 0x3c0, 0x3c1,
0x3c2, 0x3c3, 0x3c4, 0x3c5, 0x3c6, 0x3c7, 0x3c8, 0x3c9, 0x3ca, 0x3cb,
0x3cc, 0x3cd, 0x3ce, 0x3cf, 0x3d0, 0x3d1, 0x3d2, 0x3d3, 0x3d4, 0x3d5,
0x3d6, 0x3d7, 0x3d8, 0x3d9, 0x3da, 0x3db, 0x3dc, 0x3dd, 0x3de, 0x3df,
0x3e0, 0x3e1, 0x3e2, 0x3e3, 0x3e4, 0x3e5, 0x3e6, 0x3e7, 0x3e8, 0x3e9,
0x3ea, 0x3eb, 0x3ec, 0x3ed, 0x3ee, 0x3ef, 0x3f0, 0x3f1, 0x3f2, 0x3f3,
0x3f4, 0x3f5, 0x3f6, 0x3f7, 0x3f8, 0x3f9, 0x3fa, 0x3fb, 0x3fc, 0x3fd,
0x3fe, 0x3ff, 0x400, 0x401, 0x402, 0x403, 0x404, 0x405, 0x406, 0x407,
0x408, 0x409, 0x40a, 0x40b, 0x40c, 0x40d, 0x40e, 0x40f, 0x410, 0x411,
0x412, 0x413, 0x414, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41a, 0x41b,
0x41c, 0x41d, 0x41e, 0x41f, 0x420, 0x421, 0x422, 0x423, 0x424, 0x425,
0x426, 0x427, 0x428, 0x429, 0x42a, 0x42b, 0x42c, 0x42d, 0x42e, 0x42f,
0x430, 0x431, 0x432, 0x433, 0x434, 0x435, 0x436, 0x437, 0x438, 0x439,
0x43a, 0x43b, 0x43c, 0x43d, 0x43e, 0x43f, 0x440, 0x441, 0x442, 0x443,
0x444, 0x445, 0x446, 0x447, 0x448, 0x449, 0x44a, 0x44b, 0x44c, 0x44d,
0x44e, 0x44f, 0x450, 0x451, 0x452, 0x453, 0x454, 0x455, 0x456, 0x457,
0x458, 0x459, 0x45a, 0x45b, 0x45c, 0x45d, 0x45e, 0x45f, 0x460, 0x461,
0x462, 0x463, 0x464, 0x465, 0x466, 0x467, 0x468, 0x469, 0x46a, 0x46b,
0x46c, 0x46d, 0x46e, 0x46f, 0x470, 0x471, 0x472, 0x473, 0x474, 0x475,
0x476, 0x477, 0x478, 0x479, 0x47a, 0x47b, 0x47c, 0x47d, 0x47e, 0x47f,
0x480, 0x481, 0x482, 0x483, 0x484, 0x485, 0x486, 0x487, 0x488, 0x489,
0x48a, 0x48b, 0x48c, 0x48d, 0x48e, 0x48f, 0x490, 0x491, 0x492, 0x493,
0x494, 0x495, 0x496, 0x497, 0x498, 0x499, 0x49a, 0x49b, 0x49c, 0x49d,
0x49e, 0x49f, 0x4a0, 0x4a1, 0x4a2, 0x4a3, 0x4a4, 0x4a5, 0x4a6, 0x4a7,
0x4a8, 0x4a9, 0x4aa, 0x4ab, 0x4ac, 0x4ad, 0x4ae, 0x4af, 0x4b0, 0x4b1,
0x4b2, 0x4b3, 0x4b4, 0x4b5, 0x4b6, 0x4b7, 0x4b8, 0x4b9, 0x4ba, 0x4bb,
0x4bc, 0x4bd, 0x4be, 0x4bf, 0x4c0, 0x4c1, 0x4c2, 0x4c3, 0x4c4, 0x4c5,
0x4c6, 0x4c7, 0x4c8, 0x4c9, 0x4ca, 0x4cb, 0x4cc, 0x4cd, 0x4ce, 0x4cf,
0x4d0, 0x4d1, 0x4d2, 0x4d3, 0x4d4, 0x4d5, 0x4d6, 0x4d7, 0x4d8, 0x4d9,
0x4da, 0x4db, 0x4dc, 0x4dd, 0x4de, 0x4df, 0x4e0, 0x4e1, 0x4e2, 0x4e3,
0x4e4, 0x4e5, 0x4e6, 0x4e7, 0x4e8, 0x4e9, 0x4ea, 0x4eb, 0x4ec, 0x4ed,
0x4ee, 0x4ef, 0x4f0, 0x4f1, 0x4f2, 0x4f3, 0x4f4, 0x4f5, 0x4f6, 0x4f7,
0x4f8, 0x4f9, 0x4fa, 0x4fb, 0x4fc, 0x4fd, 0x4fe, 0x4ff, 0x500, 0x501,
0x502, 0x503, 0x504, 0x505, 0x506, 0x507, 0x508, 0x509, 0x50a, 0x50b,
0x50c, 0x50d, 0x50e, 0x50f, 0x510, 0x511, 0x512, 0x513, 0x514, 0x515,
0x516, 0x517, 0x518, 0x519, 0x51a, 0x51b, 0x51c, 0x51d, 0x51e, 0x51f,
0x520, 0x521, 0x522, 0x523, 0x524, 0x525, 0x531, 0x532, 0x533, 0x534,
0x535, 0x536, 0x537, 0x538, 0x539, 0x53a, 0x53b, 0x53c, 0x53d, 0x53e,
0x53f, 0x540, 0x541, 0x542, 0x543, 0x544, 0x545, 0x546, 0x547, 0x548,
0x549, 0x54a, 0x54b, 0x54c, 0x54d, 0x54e, 0x54f, 0x550, 0x551, 0x552,
0x553, 0x554, 0x555, 0x556, 0x559, 0x55a, 0x55b, 0x55c, 0x55d, 0x55e,
0x55f, 0x561, 0x562, 0x563, 0x564, 0x565, 0x566, 0x567, 0x568, 0x569,
0x56a, 0x56b, 0x56c, 0x56d, 0x56e, 0x56f, 0x570, 0x571, 0x572, 0x573,
0x574, 0x575, 0x576, 0x577, 0x578, 0x579, 0x57a, 0x57b, 0x57c, 0x57d,
0x57e, 0x57f, 0x580, 0x581, 0x582, 0x583, 0x584, 0x585, 0x586, 0x587,
0x589, 0x58a, 0x5b0, 0x5b1, 0x5b2, 0x5b3, 0x5b4, 0x5b5, 0x5b6, 0x5b7,
0x5b8, 0x5b9, 0x5ba, 0x5bb, 0x5bc, 0x5bd, 0x5be, 0x5bf, 0x5c0, 0x5c1,
0x5c2, 0x5c3, 0x5c6, 0x5c7, 0x5d0, 0x5d1, 0x5d2, 0x5d3, 0x5d4, 0x5d5,
0x5d6, 0x5d7, 0x5d8, 0x5d9, 0x5da, 0x5db, 0x5dc, 0x5dd, 0x5de, 0x5df,
0x5e0, 0x5e1, 0x5e2, 0x5e3, 0x5e4, 0x5e5, 0x5e6, 0x5e7, 0x5e8, 0x5e9,
0x5ea, 0x5f0, 0x5f1, 0x5f2, 0x5f3, 0x5f4, 0x606, 0x607, 0x609, 0x60a,
0x60c, 0x615, 0x61b, 0x61f, 0x621, 0x622, 0x623, 0x624, 0x625, 0x626,
0x627, 0x628, 0x629, 0x62a, 0x62b, 0x62c, 0x62d, 0x62e, 0x62f, 0x630,
0x631, 0x632, 0x633, 0x634, 0x635, 0x636, 0x637, 0x638, 0x639, 0x63a,
0x640, 0x641, 0x642, 0x643, 0x644, 0x645, 0x646, 0x647, 0x648, 0x649,
0x64a, 0x64b, 0x64c, 0x64d, 0x64e, 0x64f, 0x650, 0x651, 0x652, 0x653,
0x654, 0x655, 0x657, 0x65a, 0x660, 0x661, 0x662, 0x663, 0x664, 0x665,
0x666, 0x667, 0x668, 0x669, 0x66a, 0x66b, 0x66c, 0x66d, 0x66e, 0x66f,
0x670, 0x674, 0x679, 0x67a, 0x67b, 0x67c, 0x67d, 0x67e, 0x67f, 0x680,
0x681, 0x682, 0x683, 0x684, 0x685, 0x686, 0x687, 0x688, 0x689, 0x68a,
0x68b, 0x68c, 0x68d, 0x68e, 0x68f, 0x690, 0x691, 0x692, 0x693, 0x694,
0x695, 0x696, 0x697, 0x698, 0x699, 0x69a, 0x69b, 0x69c, 0x69d, 0x69e,
0x69f, 0x6a0, 0x6a1, 0x6a2, 0x6a3, 0x6a4, 0x6a5, 0x6a6, 0x6a7, 0x6a8,
0x6a9, 0x6aa, 0x6ab, 0x6ac, 0x6ad, 0x6ae, 0x6af, 0x6b0, 0x6b1, 0x6b2,
0x6b3, 0x6b4, 0x6b5, 0x6b6, 0x6b7, 0x6b8, 0x6b9, 0x6ba, 0x6bb, 0x6bc,
0x6bd, 0x6be, 0x6bf, 0x6c6, 0x6cc, 0x6ce, 0x6d5, 0x6f0, 0x6f1, 0x6f2,
0x6f3, 0x6f4, 0x6f5, 0x6f6, 0x6f7, 0x6f8, 0x6f9, 0x7c0, 0x7c1, 0x7c2,
0x7c3, 0x7c4, 0x7c5, 0x7c6, 0x7c7, 0x7c8, 0x7c9, 0x7ca, 0x7cb, 0x7cc,
0x7cd, 0x7ce, 0x7cf, 0x7d0, 0x7d1, 0x7d2, 0x7d3, 0x7d4, 0x7d5, 0x7d6,
0x7d7, 0x7d8, 0x7d9, 0x7da, 0x7db, 0x7dc, 0x7dd, 0x7de, 0x7df, 0x7e0,
0x7e1, 0x7e2, 0x7e3, 0x7e4, 0x7e5, 0x7e6, 0x7e7, 0x7eb, 0x7ec, 0x7ed,
0x7ee, 0x7ef, 0x7f0, 0x7f1, 0x7f2, 0x7f3, 0x7f4, 0x7f5, 0x7f8, 0x7f9,
0x7fa, 0xe3f, 0xe81, 0xe82, 0xe84, 0xe87, 0xe88, 0xe8a, 0xe8d, 0xe94,
0xe95, 0xe96, 0xe97, 0xe99, 0xe9a, 0xe9b, 0xe9c, 0xe9d, 0xe9e, 0xe9f,
0xea1, 0xea2, 0xea3, 0xea5, 0xea7, 0xeaa, 0xeab, 0xead, 0xeae, 0xeaf,
0xeb0, 0xeb1, 0xeb2, 0xeb3, 0xeb4, 0xeb5, 0xeb6, 0xeb7, 0xeb8, 0xeb9,
0xebb, 0xebc, 0xebd, 0xec0, 0xec1, 0xec2, 0xec3, 0xec4, 0xec6, 0xec8,
0xec9, 0xeca, 0xecb, 0xecc, 0xecd, 0xed0, 0xed1, 0xed2, 0xed3, 0xed4,
0xed5, 0xed6, 0xed7, 0xed8, 0xed9, 0xedc, 0xedd, 0x10a0, 0x10a1, 0x10a2,
0x10a3, 0x10a4, 0x10a5, 0x10a6, 0x10a7, 0x10a8, 0x10a9, 0x10aa, 0x10ab,
0x10ac, 0x10ad, 0x10ae, 0x10af, 0x10b0, 0x10b1, 0x10b2, 0x10b3, 0x10b4,
0x10b5, 0x10b6, 0x10b7, 0x10b8, 0x10b9, 0x10ba, 0x10bb, 0x10bc, 0x10bd,
0x10be, 0x10bf, 0x10c0, 0x10c1, 0x10c2, 0x10c3, 0x10c4, 0x10c5, 0x10d0,
0x10d1, 0x10d2, 0x10d3, 0x10d4, 0x10d5, 0x10d6, 0x10d7, 0x10d8, 0x10d9,
0x10da, 0x10db, 0x10dc, 0x10dd, 0x10de, 0x10df, 0x10e0, 0x10e1, 0x10e2,
0x10e3, 0x10e4, 0x10e5, 0x10e6, 0x10e7, 0x10e8, 0x10e9, 0x10ea, 0x10eb,
0x10ec, 0x10ed, 0x10ee, 0x10ef, 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4,
0x10f5, 0x10f6, 0x10f7, 0x10f8, 0x10f9, 0x10fa, 0x10fb, 0x10fc, 0x1401,
0x1402, 0x1403, 0x1404, 0x1405, 0x1406, 0x1407, 0x1409, 0x140a, 0x140b,
0x140c, 0x140d, 0x140e, 0x140f, 0x1410, 0x1411, 0x1412, 0x1413, 0x1414,
0x1415, 0x1416, 0x1417, 0x1418, 0x1419, 0x141a, 0x141b, 0x141d, 0x141e,
0x141f, 0x1420, 0x1421, 0x1422, 0x1423, 0x1424, 0x1425, 0x1426, 0x1427,
0x1428, 0x1429, 0x142a, 0x142b, 0x142c, 0x142d, 0x142e, 0x142f, 0x1430,
0x1431, 0x1432, 0x1433, 0x1434, 0x1435, 0x1437, 0x1438, 0x1439, 0x143a,
0x143b, 0x143c, 0x143d, 0x143e, 0x143f, 0x1440, 0x1441, 0x1442, 0x1443,
0x1444, 0x1445, 0x1446, 0x1447, 0x1448, 0x1449, 0x144a, 0x144c, 0x144d,
0x144e, 0x144f, 0x1450, 0x1451, 0x1452, 0x1454, 0x1455, 0x1456, 0x1457,
0x1458, 0x1459, 0x145a, 0x145b, 0x145c, 0x145d, 0x145e, 0x145f, 0x1460,
0x1461, 0x1462, 0x1463, 0x1464, 0x1465, 0x1466, 0x1467, 0x1468, 0x1469,
0x146a, 0x146b, 0x146c, 0x146d, 0x146e, 0x146f, 0x1470, 0x1471, 0x1472,
0x1473, 0x1474, 0x1475, 0x1476, 0x1477, 0x1478, 0x1479, 0x147a, 0x147b,
0x147c, 0x147d, 0x147e, 0x147f, 0x1480, 0x1481, 0x1482, 0x1483, 0x1484,
0x1485, 0x1486, 0x1487, 0x1488, 0x1489, 0x148a, 0x148b, 0x148c, 0x148d,
0x148e, 0x148f, 0x1490, 0x1491, 0x1492, 0x1493, 0x1494, 0x1495, 0x1496,
0x1497, 0x1498, 0x1499, 0x149a, 0x149b, 0x149c, 0x149d, 0x149e, 0x149f,
0x14a0, 0x14a1, 0x14a2, 0x14a3, 0x14a4, 0x14a5, 0x14a6, 0x14a7, 0x14a8,
0x14a9, 0x14aa, 0x14ab, 0x14ac, 0x14ad, 0x14ae, 0x14af, 0x14b0, 0x14b1,
0x14b2, 0x14b3, 0x14b4, 0x14b5, 0x14b6, 0x14b7, 0x14b8, 0x14b9, 0x14ba,
0x14bb, 0x14bc, 0x14bd, 0x14c0, 0x14c1, 0x14c2, 0x14c3, 0x14c4, 0x14c5,
0x14c6, 0x14c7, 0x14c8, 0x14c9, 0x14ca, 0x14cb, 0x14cc, 0x14cd, 0x14ce,
0x14cf, 0x14d0, 0x14d1, 0x14d2, 0x14d3, 0x14d4, 0x14d5, 0x14d6, 0x14d7,
0x14d8, 0x14d9, 0x14da, 0x14db, 0x14dc, 0x14dd, 0x14de, 0x14df, 0x14e0,
0x14e1, 0x14e2, 0x14e3, 0x14e4, 0x14e5, 0x14e6, 0x14e7, 0x14e8, 0x14e9,
0x14ea, 0x14ec, 0x14ed, 0x14ee, 0x14ef, 0x14f0, 0x14f1, 0x14f2, 0x14f3,
0x14f4, 0x14f5, 0x14f6, 0x14f7, 0x14f8, 0x14f9, 0x14fa, 0x14fb, 0x14fc,
0x14fd, 0x14fe, 0x14ff, 0x1500, 0x1501, 0x1502, 0x1503, 0x1504, 0x1505,
0x1506, 0x1507, 0x1510, 0x1511, 0x1512, 0x1513, 0x1514, 0x1515, 0x1516,
0x1517, 0x1518, 0x1519, 0x151a, 0x151b, 0x151c, 0x151d, 0x151e, 0x151f,
0x1520, 0x1521, 0x1522, 0x1523, 0x1524, 0x1525, 0x1526, 0x1527, 0x1528,
0x1529, 0x152a, 0x152b, 0x152c, 0x152d, 0x152e, 0x152f, 0x1530, 0x1531,
0x1532, 0x1533, 0x1534, 0x1535, 0x1536, 0x1537, 0x1538, 0x1539, 0x153a,
0x153b, 0x153c, 0x153d, 0x153e, 0x1540, 0x1541, 0x1542, 0x1543, 0x1544,
0x1545, 0x1546, 0x1547, 0x1548, 0x1549, 0x154a, 0x154b, 0x154c, 0x154d,
0x154e, 0x154f, 0x1550, 0x1552, 0x1553, 0x1554, 0x1555, 0x1556, 0x1557,
0x1558, 0x1559, 0x155a, 0x155b, 0x155c, 0x155d, 0x155e, 0x155f, 0x1560,
0x1561, 0x1562, 0x1563, 0x1564, 0x1565, 0x1566, 0x1567, 0x1568, 0x1569,
0x156a, 0x1574, 0x1575, 0x1576, 0x1577, 0x1578, 0x1579, 0x157a, 0x157b,
0x157c, 0x157d, 0x157e, 0x157f, 0x1580, 0x1581, 0x1582, 0x1583, 0x1584,
0x1585, 0x158a, 0x158b, 0x158c, 0x158d, 0x158e, 0x158f, 0x1590, 0x1591,
0x1592, 0x1593, 0x1594, 0x1595, 0x1596, 0x15a0, 0x15a1, 0x15a2, 0x15a3,
0x15a4, 0x15a5, 0x15a6, 0x15a7, 0x15a8, 0x15a9, 0x15aa, 0x15ab, 0x15ac,
0x15ad, 0x15ae, 0x15af, 0x15de, 0x15e1, 0x1646, 0x1647, 0x166e, 0x166f,
0x1670, 0x1671, 0x1672, 0x1673, 0x1674, 0x1675, 0x1676, 0x1680, 0x1681,
0x1682, 0x1683, 0x1684, 0x1685, 0x1686, 0x1687, 0x1688, 0x1689, 0x168a,
0x168b, 0x168c, 0x168d, 0x168e, 0x168f, 0x1690, 0x1691, 0x1692, 0x1693,
0x1694, 0x1695, 0x1696, 0x1697, 0x1698, 0x1699, 0x169a, 0x169b, 0x169c,
0x1d00, 0x1d01, 0x1d02, 0x1d03, 0x1d04, 0x1d05, 0x1d06, 0x1d07, 0x1d08,
0x1d09, 0x1d0a, 0x1d0b, 0x1d0c, 0x1d0d, 0x1d0e, 0x1d0f, 0x1d10, 0x1d11,
0x1d12, 0x1d13, 0x1d14, 0x1d16, 0x1d17, 0x1d18, 0x1d19, 0x1d1a, 0x1d1b,
0x1d1c, 0x1d1d, 0x1d1e, 0x1d1f, 0x1d20, 0x1d21, 0x1d22, 0x1d23, 0x1d26,
0x1d27, 0x1d28, 0x1d29, 0x1d2a, 0x1d2b, 0x1d2c, 0x1d2d, 0x1d2e, 0x1d30,
0x1d31, 0x1d32, 0x1d33, 0x1d34, 0x1d35, 0x1d36, 0x1d37, 0x1d38, 0x1d39,
0x1d3a, 0x1d3b, 0x1d3c, 0x1d3d, 0x1d3e, 0x1d3f, 0x1d40, 0x1d41, 0x1d42,
0x1d43, 0x1d44, 0x1d45, 0x1d46, 0x1d47, 0x1d48, 0x1d49, 0x1d4a, 0x1d4b,
0x1d4c, 0x1d4d, 0x1d4e, 0x1d4f, 0x1d50, 0x1d51, 0x1d52, 0x1d53, 0x1d54,
0x1d55, 0x1d56, 0x1d57, 0x1d58, 0x1d59, 0x1d5a, 0x1d5b, 0x1d5d, 0x1d5e,
0x1d5f, 0x1d60, 0x1d61, 0x1d62, 0x1d63, 0x1d64, 0x1d65, 0x1d66, 0x1d67,
0x1d68, 0x1d69, 0x1d6a, 0x1d77, 0x1d78, 0x1d7b, 0x1d7d, 0x1d85, 0x1d9b,
0x1d9c, 0x1d9d, 0x1d9e, 0x1d9f, 0x1da0, 0x1da1, 0x1da2, 0x1da3, 0x1da4,
0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa, 0x1dab, 0x1dac, 0x1dad,
0x1dae, 0x1daf, 0x1db0, 0x1db1, 0x1db2, 0x1db3, 0x1db4, 0x1db5, 0x1db6,
0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb, 0x1dbc, 0x1dbd, 0x1dbe, 0x1dbf,
0x1dc4, 0x1dc5, 0x1dc6, 0x1dc7, 0x1dc8, 0x1dc9, 0x1e00, 0x1e01, 0x1e02,
0x1e03, 0x1e04, 0x1e05, 0x1e06, 0x1e07, 0x1e08, 0x1e09, 0x1e0a, 0x1e0b,
0x1e0c, 0x1e0d, 0x1e0e, 0x1e0f, 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14,
0x1e15, 0x1e16, 0x1e17, 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d,
0x1e1e, 0x1e1f, 0x1e20, 0x1e21, 0x1e22, 0x1e23, 0x1e24, 0x1e25, 0x1e26,
0x1e27, 0x1e28, 0x1e29, 0x1e2a, 0x1e2b, 0x1e2c, 0x1e2d, 0x1e2e, 0x1e2f,
0x1e30, 0x1e31, 0x1e32, 0x1e33, 0x1e34, 0x1e35, 0x1e36, 0x1e37, 0x1e38,
0x1e39, 0x1e3a, 0x1e3b, 0x1e3c, 0x1e3d, 0x1e3e, 0x1e3f, 0x1e40, 0x1e41,
0x1e42, 0x1e43, 0x1e44, 0x1e45, 0x1e46, 0x1e47, 0x1e48, 0x1e49, 0x1e4a,
0x1e4b, 0x1e4c, 0x1e4d, 0x1e4e, 0x1e4f, 0x1e50, 0x1e51, 0x1e52, 0x1e53,
0x1e54, 0x1e55, 0x1e56, 0x1e57, 0x1e58, 0x1e59, 0x1e5a, 0x1e5b, 0x1e5c,
0x1e5d, 0x1e5e, 0x1e5f, 0x1e60, 0x1e61, 0x1e62, 0x1e63, 0x1e64, 0x1e65,
0x1e66, 0x1e67, 0x1e68, 0x1e69, 0x1e6a, 0x1e6b, 0x1e6c, 0x1e6d, 0x1e6e,
0x1e6f, 0x1e70, 0x1e71, 0x1e72, 0x1e73, 0x1e74, 0x1e75, 0x1e76, 0x1e77,
0x1e78, 0x1e79, 0x1e7a, 0x1e7b, 0x1e7c, 0x1e7d, 0x1e7e, 0x1e7f, 0x1e80,
0x1e81, 0x1e82, 0x1e83, 0x1e84, 0x1e85, 0x1e86, 0x1e87, 0x1e88, 0x1e89,
0x1e8a, 0x1e8b, 0x1e8c, 0x1e8d, 0x1e8e, 0x1e8f, 0x1e90, 0x1e91, 0x1e92,
0x1e93, 0x1e94, 0x1e95, 0x1e96, 0x1e97, 0x1e98, 0x1e99, 0x1e9a, 0x1e9b,
0x1e9c, 0x1e9d, 0x1e9e, 0x1e9f, 0x1ea0, 0x1ea1, 0x1ea2, 0x1ea3, 0x1ea4,
0x1ea5, 0x1ea6, 0x1ea7, 0x1ea8, 0x1ea9, 0x1eaa, 0x1eab, 0x1eac, 0x1ead,
0x1eae, 0x1eaf, 0x1eb0, 0x1eb1, 0x1eb2, 0x1eb3, 0x1eb4, 0x1eb5, 0x1eb6,
0x1eb7, 0x1eb8, 0x1eb9, 0x1eba, 0x1ebb, 0x1ebc, 0x1ebd, 0x1ebe, 0x1ebf,
0x1ec0, 0x1ec1, 0x1ec2, 0x1ec3, 0x1ec4, 0x1ec5, 0x1ec6, 0x1ec7, 0x1ec8,
0x1ec9, 0x1eca, 0x1ecb, 0x1ecc, 0x1ecd, 0x1ece, 0x1ecf, 0x1ed0, 0x1ed1,
0x1ed2, 0x1ed3, 0x1ed4, 0x1ed5, 0x1ed6, 0x1ed7, 0x1ed8, 0x1ed9, 0x1eda,
0x1edb, 0x1edc, 0x1edd, 0x1ede, 0x1edf, 0x1ee0, 0x1ee1, 0x1ee2, 0x1ee3,
0x1ee4, 0x1ee5, 0x1ee6, 0x1ee7, 0x1ee8, 0x1ee9, 0x1eea, 0x1eeb, 0x1eec,
0x1eed, 0x1eee, 0x1eef, 0x1ef0, 0x1ef1, 0x1ef2, 0x1ef3, 0x1ef4, 0x1ef5,
0x1ef6, 0x1ef7, 0x1ef8, 0x1ef9, 0x1efa, 0x1efb, 0x1f00, 0x1f01, 0x1f02,
0x1f03, 0x1f04, 0x1f05, 0x1f06, 0x1f07, 0x1f08, 0x1f09, 0x1f0a, 0x1f0b,
0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f, 0x1f10, 0x1f11, 0x1f12, 0x1f13, 0x1f14,
0x1f15, 0x1f18, 0x1f19, 0x1f1a, 0x1f1b, 0x1f1c, 0x1f1d, 0x1f20, 0x1f21,
0x1f22, 0x1f23, 0x1f24, 0x1f25, 0x1f26, 0x1f27, 0x1f28, 0x1f29, 0x1f2a,
0x1f2b, 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f, 0x1f30, 0x1f31, 0x1f32, 0x1f33,
0x1f34, 0x1f35, 0x1f36, 0x1f37, 0x1f38, 0x1f39, 0x1f3a, 0x1f3b, 0x1f3c,
0x1f3d, 0x1f3e, 0x1f3f, 0x1f40, 0x1f41, 0x1f42, 0x1f43, 0x1f44, 0x1f45,
0x1f48, 0x1f49, 0x1f4a, 0x1f4b, 0x1f4c, 0x1f4d, 0x1f50, 0x1f51, 0x1f52,
0x1f53, 0x1f54, 0x1f55, 0x1f56, 0x1f57, 0x1f59, 0x1f5b, 0x1f5d, 0x1f5f,
0x1f60, 0x1f61, 0x1f62, 0x1f63, 0x1f64, 0x1f65, 0x1f66, 0x1f67, 0x1f68,
0x1f69, 0x1f6a, 0x1f6b, 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f, 0x1f70, 0x1f71,
0x1f72, 0x1f73, 0x1f74, 0x1f75, 0x1f76, 0x1f77, 0x1f78, 0x1f79, 0x1f7a,
0x1f7b, 0x1f7c, 0x1f7d, 0x1f80, 0x1f81, 0x1f82, 0x1f83, 0x1f84, 0x1f85,
0x1f86, 0x1f87, 0x1f88, 0x1f89, 0x1f8a, 0x1f8b, 0x1f8c, 0x1f8d, 0x1f8e,
0x1f8f, 0x1f90, 0x1f91, 0x1f92, 0x1f93, 0x1f94, 0x1f95, 0x1f96, 0x1f97,
0x1f98, 0x1f99, 0x1f9a, 0x1f9b, 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f, 0x1fa0,
0x1fa1, 0x1fa2, 0x1fa3, 0x1fa4, 0x1fa5, 0x1fa6, 0x1fa7, 0x1fa8, 0x1fa9,
0x1faa, 0x1fab, 0x1fac, 0x1fad, 0x1fae, 0x1faf, 0x1fb0, 0x1fb1, 0x1fb2,
0x1fb3, 0x1fb4, 0x1fb6, 0x1fb7, 0x1fb8, 0x1fb9, 0x1fba, 0x1fbb, 0x1fbc,
0x1fbd, 0x1fbe, 0x1fbf, 0x1fc0, 0x1fc1, 0x1fc2, 0x1fc3, 0x1fc4, 0x1fc6,
0x1fc7, 0x1fc8, 0x1fc9, 0x1fca, 0x1fcb, 0x1fcc, 0x1fcd, 0x1fce, 0x1fcf,
0x1fd0, 0x1fd1, 0x1fd2, 0x1fd3, 0x1fd6, 0x1fd7, 0x1fd8, 0x1fd9, 0x1fda,
0x1fdb, 0x1fdd, 0x1fde, 0x1fdf, 0x1fe0, 0x1fe1, 0x1fe2, 0x1fe3, 0x1fe4,
0x1fe5, 0x1fe6, 0x1fe7, 0x1fe8, 0x1fe9, 0x1fea, 0x1feb, 0x1fec, 0x1fed,
0x1fee, 0x1fef, 0x1ff2, 0x1ff3, 0x1ff4, 0x1ff6, 0x1ff7, 0x1ff8, 0x1ff9,
0x1ffa, 0x1ffb, 0x1ffc, 0x1ffd, 0x1ffe, 0x2000, 0x2001, 0x2002, 0x2003,
0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200a, 0x200b, 0x200c,
0x200d, 0x200e, 0x200f, 0x2010, 0x2011, 0x2012, 0x2013, 0x2015, 0x2015,
0x2016, 0x2017, 0x2018, 0x2019, 0x201a, 0x201b, 0x201c, 0x201d, 0x201e,
0x201f, 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
0x2028, 0x2029, 0x202a, 0x202b, 0x202c, 0x202d, 0x202e, 0x202f, 0x2030,
0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, 0x2038, 0x2039,
0x203a, 0x203b, 0x203c, 0x203d, 0x203e, 0x203f, 0x2040, 0x2041, 0x2042,
0x2043, 0x2044, 0x2045, 0x2046, 0x2047, 0x2048, 0x2049, 0x204a, 0x204b,
0x204c, 0x204d, 0x204e, 0x204f, 0x2050, 0x2051, 0x2052, 0x2053, 0x2054,
0x2055, 0x2056, 0x2057, 0x2058, 0x2059, 0x205a, 0x205b, 0x205c, 0x205d,
0x205e, 0x205f, 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x206a, 0x206b,
0x206c, 0x206d, 0x206e, 0x206f, 0x2070, 0x2071, 0x2074, 0x2075, 0x2076,
0x2077, 0x2078, 0x2079, 0x207a, 0x207b, 0x207c, 0x207d, 0x207e, 0x207f,
0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, 0x2088,
0x2089, 0x208a, 0x208b, 0x208c, 0x208d, 0x208e, 0x2090, 0x2091, 0x2092,
0x2093, 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209a, 0x209b,
0x209c, 0x20a0, 0x20a1, 0x20a2, 0x20a3, 0x20a4, 0x20a5, 0x20a6, 0x20a7,
0x20a8, 0x20a9, 0x20aa, 0x20ab, 0x20ac, 0x20ad, 0x20ae, 0x20af, 0x20b0,
0x20b1, 0x20b2, 0x20b3, 0x20b4, 0x20b5, 0x20b8, 0x20b9, 0x20ba, 0x20d0,
0x20d1, 0x20d6, 0x20d7, 0x20db, 0x20dc, 0x20e1, 0x2100, 0x2101, 0x2102,
0x2103, 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210b, 0x210c,
0x210d, 0x210e, 0x210f, 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115,
0x2116, 0x2117, 0x2118, 0x2119, 0x211a, 0x211b, 0x211c, 0x211d, 0x211e,
0x211f, 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
0x2128, 0x2129, 0x212a, 0x212b, 0x212c, 0x212d, 0x212e, 0x212f, 0x2130,
0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139,
0x213a, 0x213b, 0x213c, 0x213d, 0x213e, 0x213f, 0x2140, 0x2141, 0x2142,
0x2143, 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214b, 0x214e,
0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, 0x2158,
0x2159, 0x215a, 0x215b, 0x215c, 0x215d, 0x215e, 0x215f, 0x2160, 0x2161,
0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216a,
0x216b, 0x216c, 0x216d, 0x216e, 0x216f, 0x2170, 0x2171, 0x2172, 0x2173,
0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217a, 0x217b, 0x217c,
0x217d, 0x217e, 0x217f, 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185,
0x2189, 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
0x2198, 0x2199, 0x219a, 0x219b, 0x219c, 0x219d, 0x219e, 0x219f, 0x21a0,
0x21a1, 0x21a2, 0x21a3, 0x21a4, 0x21a5, 0x21a6, 0x21a7, 0x21a8, 0x21a9,
0x21aa, 0x21ab, 0x21ac, 0x21ad, 0x21ae, 0x21af, 0x21b0, 0x21b1, 0x21b2,
0x21b3, 0x21b4, 0x21b5, 0x21b6, 0x21b7, 0x21b8, 0x21b9, 0x21ba, 0x21bb,
0x21bc, 0x21bd, 0x21be, 0x21bf, 0x21c0, 0x21c1, 0x21c2, 0x21c3, 0x21c4,
0x21c5, 0x21c6, 0x21c7, 0x21c8, 0x21c9, 0x21ca, 0x21cb, 0x21cc, 0x21cd,
0x21ce, 0x21cf, 0x21d0, 0x21d1, 0x21d2, 0x21d3, 0x21d4, 0x21d5, 0x21d6,
0x21d7, 0x21d8, 0x21d9, 0x21da, 0x21db, 0x21dc, 0x21dd, 0x21de, 0x21df,
0x21e0, 0x21e1, 0x21e2, 0x21e3, 0x21e4, 0x21e5, 0x21e6, 0x21e7, 0x21e8,
0x21e9, 0x21ea, 0x21eb, 0x21ec, 0x21ed, 0x21ee, 0x21ef, 0x21f0, 0x21f1,
0x21f2, 0x21f3, 0x21f4, 0x21f5, 0x21f6, 0x21f7, 0x21f8, 0x21f9, 0x21fa,
0x21fb, 0x21fc, 0x21fd, 0x21fe, 0x21ff, 0x2200, 0x2201, 0x2202, 0x2203,
0x2204, 0x2205, 0x2206, 0x2207, 0x2208, 0x2209, 0x220a, 0x220b, 0x220c,
0x220d, 0x220e, 0x220f, 0x2210, 0x2211, 0x2212, 0x2213, 0x2214, 0x2215,
0x2216, 0x2217, 0x2218, 0x2219, 0x221a, 0x221b, 0x221c, 0x221d, 0x221e,
0x221f, 0x2220, 0x2221, 0x2222, 0x2223, 0x2224, 0x2225, 0x2226, 0x2227,
0x2228, 0x2229, 0x222a, 0x222b, 0x222c, 0x222d, 0x222e, 0x222f, 0x2230,
0x2231, 0x2232, 0x2233, 0x2234, 0x2235, 0x2236, 0x2237, 0x2238, 0x2239,
0x223a, 0x223b, 0x223c, 0x223d, 0x223e, 0x223f, 0x2240, 0x2241, 0x2242,
0x2243, 0x2244, 0x2245, 0x2246, 0x2247, 0x2248, 0x2249, 0x224a, 0x224b,
0x224c, 0x224d, 0x224e, 0x224f, 0x2250, 0x2251, 0x2252, 0x2253, 0x2254,
0x2255, 0x2256, 0x2257, 0x2258, 0x2259, 0x225a, 0x225b, 0x225c, 0x225d,
0x225e, 0x225f, 0x2260, 0x2261, 0x2262, 0x2263, 0x2264, 0x2265, 0x2266,
0x2267, 0x2268, 0x2269, 0x226a, 0x226b, 0x226c, 0x226d, 0x226e, 0x226f,
0x2270, 0x2271, 0x2272, 0x2273, 0x2274, 0x2275, 0x2276, 0x2277, 0x2278,
0x2279, 0x227a, 0x227b, 0x227c, 0x227d, 0x227e, 0x227f, 0x2280, 0x2281,
0x2282, 0x2283, 0x2284, 0x2285, 0x2286, 0x2287, 0x2288, 0x2289, 0x228a,
0x228b, 0x228c, 0x228d, 0x228e, 0x228f, 0x2290, 0x2291, 0x2292, 0x2293,
0x2294, 0x2295, 0x2296, 0x2297, 0x2298, 0x2299, 0x229a, 0x229b, 0x229c,
0x229d, 0x229e, 0x229f, 0x22a0, 0x22a1, 0x22a2, 0x22a3, 0x22a4, 0x22a5,
0x22a6, 0x22a7, 0x22a8, 0x22a9, 0x22aa, 0x22ab, 0x22ac, 0x22ad, 0x22ae,
0x22af, 0x22b0, 0x22b1, 0x22b2, 0x22b3, 0x22b4, 0x22b5, 0x22b6, 0x22b7,
0x22b8, 0x22b9, 0x22ba, 0x22bb, 0x22bc, 0x22bd, 0x22be, 0x22bf, 0x22c0,
0x22c1, 0x22c2, 0x22c3, 0x22c4, 0x22c5, 0x22c6, 0x22c7, 0x22c8, 0x22c9,
0x22ca, 0x22cb, 0x22cc, 0x22cd, 0x22ce, 0x22cf, 0x22d0, 0x22d1, 0x22d2,
0x22d3, 0x22d4, 0x22d5, 0x22d6, 0x22d7, 0x22d8, 0x22d9, 0x22da, 0x22db,
0x22dc, 0x22dd, 0x22de, 0x22df, 0x22e0, 0x22e1, 0x22e2, 0x22e3, 0x22e4,
0x22e5, 0x22e6, 0x22e7, 0x22e8, 0x22e9, 0x22ea, 0x22eb, 0x22ec, 0x22ed,
0x22ee, 0x22ef, 0x22f0, 0x22f1, 0x22f2, 0x22f3, 0x22f4, 0x22f5, 0x22f6,
0x22f7, 0x22f8, 0x22f9, 0x22fa, 0x22fb, 0x22fc, 0x22fd, 0x22fe, 0x22ff,
0x2300, 0x2301, 0x2302, 0x2303, 0x2304, 0x2305, 0x2306, 0x2307, 0x2308,
0x2309, 0x230a, 0x230b, 0x230c, 0x230d, 0x230e, 0x230f, 0x2310, 0x2311,
0x2318, 0x2319, 0x231c, 0x231d, 0x231e, 0x231f, 0x2320, 0x2321, 0x2324,
0x2325, 0x2326, 0x2327, 0x2328, 0x232b, 0x232c, 0x2373, 0x2374, 0x2375,
0x237a, 0x237d, 0x2387, 0x2394, 0x239b, 0x239c, 0x239d, 0x239e, 0x239f,
0x23a0, 0x23a1, 0x23a2, 0x23a3, 0x23a4, 0x23a5, 0x23a6, 0x23a7, 0x23a8,
0x23a9, 0x23aa, 0x23ab, 0x23ac, 0x23ad, 0x23ae, 0x23ce, 0x23cf, 0x23e3,
0x23e5, 0x23e8, 0x2422, 0x2423, 0x2460, 0x2461, 0x2462, 0x2463, 0x2464,
0x2465, 0x2466, 0x2467, 0x2468, 0x2469, 0x2500, 0x2501, 0x2502, 0x2503,
0x2504, 0x2505, 0x2506, 0x2507, 0x2508, 0x2509, 0x250a, 0x250b, 0x250c,
0x250d, 0x250e, 0x250f, 0x2510, 0x2511, 0x2512, 0x2513, 0x2514, 0x2515,
0x2516, 0x2517, 0x2518, 0x2519, 0x251a, 0x251b, 0x251c, 0x251d, 0x251e,
0x251f, 0x2520, 0x2521, 0x2522, 0x2523, 0x2524, 0x2525, 0x2526, 0x2527,
0x2528, 0x2529, 0x252a, 0x252b, 0x252c, 0x252d, 0x252e, 0x252f, 0x2530,
0x2531, 0x2532, 0x2533, 0x2534, 0x2535, 0x2536, 0x2537, 0x2538, 0x2539,
0x253a, 0x253b, 0x253c, 0x253d, 0x253e, 0x253f, 0x2540, 0x2541, 0x2542,
0x2543, 0x2544, 0x2545, 0x2546, 0x2547, 0x2548, 0x2549, 0x254a, 0x254b,
0x254c, 0x254d, 0x254e, 0x254f, 0x2550, 0x2551, 0x2552, 0x2553, 0x2554,
0x2555, 0x2556, 0x2557, 0x2558, 0x2559, 0x255a, 0x255b, 0x255c, 0x255d,
0x255e, 0x255f, 0x2560, 0x2561, 0x2562, 0x2563, 0x2564, 0x2565, 0x2566,
0x2567, 0x2568, 0x2569, 0x256a, 0x256b, 0x256c, 0x256d, 0x256e, 0x256f,
0x2570, 0x2571, 0x2572, 0x2573, 0x2574, 0x2575, 0x2576, 0x2577, 0x2578,
0x2579, 0x257a, 0x257b, 0x257c, 0x257d, 0x257e, 0x257f, 0x2580, 0x2581,
0x2582, 0x2583, 0x2584, 0x2585, 0x2586, 0x2587, 0x2588, 0x2589, 0x258a,
0x258b, 0x258c, 0x258d, 0x258e, 0x258f, 0x2590, 0x2591, 0x2592, 0x2593,
0x2594, 0x2595, 0x2596, 0x2597, 0x2598, 0x2599, 0x259a, 0x259b, 0x259c,
0x259d, 0x259e, 0x259f, 0x25a0, 0x25a1, 0x25a2, 0x25a3, 0x25a4, 0x25a5,
0x25a6, 0x25a7, 0x25a8, 0x25a9, 0x25aa, 0x25ab, 0x25ac, 0x25ad, 0x25ae,
0x25af, 0x25b0, 0x25b1, 0x25b2, 0x25b3, 0x25b4, 0x25b5, 0x25b6, 0x25b7,
0x25b8, 0x25b9, 0x25ba, 0x25bb, 0x25bc, 0x25bd, 0x25be, 0x25bf, 0x25c0,
0x25c1, 0x25c2, 0x25c3, 0x25c4, 0x25c5, 0x25c6, 0x25c7, 0x25c8, 0x25c9,
0x25ca, 0x25cb, 0x25cc, 0x25cd, 0x25ce, 0x25cf, 0x25d0, 0x25d1, 0x25d2,
0x25d3, 0x25d4, 0x25d5, 0x25d6, 0x25d7, 0x25d8, 0x25d9, 0x25da, 0x25db,
0x25dc, 0x25dd, 0x25de, 0x25df, 0x25e0, 0x25e1, 0x25e2, 0x25e3, 0x25e4,
0x25e5, 0x25e6, 0x25e7, 0x25e8, 0x25e9, 0x25ea, 0x25eb, 0x25ec, 0x25ed,
0x25ee, 0x25ef, 0x25f0, 0x25f1, 0x25f2, 0x25f3, 0x25f4, 0x25f5, 0x25f6,
0x25f7, 0x25f8, 0x25f9, 0x25fa, 0x25fb, 0x25fc, 0x25fd, 0x25fe, 0x25ff,
0x2600, 0x2601, 0x2602, 0x2603, 0x2604, 0x2605, 0x2606, 0x2607, 0x2608,
0x2609, 0x260a, 0x260b, 0x260c, 0x260d, 0x260e, 0x260f, 0x2610, 0x2611,
0x2612, 0x2613, 0x2614, 0x2615, 0x2616, 0x2617, 0x2618, 0x2619, 0x261a,
0x261b, 0x261c, 0x261d, 0x261e, 0x261f, 0x2620, 0x2621, 0x2622, 0x2623,
0x2624, 0x2625, 0x2626, 0x2627, 0x2628, 0x2629, 0x262a, 0x262b, 0x262c,
0x262d, 0x262e, 0x262f, 0x2630, 0x2631, 0x2632, 0x2633, 0x2634, 0x2635,
0x2636, 0x2637, 0x2638, 0x2639, 0x263a, 0x263b, 0x263c, 0x263d, 0x263e,
0x263f, 0x2640, 0x2641, 0x2642, 0x2643, 0x2644, 0x2645, 0x2646, 0x2647,
0x2648, 0x2649, 0x264a, 0x264b, 0x264c, 0x264d, 0x264e, 0x264f, 0x2650,
0x2651, 0x2652, 0x2653, 0x2654, 0x2655, 0x2656, 0x2657, 0x2658, 0x2659,
0x265a, 0x265b, 0x265c, 0x265d, 0x265e, 0x265f, 0x2660, 0x2661, 0x2662,
0x2663, 0x2664, 0x2665, 0x2666, 0x2667, 0x2668, 0x2669, 0x266a, 0x266b,
0x266c, 0x266d, 0x266e, 0x266f, 0x2670, 0x2671, 0x2672, 0x2673, 0x2674,
0x2675, 0x2676, 0x2677, 0x2678, 0x2679, 0x267a, 0x267b, 0x267c, 0x267d,
0x267e, 0x267f, 0x2680, 0x2681, 0x2682, 0x2683, 0x2684, 0x2685, 0x2686,
0x2687, 0x2688, 0x2689, 0x268a, 0x268b, 0x268c, 0x268d, 0x268e, 0x268f,
0x2690, 0x2691, 0x2692, 0x2693, 0x2694, 0x2695, 0x2696, 0x2697, 0x2698,
0x2699, 0x269a, 0x269b, 0x269c, 0x26a0, 0x26a1, 0x26a2, 0x26a3, 0x26a4,
0x26a5, 0x26a6, 0x26a7, 0x26a8, 0x26a9, 0x26aa, 0x26ab, 0x26ac, 0x26ad,
0x26ae, 0x26af, 0x26b0, 0x26b1, 0x26b2, 0x26b3, 0x26b4, 0x26b5, 0x26b6,
0x26b7, 0x26b8, 0x26c0, 0x26c1, 0x26c2, 0x26c3, 0x26e2, 0x2701, 0x2702,
0x2703, 0x2704, 0x2706, 0x2707, 0x2708, 0x2709, 0x270c, 0x270d, 0x270e,
0x270f, 0x2710, 0x2711, 0x2712, 0x2713, 0x2714, 0x2715, 0x2716, 0x2717,
0x2718, 0x2719, 0x271a, 0x271b, 0x271c, 0x271d, 0x271e, 0x271f, 0x2720,
0x2721, 0x2722, 0x2723, 0x2724, 0x2725, 0x2726, 0x2727, 0x2729, 0x272a,
0x272b, 0x272c, 0x272d, 0x272e, 0x272f, 0x2730, 0x2731, 0x2732, 0x2733,
0x2734, 0x2735, 0x2736, 0x2737, 0x2738, 0x2739, 0x273a, 0x273b, 0x273c,
0x273d, 0x273e, 0x273f, 0x2740, 0x2741, 0x2742, 0x2743, 0x2744, 0x2745,
0x2746, 0x2747, 0x2748, 0x2749, 0x274a, 0x274b, 0x274d, 0x274f, 0x2750,
0x2751, 0x2752, 0x2756, 0x2758, 0x2759, 0x275a, 0x275b, 0x275c, 0x275d,
0x275e, 0x2761, 0x2762, 0x2763, 0x2764, 0x2765, 0x2766, 0x2767, 0x2768,
0x2769, 0x276a, 0x276b, 0x276c, 0x276d, 0x276e, 0x276f, 0x2770, 0x2771,
0x2772, 0x2773, 0x2774, 0x2775, 0x2776, 0x2777, 0x2778, 0x2779, 0x277a,
0x277b, 0x277c, 0x277d, 0x277e, 0x277f, 0x2780, 0x2781, 0x2782, 0x2783,
0x2784, 0x2785, 0x2786, 0x2787, 0x2788, 0x2789, 0x278a, 0x278b, 0x278c,
0x278d, 0x278e, 0x278f, 0x2790, 0x2791, 0x2792, 0x2793, 0x2794, 0x2798,
0x2799, 0x279a, 0x279b, 0x279c, 0x279d, 0x279e, 0x279f, 0x27a0, 0x27a1,
0x27a2, 0x27a3, 0x27a4, 0x27a5, 0x27a6, 0x27a7, 0x27a8, 0x27a9, 0x27aa,
0x27ab, 0x27ac, 0x27ad, 0x27ae, 0x27af, 0x27b1, 0x27b2, 0x27b3, 0x27b4,
0x27b5, 0x27b6, 0x27b7, 0x27b8, 0x27b9, 0x27ba, 0x27bb, 0x27bc, 0x27bd,
0x27be, 0x27c5, 0x27c6, 0x27e0, 0x27e6, 0x27e7, 0x27e8, 0x27e9, 0x27ea,
0x27eb, 0x27f0, 0x27f1, 0x27f2, 0x27f3, 0x27f4, 0x27f5, 0x27f6, 0x27f7,
0x27f8, 0x27f9, 0x27fa, 0x27fb, 0x27fc, 0x27fd, 0x27fe, 0x27ff, 0x2800,
0x2801, 0x2802, 0x2803, 0x2804, 0x2805, 0x2806, 0x2807, 0x2808, 0x2809,
0x280a, 0x280b, 0x280c, 0x280d, 0x280e, 0x280f, 0x2810, 0x2811, 0x2812,
0x2813, 0x2814, 0x2815, 0x2816, 0x2817, 0x2818, 0x2819, 0x281a, 0x281b,
0x281c, 0x281d, 0x281e, 0x281f, 0x2820, 0x2821, 0x2822, 0x2823, 0x2824,
0x2825, 0x2826, 0x2827, 0x2828, 0x2829, 0x282a, 0x282b, 0x282c, 0x282d,
0x282e, 0x282f, 0x2830, 0x2831, 0x2832, 0x2833, 0x2834, 0x2835, 0x2836,
0x2837, 0x2838, 0x2839, 0x283a, 0x283b, 0x283c, 0x283d, 0x283e, 0x283f,
0x2840, 0x2841, 0x2842, 0x2843, 0x2844, 0x2845, 0x2846, 0x2847, 0x2848,
0x2849, 0x284a, 0x284b, 0x284c, 0x284d, 0x284e, 0x284f, 0x2850, 0x2851,
0x2852, 0x2853, 0x2854, 0x2855, 0x2856, 0x2857, 0x2858, 0x2859, 0x285a,
0x285b, 0x285c, 0x285d, 0x285e, 0x285f, 0x2860, 0x2861, 0x2862, 0x2863,
0x2864, 0x2865, 0x2866, 0x2867, 0x2868, 0x2869, 0x286a, 0x286b, 0x286c,
0x286d, 0x286e, 0x286f, 0x2870, 0x2871, 0x2872, 0x2873, 0x2874, 0x2875,
0x2876, 0x2877, 0x2878, 0x2879, 0x287a, 0x287b, 0x287c, 0x287d, 0x287e,
0x287f, 0x2880, 0x2881, 0x2882, 0x2883, 0x2884, 0x2885, 0x2886, 0x2887,
0x2888, 0x2889, 0x288a, 0x288b, 0x288c, 0x288d, 0x288e, 0x288f, 0x2890,
0x2891, 0x2892, 0x2893, 0x2894, 0x2895, 0x2896, 0x2897, 0x2898, 0x2899,
0x289a, 0x289b, 0x289c, 0x289d, 0x289e, 0x289f, 0x28a0, 0x28a1, 0x28a2,
0x28a3, 0x28a4, 0x28a5, 0x28a6, 0x28a7, 0x28a8, 0x28a9, 0x28aa, 0x28ab,
0x28ac, 0x28ad, 0x28ae, 0x28af, 0x28b0, 0x28b1, 0x28b2, 0x28b3, 0x28b4,
0x28b5, 0x28b6, 0x28b7, 0x28b8, 0x28b9, 0x28ba, 0x28bb, 0x28bc, 0x28bd,
0x28be, 0x28bf, 0x28c0, 0x28c1, 0x28c2, 0x28c3, 0x28c4, 0x28c5, 0x28c6,
0x28c7, 0x28c8, 0x28c9, 0x28ca, 0x28cb, 0x28cc, 0x28cd, 0x28ce, 0x28cf,
0x28d0, 0x28d1, 0x28d2, 0x28d3, 0x28d4, 0x28d5, 0x28d6, 0x28d7, 0x28d8,
0x28d9, 0x28da, 0x28db, 0x28dc, 0x28dd, 0x28de, 0x28df, 0x28e0, 0x28e1,
0x28e2, 0x28e3, 0x28e4, 0x28e5, 0x28e6, 0x28e7, 0x28e8, 0x28e9, 0x28ea,
0x28eb, 0x28ec, 0x28ed, 0x28ee, 0x28ef, 0x28f0, 0x28f1, 0x28f2, 0x28f3,
0x28f4, 0x28f5, 0x28f6, 0x28f7, 0x28f8, 0x28f9, 0x28fa, 0x28fb, 0x28fc,
0x28fd, 0x28fe, 0x28ff, 0x2906, 0x2907, 0x290a, 0x290b, 0x2940, 0x2941,
0x2983, 0x2984, 0x29ce, 0x29cf, 0x29d0, 0x29d1, 0x29d2, 0x29d3, 0x29d4,
0x29d5, 0x29eb, 0x29fa, 0x29fb, 0x2a00, 0x2a01, 0x2a02, 0x2a0c, 0x2a0d,
0x2a0e, 0x2a0f, 0x2a10, 0x2a11, 0x2a12, 0x2a13, 0x2a14, 0x2a15, 0x2a16,
0x2a17, 0x2a18, 0x2a19, 0x2a1a, 0x2a1b, 0x2a1c, 0x2a2f, 0x2a6a, 0x2a6b,
0x2a7d, 0x2a7e, 0x2a7f, 0x2a80, 0x2a81, 0x2a82, 0x2a83, 0x2a84, 0x2a85,
0x2a86, 0x2a87, 0x2a88, 0x2a89, 0x2a8a, 0x2a8b, 0x2a8c, 0x2a8d, 0x2a8e,
0x2a8f, 0x2a90, 0x2a91, 0x2a92, 0x2a93, 0x2a94, 0x2a95, 0x2a96, 0x2a97,
0x2a98, 0x2a99, 0x2a9a, 0x2a9b, 0x2a9c, 0x2a9d, 0x2a9e, 0x2a9f, 0x2aa0,
0x2aae, 0x2aaf, 0x2ab0, 0x2ab1, 0x2ab2, 0x2ab3, 0x2ab4, 0x2ab5, 0x2ab6,
0x2ab7, 0x2ab8, 0x2ab9, 0x2aba, 0x2af9, 0x2afa, 0x2b00, 0x2b01, 0x2b02,
0x2b03, 0x2b04, 0x2b05, 0x2b06, 0x2b07, 0x2b08, 0x2b09, 0x2b0a, 0x2b0b,
0x2b0c, 0x2b0d, 0x2b0e, 0x2b0f, 0x2b10, 0x2b11, 0x2b12, 0x2b13, 0x2b14,
0x2b15, 0x2b16, 0x2b17, 0x2b18, 0x2b19, 0x2b1a, 0x2b1f, 0x2b20, 0x2b21,
0x2b22, 0x2b23, 0x2b24, 0x2b53, 0x2b54, 0x2c60, 0x2c61, 0x2c62, 0x2c63,
0x2c64, 0x2c65, 0x2c66, 0x2c67, 0x2c68, 0x2c69, 0x2c6a, 0x2c6b, 0x2c6c,
0x2c6d, 0x2c6e, 0x2c6f, 0x2c70, 0x2c71, 0x2c72, 0x2c73, 0x2c74, 0x2c75,
0x2c76, 0x2c77, 0x2c79, 0x2c7a, 0x2c7b, 0x2c7c, 0x2c7d, 0x2c7e, 0x2c7f,
0x2d00, 0x2d01, 0x2d02, 0x2d03, 0x2d04, 0x2d05, 0x2d06, 0x2d07, 0x2d08,
0x2d09, 0x2d0a, 0x2d0b, 0x2d0c, 0x2d0d, 0x2d0e, 0x2d0f, 0x2d10, 0x2d11,
0x2d12, 0x2d13, 0x2d14, 0x2d15, 0x2d16, 0x2d17, 0x2d18, 0x2d19, 0x2d1a,
0x2d1b, 0x2d1c, 0x2d1d, 0x2d1e, 0x2d1f, 0x2d20, 0x2d21, 0x2d22, 0x2d23,
0x2d24, 0x2d25, 0x2d30, 0x2d31, 0x2d32, 0x2d33, 0x2d34, 0x2d35, 0x2d36,
0x2d37, 0x2d38, 0x2d39, 0x2d3a, 0x2d3b, 0x2d3c, 0x2d3d, 0x2d3e, 0x2d3f,
0x2d40, 0x2d41, 0x2d42, 0x2d43, 0x2d44, 0x2d45, 0x2d46, 0x2d47, 0x2d48,
0x2d49, 0x2d4a, 0x2d4b, 0x2d4c, 0x2d4d, 0x2d4e, 0x2d4f, 0x2d50, 0x2d51,
0x2d52, 0x2d53, 0x2d54, 0x2d55, 0x2d56, 0x2d57, 0x2d58, 0x2d59, 0x2d5a,
0x2d5b, 0x2d5c, 0x2d5d, 0x2d5e, 0x2d5f, 0x2d60, 0x2d61, 0x2d62, 0x2d63,
0x2d64, 0x2d65, 0x2d6f, 0x2e18, 0x2e1e, 0x2e22, 0x2e23, 0x2e24, 0x2e25,
0x2e2e, 0x4dc0, 0x4dc1, 0x4dc2, 0x4dc3, 0x4dc4, 0x4dc5, 0x4dc6, 0x4dc7,
0x4dc8, 0x4dc9, 0x4dca, 0x4dcb, 0x4dcc, 0x4dcd, 0x4dce, 0x4dcf, 0x4dd0,
0x4dd1, 0x4dd2, 0x4dd3, 0x4dd4, 0x4dd5, 0x4dd6, 0x4dd7, 0x4dd8, 0x4dd9,
0x4dda, 0x4ddb, 0x4ddc, 0x4ddd, 0x4dde, 0x4ddf, 0x4de0, 0x4de1, 0x4de2,
0x4de3, 0x4de4, 0x4de5, 0x4de6, 0x4de7, 0x4de8, 0x4de9, 0x4dea, 0x4deb,
0x4dec, 0x4ded, 0x4dee, 0x4def, 0x4df0, 0x4df1, 0x4df2, 0x4df3, 0x4df4,
0x4df5, 0x4df6, 0x4df7, 0x4df8, 0x4df9, 0x4dfa, 0x4dfb, 0x4dfc, 0x4dfd,
0x4dfe, 0x4dff, 0xa4d0, 0xa4d1, 0xa4d2, 0xa4d3, 0xa4d4, 0xa4d5, 0xa4d6,
0xa4d7, 0xa4d8, 0xa4d9, 0xa4da, 0xa4db, 0xa4dc, 0xa4dd, 0xa4de, 0xa4df,
0xa4e0, 0xa4e1, 0xa4e2, 0xa4e3, 0xa4e4, 0xa4e5, 0xa4e6, 0xa4e7, 0xa4e8,
0xa4e9, 0xa4ea, 0xa4eb, 0xa4ec, 0xa4ed, 0xa4ee, 0xa4ef, 0xa4f0, 0xa4f1,
0xa4f2, 0xa4f3, 0xa4f4, 0xa4f5, 0xa4f6, 0xa4f7, 0xa4f8, 0xa4f9, 0xa4fa,
0xa4fb, 0xa4fc, 0xa4fd, 0xa4fe, 0xa4ff, 0xa644, 0xa645, 0xa646, 0xa647,
0xa64c, 0xa64d, 0xa650, 0xa651, 0xa654, 0xa655, 0xa656, 0xa657, 0xa662,
0xa663, 0xa664, 0xa665, 0xa666, 0xa667, 0xa668, 0xa669, 0xa66a, 0xa66b,
0xa66c, 0xa66d, 0xa66e, 0xa68a, 0xa68b, 0xa68c, 0xa68d, 0xa694, 0xa695,
0xa708, 0xa709, 0xa70a, 0xa70b, 0xa70c, 0xa70d, 0xa70e, 0xa70f, 0xa710,
0xa711, 0xa712, 0xa713, 0xa714, 0xa715, 0xa716, 0xa71b, 0xa71c, 0xa71d,
0xa71e, 0xa71f, 0xa722, 0xa723, 0xa724, 0xa725, 0xa726, 0xa727, 0xa728,
0xa729, 0xa72a, 0xa72b, 0xa730, 0xa731, 0xa732, 0xa733, 0xa734, 0xa735,
0xa736, 0xa737, 0xa738, 0xa739, 0xa73a, 0xa73b, 0xa73c, 0xa73d, 0xa73e,
0xa73f, 0xa740, 0xa741, 0xa746, 0xa747, 0xa748, 0xa749, 0xa74a, 0xa74b,
0xa74e, 0xa74f, 0xa750, 0xa751, 0xa752, 0xa753, 0xa756, 0xa757, 0xa764,
0xa765, 0xa766, 0xa767, 0xa780, 0xa781, 0xa782, 0xa783, 0xa789, 0xa78a,
0xa78b, 0xa78c, 0xa78d, 0xa78e, 0xa790, 0xa791, 0xa7a0, 0xa7a1, 0xa7a2,
0xa7a3, 0xa7a4, 0xa7a5, 0xa7a6, 0xa7a7, 0xa7a8, 0xa7a9, 0xa7aa, 0xa7fa,
0xa7fb, 0xa7fc, 0xa7fd, 0xa7fe, 0xa7ff, 0xef00, 0xef01, 0xef02, 0xef03,
0xef04, 0xef05, 0xef06, 0xef07, 0xef08, 0xef09, 0xef0a, 0xef0b, 0xef0c,
0xef0d, 0xef0e, 0xef0f, 0xef10, 0xef11, 0xef12, 0xef13, 0xef14, 0xef15,
0xef16, 0xef17, 0xef18, 0xef19, 0xf000, 0xf001, 0xf002, 0xf003, 0xf400,
0xf401, 0xf402, 0xf403, 0xf404, 0xf405, 0xf406, 0xf407, 0xf408, 0xf409,
0xf40a, 0xf40b, 0xf40c, 0xf40d, 0xf40e, 0xf40f, 0xf410, 0xf411, 0xf412,
0xf413, 0xf414, 0xf415, 0xf416, 0xf417, 0xf418, 0xf419, 0xf41a, 0xf41b,
0xf41c, 0xf41d, 0xf41e, 0xf41f, 0xf420, 0xf421, 0xf422, 0xf423, 0xf424,
0xf425, 0xf426, 0xf428, 0xf429, 0xf42a, 0xf42b, 0xf42c, 0xf42d, 0xf42e,
0xf42f, 0xf430, 0xf431, 0xf432, 0xf433, 0xf434, 0xf435, 0xf436, 0xf437,
0xf438, 0xf439, 0xf43a, 0xf43b, 0xf43c, 0xf43d, 0xf43e, 0xf43f, 0xf440,
0xf441, 0xf6c5, 0xfb00, 0xfb01, 0xfb02, 0xfb03, 0xfb04, 0xfb05, 0xfb06,
0xfb13, 0xfb14, 0xfb15, 0xfb16, 0xfb17, 0xfb1d, 0xfb1e, 0xfb1f, 0xfb20,
0xfb21, 0xfb22, 0xfb23, 0xfb24, 0xfb25, 0xfb26, 0xfb27, 0xfb28, 0xfb29,
0xfb2a, 0xfb2b, 0xfb2c, 0xfb2d, 0xfb2e, 0xfb2f, 0xfb30, 0xfb31, 0xfb32,
0xfb33, 0xfb34, 0xfb35, 0xfb36, 0xfb38, 0xfb39, 0xfb3a, 0xfb3b, 0xfb3c,
0xfb3e, 0xfb40, 0xfb41, 0xfb43, 0xfb44, 0xfb46, 0xfb47, 0xfb48, 0xfb49,
0xfb4a, 0xfb4b, 0xfb4c, 0xfb4d, 0xfb4e, 0xfb4f, 0xfb52, 0xfb53, 0xfb54,
0xfb55, 0xfb56, 0xfb57, 0xfb58, 0xfb59, 0xfb5a, 0xfb5b, 0xfb5c, 0xfb5d,
0xfb5e, 0xfb5f, 0xfb60, 0xfb61, 0xfb62, 0xfb63, 0xfb64, 0xfb65, 0xfb66,
0xfb67, 0xfb68, 0xfb69, 0xfb6a, 0xfb6b, 0xfb6c, 0xfb6d, 0xfb6e, 0xfb6f,
0xfb70, 0xfb71, 0xfb72, 0xfb73, 0xfb74, 0xfb75, 0xfb76, 0xfb77, 0xfb78,
0xfb79, 0xfb7a, 0xfb7b, 0xfb7c, 0xfb7d, 0xfb7e, 0xfb7f, 0xfb80, 0xfb81,
0xfb82, 0xfb83, 0xfb84, 0xfb85, 0xfb86, 0xfb87, 0xfb88, 0xfb89, 0xfb8a,
0xfb8b, 0xfb8c, 0xfb8d, 0xfb8e, 0xfb8f, 0xfb90, 0xfb91, 0xfb92, 0xfb93,
0xfb94, 0xfb95, 0xfb96, 0xfb97, 0xfb98, 0xfb99, 0xfb9a, 0xfb9b, 0xfb9c,
0xfb9d, 0xfb9e, 0xfb9f, 0xfba0, 0xfba1, 0xfba2, 0xfba3, 0xfbaa, 0xfbab,
0xfbac, 0xfbad, 0xfbd3, 0xfbd4, 0xfbd5, 0xfbd6, 0xfbd9, 0xfbda, 0xfbe8,
0xfbe9, 0xfbfc, 0xfbfd, 0xfbfe, 0xfbff, 0xfe00, 0xfe01, 0xfe02, 0xfe03,
0xfe04, 0xfe05, 0xfe06, 0xfe07, 0xfe08, 0xfe09, 0xfe0a, 0xfe0b, 0xfe0c,
0xfe0d, 0xfe0e, 0xfe0f, 0xfe20, 0xfe21, 0xfe22, 0xfe23, 0xfe70, 0xfe71,
0xfe72, 0xfe73, 0xfe74, 0xfe76, 0xfe77, 0xfe78, 0xfe79, 0xfe7a, 0xfe7b,
0xfe7c, 0xfe7d, 0xfe7e, 0xfe7f, 0xfe80, 0xfe81, 0xfe82, 0xfe83, 0xfe84,
0xfe85, 0xfe86, 0xfe87, 0xfe88, 0xfe89, 0xfe8a, 0xfe8b, 0xfe8c, 0xfe8d,
0xfe8e, 0xfe8f, 0xfe90, 0xfe91, 0xfe92, 0xfe93, 0xfe94, 0xfe95, 0xfe96,
0xfe97, 0xfe98, 0xfe99, 0xfe9a, 0xfe9b, 0xfe9c, 0xfe9d, 0xfe9e, 0xfe9f,
0xfea0, 0xfea1, 0xfea2, 0xfea3, 0xfea4, 0xfea5, 0xfea6, 0xfea7, 0xfea8,
0xfea9, 0xfeaa, 0xfeab, 0xfeac, 0xfead, 0xfeae, 0xfeaf, 0xfeb0, 0xfeb1,
0xfeb2, 0xfeb3, 0xfeb4, 0xfeb5, 0xfeb6, 0xfeb7, 0xfeb8, 0xfeb9, 0xfeba,
0xfebb, 0xfebc, 0xfebd, 0xfebe, 0xfebf, 0xfec0, 0xfec1, 0xfec2, 0xfec3,
0xfec4, 0xfec5, 0xfec6, 0xfec7, 0xfec8, 0xfec9, 0xfeca, 0xfecb, 0xfecc,
0xfecd, 0xfece, 0xfecf, 0xfed0, 0xfed1, 0xfed2, 0xfed3, 0xfed4, 0xfed5,
0xfed6, 0xfed7, 0xfed8, 0xfed9, 0xfeda, 0xfedb, 0xfedc, 0xfedd, 0xfede,
0xfedf, 0xfee0, 0xfee1, 0xfee2, 0xfee3, 0xfee4, 0xfee5, 0xfee6, 0xfee7,
0xfee8, 0xfee9, 0xfeea, 0xfeeb, 0xfeec, 0xfeed, 0xfeee, 0xfeef, 0xfef0,
0xfef1, 0xfef2, 0xfef3, 0xfef4, 0xfef5, 0xfef6, 0xfef7, 0xfef8, 0xfef9,
0xfefa, 0xfefb, 0xfefc, 0xfeff, 0xfff9, 0xfffa, 0xfffb, 0xfffc, 0xfffd,
0x10300, 0x10301, 0x10302, 0x10303, 0x10304, 0x10305, 0x10306, 0x10307,
0x10308, 0x10309, 0x1030a, 0x1030b, 0x1030c, 0x1030d, 0x1030e, 0x1030f,
0x10310, 0x10311, 0x10312, 0x10313, 0x10314, 0x10315, 0x10316, 0x10317,
0x10318, 0x10319, 0x1031a, 0x1031b, 0x1031c, 0x1031d, 0x1031e, 0x10320,
0x10321, 0x10322, 0x10323, 0x1d300, 0x1d301, 0x1d302, 0x1d303, 0x1d304,
0x1d305, 0x1d306, 0x1d307, 0x1d308, 0x1d309, 0x1d30a, 0x1d30b, 0x1d30c,
0x1d30d, 0x1d30e, 0x1d30f, 0x1d310, 0x1d311, 0x1d312, 0x1d313, 0x1d314,
0x1d315, 0x1d316, 0x1d317, 0x1d318, 0x1d319, 0x1d31a, 0x1d31b, 0x1d31c,
0x1d31d, 0x1d31e, 0x1d31f, 0x1d320, 0x1d321, 0x1d322, 0x1d323, 0x1d324,
0x1d325, 0x1d326, 0x1d327, 0x1d328, 0x1d329, 0x1d32a, 0x1d32b, 0x1d32c,
0x1d32d, 0x1d32e, 0x1d32f, 0x1d330, 0x1d331, 0x1d332, 0x1d333, 0x1d334,
0x1d335, 0x1d336, 0x1d337, 0x1d338, 0x1d339, 0x1d33a, 0x1d33b, 0x1d33c,
0x1d33d, 0x1d33e, 0x1d33f, 0x1d340, 0x1d341, 0x1d342, 0x1d343, 0x1d344,
0x1d345, 0x1d346, 0x1d347, 0x1d348, 0x1d349, 0x1d34a, 0x1d34b, 0x1d34c,
0x1d34d, 0x1d34e, 0x1d34f, 0x1d350, 0x1d351, 0x1d352, 0x1d353, 0x1d354,
0x1d355, 0x1d356, 0x1d538, 0x1d539, 0x1d53b, 0x1d53c, 0x1d53d, 0x1d53e,
0x1d540, 0x1d541, 0x1d542, 0x1d543, 0x1d544, 0x1d546, 0x1d54a, 0x1d54b,
0x1d54c, 0x1d54d, 0x1d54e, 0x1d54f, 0x1d550, 0x1d552, 0x1d553, 0x1d554,
0x1d555, 0x1d556, 0x1d557, 0x1d558, 0x1d559, 0x1d55a, 0x1d55b, 0x1d55c,
0x1d55d, 0x1d55e, 0x1d55f, 0x1d560, 0x1d561, 0x1d562, 0x1d563, 0x1d564,
0x1d565, 0x1d566, 0x1d567, 0x1d568, 0x1d569, 0x1d56a, 0x1d56b, 0x1d5a0,
0x1d5a1, 0x1d5a2, 0x1d5a3, 0x1d5a4, 0x1d5a5, 0x1d5a6, 0x1d5a7, 0x1d5a8,
0x1d5a9, 0x1d5aa, 0x1d5ab, 0x1d5ac, 0x1d5ad, 0x1d5ae, 0x1d5af, 0x1d5b0,
0x1d5b1, 0x1d5b2, 0x1d5b3, 0x1d5b4, 0x1d5b5, 0x1d5b6, 0x1d5b7, 0x1d5b8,
0x1d5b9, 0x1d5ba, 0x1d5bb, 0x1d5bc, 0x1d5bd, 0x1d5be, 0x1d5bf, 0x1d5c0,
0x1d5c1, 0x1d5c2, 0x1d5c3, 0x1d5c4, 0x1d5c5, 0x1d5c6, 0x1d5c7, 0x1d5c8,
0x1d5c9, 0x1d5ca, 0x1d5cb, 0x1d5cc, 0x1d5cd, 0x1d5ce, 0x1d5cf, 0x1d5d0,
0x1d5d1, 0x1d5d2, 0x1d5d3, 0x1d7d8, 0x1d7d9, 0x1d7da, 0x1d7db, 0x1d7dc,
0x1d7dd, 0x1d7de, 0x1d7df, 0x1d7e0, 0x1d7e1, 0x1d7e2, 0x1d7e3, 0x1d7e4,
0x1d7e5, 0x1d7e6, 0x1d7e7, 0x1d7e8, 0x1d7e9, 0x1d7ea, 0x1d7eb, 0x1f030,
0x1f031, 0x1f032, 0x1f033, 0x1f034, 0x1f035, 0x1f036, 0x1f037, 0x1f038,
0x1f039, 0x1f03a, 0x1f03b, 0x1f03c, 0x1f03d, 0x1f03e, 0x1f03f, 0x1f040,
0x1f041, 0x1f042, 0x1f043, 0x1f044, 0x1f045, 0x1f046, 0x1f047, 0x1f048,
0x1f049, 0x1f04a, 0x1f04b, 0x1f04c, 0x1f04d, 0x1f04e, 0x1f04f, 0x1f050,
0x1f051, 0x1f052, 0x1f053, 0x1f054, 0x1f055, 0x1f056, 0x1f057, 0x1f058,
0x1f059, 0x1f05a, 0x1f05b, 0x1f05c, 0x1f05d, 0x1f05e, 0x1f05f, 0x1f060,
0x1f061, 0x1f062, 0x1f063, 0x1f064, 0x1f065, 0x1f066, 0x1f067, 0x1f068,
0x1f069, 0x1f06a, 0x1f06b, 0x1f06c, 0x1f06d, 0x1f06e, 0x1f06f, 0x1f070,
0x1f071, 0x1f072, 0x1f073, 0x1f074, 0x1f075, 0x1f076, 0x1f077, 0x1f078,
0x1f079, 0x1f07a, 0x1f07b, 0x1f07c, 0x1f07d, 0x1f07e, 0x1f07f, 0x1f080,
0x1f081, 0x1f082, 0x1f083, 0x1f084, 0x1f085, 0x1f086, 0x1f087, 0x1f088,
0x1f089, 0x1f08a, 0x1f08b, 0x1f08c, 0x1f08d, 0x1f08e, 0x1f08f, 0x1f090,
0x1f091, 0x1f092, 0x1f093, 0x1f0a0, 0x1f0a1, 0x1f0a2, 0x1f0a3, 0x1f0a4,
0x1f0a5, 0x1f0a6, 0x1f0a7, 0x1f0a8, 0x1f0a9, 0x1f0aa, 0x1f0ab, 0x1f0ac,
0x1f0ad, 0x1f0ae, 0x1f0b1, 0x1f0b2, 0x1f0b3, 0x1f0b4, 0x1f0b5, 0x1f0b6,
0x1f0b7, 0x1f0b8, 0x1f0b9, 0x1f0ba, 0x1f0bb, 0x1f0bc, 0x1f0bd, 0x1f0be,
0x1f0c1, 0x1f0c2, 0x1f0c3, 0x1f0c4, 0x1f0c5, 0x1f0c6, 0x1f0c7, 0x1f0c8,
0x1f0c9, 0x1f0ca, 0x1f0cb, 0x1f0cc, 0x1f0cd, 0x1f0ce, 0x1f0cf, 0x1f0d1,
0x1f0d2, 0x1f0d3, 0x1f0d4, 0x1f0d5, 0x1f0d6, 0x1f0d7, 0x1f0d8, 0x1f0d9,
0x1f0da, 0x1f0db, 0x1f0dc, 0x1f0dd, 0x1f0de, 0x1f0df, 0x1f42d, 0x1f42e,
0x1f431, 0x1f435, 0x1f600, 0x1f601, 0x1f602, 0x1f603, 0x1f604, 0x1f605,
0x1f606, 0x1f607, 0x1f608, 0x1f609, 0x1f60a, 0x1f60b, 0x1f60c, 0x1f60d,
0x1f60e, 0x1f60f, 0x1f610, 0x1f611, 0x1f612, 0x1f613, 0x1f614, 0x1f615,
0x1f616, 0x1f617, 0x1f618, 0x1f619, 0x1f61a, 0x1f61b, 0x1f61c, 0x1f61d,
0x1f61e, 0x1f61f, 0x1f620, 0x1f621, 0x1f622, 0x1f623, 0x1f625, 0x1f626,
0x1f627, 0x1f628, 0x1f629, 0x1f62a, 0x1f62b, 0x1f62d, 0x1f62e, 0x1f62f,
0x1f630, 0x1f631, 0x1f632, 0x1f633, 0x1f634, 0x1f635, 0x1f636, 0x1f637,
0x1f638, 0x1f639, 0x1f63a, 0x1f63b, 0x1f63c, 0x1f63d, 0x1f63e, 0x1f63f,
0x1f640
))
# Cache of open fonts
FONT_CACHE = {}
def is_base(text):
'''
    Checks whether text can be rendered with the base (DejaVu) font,
    i.e. whether every character is covered by BASE_CHARS.
'''
return min([ord(char) in BASE_CHARS for char in text])
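# For instance, is_base('Hello') is True, while any text containing CJK
# codepoints (which are absent from BASE_CHARS) yields False.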
def get_font(size, bold=False, base_font=True):
'''
Returns PIL font object matching parameters.
'''
cache_key = '%d-%s-%s' % (size, bold, base_font)
if cache_key not in FONT_CACHE:
if base_font:
if bold:
name = 'DejaVuSans-Bold.ttf'
else:
name = 'DejaVuSans.ttf'
else:
name = 'DroidSansFallback.ttf'
FONT_CACHE[cache_key] = ImageFont.truetype(
os.path.join(appsettings.TTF_PATH, name),
size
)
return FONT_CACHE[cache_key]
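# Minimal usage sketch (assumes appsettings.TTF_PATH points at the bundled
# DejaVu/Droid font files):
#
#   font = get_font(11, bold=False, base_font=is_base(text))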
|
miyataken999/weblate
|
weblate/trans/fonts.py
|
Python
|
gpl-3.0
| 49,812
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
import MySQLdb
import string
import time
import datetime
import os
import re
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import ConfigParser
import smtplib
from email.mime.text import MIMEText
from email.message import Message
from email.header import Header
from pymongo import MongoClient
import dba_crypto as crypt
basePath = "/usr/local/camelbell"
fCnf = "%s/etc/config.ini" % (basePath)
def get_item(data_dict,item,default='-1'):
if data_dict.has_key(item):
return data_dict[item]
else:
return default
def get_config(group,config_name,default='-1'):
config = ConfigParser.ConfigParser()
config.readfp(open(fCnf,'rw'))
if config.has_option(group,config_name):
config_value=config.get(group,config_name).strip(' ').strip('\'').strip('\"')
else:
config_value=default
return config_value
def get_option(key):
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
cursor = conn.cursor()
sql="select value from options where name='%s'" % (key)
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=(cursor.fetchone())[0]
cursor.close()
conn.close()
return result
def filters(data):
    return data.strip(' ').strip('\n').strip('\r')
server_key = 'monitor_server'
host = get_config(server_key,'host')
port = get_config(server_key,'port')
user = get_config(server_key,'user')
passwd = get_config(server_key,'passwd')
dbname = get_config(server_key,'dbname')
server_mongodb_key = 'monitor_server_mongodb'
mongodb_host = get_config(server_mongodb_key,'host')
mongodb_port = get_config(server_mongodb_key,'port')
mongodb_user = get_config(server_mongodb_key,'user')
mongodb_passwd = get_config(server_mongodb_key,'passwd')
mongodb_dbname = get_config(server_mongodb_key,'dbname')
mongodb_replicaSet = get_config(server_mongodb_key,'replicaSet')
'''
# pymongo Collection API reference:
http://api.mongodb.com/python/current/api/pymongo/collection.html?_ga=1.219309790.610772200.1468379950#pymongo.collection.Collection.find
'''
def mongodb_save(tb_name, params):
inpParams = params
inpParams.pop("id", None)
connect_mongodb = None
try:
connect_mongodb = MongoClient(host=mongodb_host,port=int(mongodb_port),replicaSet=mongodb_replicaSet)
db = connect_mongodb.get_database(mongodb_dbname)
db.authenticate(mongodb_user, mongodb_passwd, mechanism='SCRAM-SHA-1')
tb = db[tb_name]
tb.insert_one(inpParams)
#print tb.find_one()
#print tb.find_one(sort=[("create_time",-1)])
print tb.count()
'''
tlines = tb.find(limit=10).sort([("create_time",-1)])
for tline in tlines:
print tline
'''
except Exception, e:
logger_msg="insert alarm to mongodb %s:%s/%s,%s/%s : %s" %(mongodb_host,mongodb_port,mongodb_dbname,mongodb_user,mongodb_passwd,e)
print logger_msg
finally:
if connect_mongodb != None:
connect_mongodb.close()
def other_save(tb_name, params):
mongodb_save(tb_name, params)
def mysql_exec_many(sqls,params=None):
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
curs.execute("set session sql_mode=''")
for i in range(0,len(sqls)):
sql = sqls[i]
            if params != None and params[i] != '':
curs.execute(sql,params[i])
else:
curs.execute(sql)
curs.close()
conn.commit()
conn.close()
except Exception,e:
print "mysql execute: %s,%s" % (sqls, e)
conn.rollback()
def mysql_exec(sql,params=''):
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
'''
curs.execute("set session sql_mode=''")
curs.execute("SELECT CONNECTION_ID() AS cid")
cnntID = (curs.fetchall())[0][0]
#print sql
'''
        if params != '':
if len(params) > 0 and (isinstance(params[0], tuple) or isinstance(params[0], list)):
curs.executemany(sql, params)
else:
curs.execute(sql,params)
else:
curs.execute(sql)
curs.close()
conn.commit()
conn.close()
except Exception, e:
print "Error: %s" % (sql)
print "Exception: %s" % (e)
conn.rollback()
def mysql_query(sql):
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
cursor = conn.cursor()
#cursor.execute("set session sql_mode=''")
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=cursor.fetchall()
cursor.close()
conn.close()
return result
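# Illustrative sketch, not part of the original module. mysql_exec()
# accepts either a single parameter tuple or a list of tuples, in which
# case it switches to executemany(). Table and column names here are
# hypothetical.
def _demo_insert_rows():
    sql = "insert into demo_tbl(k, v) values(%s, %s);"
    mysql_exec(sql, [('a', 1), ('b', 2)])  # batch -> executemany()
    mysql_exec(sql, ('c', 3))              # single tuple -> execute()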
send_mail_max_count = get_option('send_mail_max_count')
send_mail_sleep_time = get_option('send_mail_sleep_time')
mail_to_list_common = get_option('send_mail_to_list')
def add_alarm(server_id,tags,db_host,db_port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail=1,send_mail_to_list=mail_to_list_common, send_sms=0, send_sms_to_list=''):
inpParams = locals()
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
sql="insert into alarm(server_id,tags,host,port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param=(server_id,tags,db_host,db_port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list)
curs.execute(sql,param)
if send_mail == 1:
temp_sql = "insert into alarm_temp(server_id,ip,db_type,alarm_item,alarm_type) values(%s,%s,%s,%s,%s);"
temp_param = (server_id,db_host,db_type,alarm_item,'mail')
curs.execute(temp_sql,temp_param)
if send_sms == 1:
temp_sql = "insert into alarm_temp(server_id,ip,db_type,alarm_item,alarm_type) values(%s,%s,%s,%s,%s);"
temp_param = (server_id,db_host,db_type,alarm_item,'sms')
curs.execute(temp_sql,temp_param)
if (send_mail ==0 and send_sms==0):
temp_sql = "insert into alarm_temp(server_id,ip,db_type,alarm_item,alarm_type) values(%s,%s,%s,%s,%s);"
temp_param = (server_id,db_host,db_type,alarm_item,'none')
curs.execute(temp_sql,temp_param)
conn.commit()
curs.close()
conn.close()
except Exception,e:
print "Add alarm: " + str(e)
# insert mongodb
mongodb_save("alarm_logs",inpParams)
def check_if_ok(server_id,tags,db_host,db_port,create_time,db_type,alarm_item,alarm_value,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list):
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
if db_type=='os':
alarm_count=curs.execute("select id from alarm_temp where ip='%s' and alarm_item='%s' ;" %(db_host,alarm_item))
mysql_exec("delete from alarm_temp where ip='%s' and alarm_item='%s' ;" %(db_host,alarm_item),'')
else:
alarm_count=curs.execute("select id from alarm_temp where server_id=%s and db_type='%s' and alarm_item='%s' ;" %(server_id,db_type,alarm_item))
mysql_exec("delete from alarm_temp where server_id=%s and db_type='%s' and alarm_item='%s' ;" %(server_id,db_type,alarm_item),'')
if int(alarm_count) > 0 :
sql="insert into alarm(server_id,tags,host,port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param=(server_id,tags,db_host,db_port,create_time,db_type,alarm_item,alarm_value,'ok',message,send_mail,send_mail_to_list,send_sms,send_sms_to_list)
mysql_exec(sql,param)
curs.close()
conn.close()
def update_send_mail_status(server,db_type,alarm_item,send_mail,send_mail_max_count):
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
if db_type == "os":
alarm_count=curs.execute("select id from alarm_temp where ip='%s' and db_type='%s' and alarm_item='%s' and alarm_type='mail' ;" %(server,db_type,alarm_item))
else:
alarm_count=curs.execute("select id from alarm_temp where server_id=%s and db_type='%s' and alarm_item='%s' and alarm_type='mail' ;" %(server,db_type,alarm_item))
if int(alarm_count) >= int(send_mail_max_count) :
send_mail = 0
else:
send_mail = send_mail
curs.close()
conn.close()
return send_mail
def update_send_sms_status(server,db_type,alarm_item,send_sms,send_sms_max_count):
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
if db_type == "os":
alarm_count=curs.execute("select id from alarm_temp where ip='%s' and db_type='%s' and alarm_item='%s' and alarm_type='sms' ;" %(server,db_type,alarm_item))
else:
alarm_count=curs.execute("select id from alarm_temp where server_id=%s and db_type='%s' and alarm_item='%s' and alarm_type='sms' ;" %(server,db_type,alarm_item))
if int(alarm_count) >= int(send_sms_max_count) :
send_sms = 0
else:
send_sms = send_sms
curs.close()
conn.close()
return send_sms
def check_db_status(server_id,db_host,db_port,tags,db_type):
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
sql="select id from db_status where host='%s' and port=%s " % (db_host, db_port)
count=curs.execute(sql)
if count ==0:
if db_type=='mysql':
sort=1
elif db_type=='oracle':
sort=2
elif db_type=='mongodb':
sort=3
elif db_type=='redis':
sort=4
else:
sort=0
sql="replace into db_status(server_id,host,port,tags,db_type,db_type_sort) values(%s,%s,%s,%s,%s,%s);"
param=(server_id,db_host,str(db_port),tags,db_type,str(sort))
curs.execute(sql,param)
conn.commit()
except Exception,e:
print "Check db status table: " + str(e)
finally:
curs.close()
conn.close()
def update_db_status_init(server_id,role,version,db_host,db_port,tags):
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
#sql="replace into db_status(server_id,host,port,tags,version,role,connect) values(%s,%s,%s,%s,%s,%s,%s)"
#param=(server_id,host,port,tags,version,role,0)
#curs.execute(sql,param)
curs.execute("update db_status set role='%s',version='%s',tags='%s' where host='%s' and port='%s';" %(role,version,tags,db_host,db_port))
conn.commit()
except Exception, e:
print "update db status init: " + str(e)
finally:
curs.close()
conn.close()
def update_db_status_more(vals, db_host,db_port=''):
try:
setCols = []
for val in vals:
field = val[0]
value = val[1]
field_tips=field+'_tips'
if not cmp("-1",value):
value_tips='no data'
else:
(alarm_time,alarm_item,alarm_value,alarm_level) = val[2:]
value_tips="""
item: %s\n<br/>
value: %s\n<br/>
level: %s\n<br/>
time: %s\n<br/>
""" %(alarm_item,alarm_value,alarm_level,alarm_time)
setCols.append("%s='%s'" % (field, value))
setCols.append("%s='%s'" % (field_tips, value_tips))
if cmp('', db_port) and int(db_port) >0:
updSql = "update db_status set %s where host='%s' and port='%s';" %(",".join(setCols),db_host,db_port)
else:
updSql = "update db_status set %s where host='%s';" %(",".join(setCols),db_host)
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
curs.execute(updSql)
conn.commit()
except Exception, e:
print "update db status more: " + str(e)
print db_host,db_port, vals
finally:
curs.close()
conn.close()
def update_db_status(field,value,db_host,db_port,alarm_time,alarm_item,alarm_value,alarm_level):
try:
field_tips=field+'_tips'
if value==-1:
value_tips='no data'
else:
value_tips="""
item: %s\n<br/>
value: %s\n<br/>
level: %s\n<br/>
time: %s\n<br/>
""" %(alarm_item,alarm_value,alarm_level,alarm_time)
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
if cmp('', db_port) and int(db_port) >0:
curs.execute("update db_status set %s='%s',%s='%s' where host='%s' and port='%s';" %(field,value,field_tips,value_tips,db_host,db_port))
else:
curs.execute("update db_status set %s='%s',%s='%s' where host='%s';" %(field,value,field_tips,value_tips,db_host))
conn.commit()
except Exception, e:
print "update db status: " + str(e)
print field,value,db_host,db_port,alarm_time,alarm_item,alarm_value,alarm_level
finally:
curs.close()
conn.close()
def update_check_time():
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
curs = conn.cursor()
localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
curs.execute("update lepus_status set lepus_value='%s' where lepus_variables='lepus_checktime';" %(localtime))
conn.commit()
except Exception, e:
print "update check time: " + str(e)
finally:
curs.close()
conn.close()
def flush_hosts():
    conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
    conn.select_db(dbname)
    cursor = conn.cursor()
    cursor.execute('flush hosts;')
    cursor.close()
    conn.close()
def get_mysql_status(cursor):
    cursor.execute('show global status;')
    data_list=cursor.fetchall()
    data_dict={}
    for item in data_list:
        data_dict[item[0]] = item[1]
    return data_dict
def get_mysql_variables(cursor):
    cursor.execute('show global variables;')
    data_list=cursor.fetchall()
    data_dict={}
    for item in data_list:
        data_dict[item[0]] = item[1]
    return data_dict
def get_mysql_version(cursor):
    cursor.execute('select version();')
    return cursor.fetchone()[0]
##################################### mail ##############################################
mail_host = get_option('smtp_host')
mail_port = int(get_option('smtp_port'))
mail_user = get_option('smtp_user')
mail_pass = get_option('smtp_pass')
mail_send_from = get_option('mailfrom')
def send_mail(to_list,sub,content):
    '''
    to_list: recipients
    sub: subject
    content: message body
    e.g. send_mail("aaa@126.com","sub","content")
    '''
#me=mail_user+"<</span>"+mail_user+"@"+mail_postfix+">"
me=mail_send_from
msg = MIMEText(content, _subtype='html', _charset='utf8')
msg['Subject'] = "Camelbell %s" % (Header(sub,'utf8'))
msg['From'] = Header(me,'utf8')
msg['To'] = ";".join(to_list)
try:
smtp = smtplib.SMTP()
smtp.connect(mail_host,mail_port)
smtp.login(mail_user,mail_pass)
smtp.sendmail(me,to_list, msg.as_string())
smtp.close()
return True
except Exception, e:
print str(e)
return False
##################################### db_server_os ##############################################
def init_server_os():
try:
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,port=int(port),connect_timeout=5,charset='utf8')
conn.select_db(dbname)
cursor = conn.cursor()
#print "disable os monitor"
cursor.execute("update db_servers_os set monitor = 0")
conn.commit()
# insert/update
dbs = ["mysql", "oracle", "mongodb", "redis"]
for db in dbs:
#print "insert/update %s" % (db)
insSql = "insert into db_servers_os (host, tags, monitor) " \
+ " SELECT HOST,TAGS,max(monitor_os) as dm from db_servers_%s d group by host " % (db) \
+ " ON DUPLICATE KEY UPDATE monitor=IF(monitor=0, VALUES(monitor), monitor)"
cursor.execute(insSql)
conn.commit()
except Exception,e:
print "Fail init_server_os: %s" %(e)
return False
finally:
cursor.close()
conn.close()
return True
##################################### salt ##############################################
def doSaltCmd(saltCmd):
#print saltCmd
retry = 3
for i in range(0, retry):
cmdRes = os.popen(saltCmd).read()
if re.search('^No minions matched the target', cmdRes):
print "No minions matched the target try %s" % (i+1)
continue
else:
return cmdRes.rstrip("\n")
return None
def exeSaltCmd(ip, cmd):
sCmd = '''salt --async '%s' cmd.run "%s" ''' % (ip, cmd.replace('"','\\"').replace('$','\\$'))
# Executed command with job ID: 20160331112308102201
sRes = doSaltCmd(sCmd)
if sRes != None:
sVals = sRes.split()
jobID = sVals[len(sVals)-1]
time.sleep(3)
retry = 3
for i in range(0, retry):
sJobCmd = '''salt-run --out='json' jobs.lookup_jid %s ''' % (jobID)
jobRes = doSaltCmd(sJobCmd)
#print cmdRes
if re.search('^No minions matched the target', jobRes):
continue
else:
saltRes = json.loads(jobRes).get(ip)
if re.search('^Minion did not return', str(saltRes)):
continue
else:
return saltRes
return None
def exeSaltAsyncCmd(ip, cmd):
sCmd = '''salt --async '%s' cmd.run "%s" ''' % (ip, cmd.replace('"','\\"').replace('$','\\$'))
# Executed command with job ID: 20160331112308102201
sRes = doSaltCmd(sCmd)
if sRes != None and cmp('', sRes) and re.search("^Executed", sRes):
sVals = sRes.split()
jobID = sVals[len(sVals)-1]
return jobID
#print sCmd, sRes
return None
def getSaltJobByID(ip, jobID):
retry = 3
for i in range(0, retry):
sJobCmd = '''salt-run --out='json' jobs.lookup_jid %s ''' % (jobID)
jobRes = doSaltCmd(sJobCmd)
#print cmdRes
if re.search('^No minions matched the target', jobRes):
print "No minions matched the target try %s" % (i+1)
continue
else:
saltRes = json.loads(jobRes).get(ip)
if re.search('^Minion did not return', str(saltRes)):
print "Minion did not return try %s" % (i+1)
continue
else:
return saltRes
return None
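# Illustrative sketch, not part of the original module: fire a command
# asynchronously, then poll its job by ID. The target IP is hypothetical
# and a working salt master/minion setup is assumed.
def _demo_async_uptime(ip='10.0.0.1'):
    job_id = exeSaltAsyncCmd(ip, 'uptime')
    if job_id is None:
        return None
    time.sleep(3)
    return getSaltJobByID(ip, job_id)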
def checkSaltKey(ip):
if exeSaltCmd(ip, "hostname") != None:
return True
else:
return False
def encode(str):
return crypt.sub_encrypt(str)
def decode(str):
return crypt.sub_decrypt(str)
|
G8bao7/camelbell-server
|
include/functions.py
|
Python
|
gpl-3.0
| 19,363
|
#!/usr/bin/env python
# encoding: utf-8
"""
views.py
Created by Christophe VAN FRACKEM on 2014/05/25.
Copyright (c) 2014 Tiss'Page. All rights reserved.
"""
__author__ = 'Christophe VAN FRACKEM <contact@tisspage.fr>'
__version__= '0.0.1'
__copyright__ = '© 2014 Tiss\'Page'
from django.shortcuts import render_to_response
from django.views.generic import View, TemplateView, FormView, UpdateView
from django.http import HttpResponseRedirect, HttpResponse
from django.core.mail import send_mail, BadHeaderError
from django.core.mail import EmailMultiAlternatives
from django.contrib import messages
from website.forms import ContactForm
class ContactFormView(FormView):
form_class=ContactForm
success_url = '/'
    def get_context_data(self, **kwargs):
        context = super(ContactFormView, self).get_context_data(**kwargs)
        context.update(form=ContactForm())
        return context
def form_valid(self, form):
subject = u'Contact via le site tisspage.fr'
from_email = 'contact@tisspage.fr'
to = 'contact@tisspage.fr'
text_content = 'Un internaute vient de vous contacter via le formulaire de contact de votre site Internet.'
html_content = 'Un internaute vient de vous contacter via le formulaire de contact de votre site Internet.<br><br>'
html_content += u'<strong>Son email :</strong><a href="mailto:{email}"> {email}</a><br>'.format(email=form.cleaned_data.get('email'))
html_content += u'<strong>Son message :</strong> <br>{message}'.format(message=form.cleaned_data.get('message'))
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
messages.add_message(self.request, messages.INFO, 'Votre message a bien été envoyé. Je vous contacterai dans les plus brefs délais.')
return HttpResponseRedirect(self.get_success_url())
class PageView(ContactFormView, FormView):
template_name="index.html"
class MentionsView(ContactFormView, FormView):
template_name="mentions.html"
|
tisspage/resume-website
|
website/views.py
|
Python
|
gpl-3.0
| 2,019
|
#!/usr/bin/python
import glob, gzip, csv, sys, os, copy, re
csv.register_dialect('tab', delimiter='\t', quoting=csv.QUOTE_NONE)
def usage(msg=None):
if msg==None:
print 'Usage: plot.py [OPTIONS] <dir>'
print 'Options:'
print ' -H, --highlight +group1,-group2 Highlight calls shared within group1 but not present in group2'
print ' -i, --interactive Run interactively'
print ' -l, --min-length <num> Filter input regions shorter than this [0]'
print ' -n, --min-markers <num> Filter input regions with fewer marker than this [0]'
print ' -o, --outfile <file> Output file name [plot.png]'
print ' -q, --min-qual <num> Filter input regions with quality smaller than this [0]'
print ' -r, --region [^]<chr|chr:beg-end> Plot this chromosome/region only'
print ' -s, --samples <file> List of samples to show, rename or group: "name[\\tnew_name[\\tgroup]]"'
print ' -h, --help This usage text'
print 'Matplotlib options:'
print ' +adj, --adjust <str> Set plot adjust [bottom=0.18,left=0.07,right=0.98]'
print ' +dpi, --dpi <num> Set bitmap DPI [150]'
print ' +sxt, --show-xticks Show x-ticks (genomic coordinate)'
print ' +xlb, --xlabel <str> Set x-label'
print ' +xli, --xlimit <num> Extend x-range by this fraction [0.05]'
else:
print msg
sys.exit(1)
dir = None
regs = None
min_length = 0
min_markers = 0
min_qual = 0
interactive = False
sample_file = None
highlight = None
outfile = None
adjust = 'bottom=0.18,left=0.07,right=0.98'
dpi = 150
xlim = 0.05
show_xticks = False
xlabel = None
if len(sys.argv) < 2: usage()
args = sys.argv[1:]
while len(args):
if args[0]=='-r' or args[0]=='--region':
args = args[1:]
regs = args[0]
elif args[0]=='-i' or args[0]=='--interactive':
interactive = True
elif args[0]=='-l' or args[0]=='--min-length':
args = args[1:]
min_length = float(args[0])
elif args[0]=='-n' or args[0]=='--min-markers':
args = args[1:]
min_markers = float(args[0])
elif args[0]=='-o' or args[0]=='--outfile':
args = args[1:]
outfile = args[0]
elif args[0]=='-q' or args[0]=='--min-qual':
args = args[1:]
min_qual = float(args[0])
elif args[0]=='-H' or args[0]=='--highlight':
args = args[1:]
highlight = args[0]
elif args[0]=='-s' or args[0]=='--samples':
args = args[1:]
sample_file = args[0]
elif args[0]=='-?' or args[0]=='-h' or args[0]=='--help':
usage()
elif args[0]=='+adj' or args[0]=='--adjust':
args = args[1:]
adjust = args[0]
elif args[0]=='+dpi' or args[0]=='--dpi':
args = args[1:]
dpi = float(args[0])
elif args[0]=='+xlb' or args[0]=='--xlabel':
args = args[1:]
xlabel = args[0]
elif args[0]=='+sxt' or args[0]=='--show-xticks':
show_xticks = True
elif args[0]=='+xli' or args[0]=='--xlimit':
args = args[1:]
xlim = float(args[0])
else:
dir = args[0]
args = args[1:]
if interactive and outfile!=None: usage("Use -i, --interactive or -o, --outfile, but not both")
if not interactive and outfile==None: outfile = 'plot.png'
def wrap_hash(**args): return args
adjust = eval("wrap_hash("+adjust+")")
import matplotlib as mpl
for gui in ['TKAgg','GTKAgg','Qt4Agg','WXAgg','MacOSX']:
try:
mpl.use(gui,warn=False, force=True)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
break
except:
continue
cols = [ '#337ab7', '#5cb85c', '#5bc0de', '#f0ad4e', '#d9534f', 'grey', 'black' ]
mpl.rcParams['axes.color_cycle'] = cols
globstr = os.path.join(dir, '*.txt.gz')
fnames = glob.glob(globstr)
if len(fnames)==0: usage("No data files found in \""+dir+"\"")
def parse_regions(str):
if str==None: return None
regs = { 'inc':[], 'exc':[] }
list = str.split(',')
key = 'inc'
if list[0][0]=='^':
key = 'exc'
list[0] = list[0][1:]
for reg in list:
x = reg.split(':')
chr = x[0]
beg = 0
end = (1<<32)-1
if len(x)>1:
(beg,end) = x[1].split('-')
beg = float(beg)
end = float(end)
regs[key].append({'chr':chr,'beg':beg,'end':end})
return regs
def region_overlap(regs,chr,beg,end):
if regs==None: return (beg,end)
if len(regs['exc'])>0:
for reg in regs['exc']:
if chr==reg['chr']: return None
return (beg,end)
if len(regs['inc'])==0: return (beg,end)
for reg in regs['inc']:
if chr!=reg['chr']: continue
if beg>reg['end']: continue
if end<reg['beg']: continue
if beg<reg['beg']: beg = reg['beg']
if end>reg['end']: end = reg['end']
return (beg,end)
return None
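# Illustrative sketch, not part of the original script: how the two
# region helpers above interact for a made-up region string.
def _demo_region_helpers():
    regs = parse_regions('1:5-10')
    # clips [2,7] against the inclusion window -> (5.0, 7)
    return region_overlap(regs, '1', 2, 7)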
def parse_outfile(fname):
files = re.split(r',',fname)
bname = re.search(r'^(.+)\.[^.]+$', files[0]).group(1)
for i in range(len(files)-1):
files[i+1] = bname+"."+files[i+1]
return files
def next_region(rgs):
min = None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min==None:
min = [0,0]
min[0] = reg[0]
min[1] = reg[1]
if min[0] > reg[0]: min[0] = reg[0]
if min==None: return None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min[1] > reg[1]: min[1] = reg[1]
if min[1] > reg[0] - 1 and min[0] != reg[0]: min[1] = reg[0] - 1
    return min
def merge_regions(rg):
rgs = copy.deepcopy(rg)
out = {}
while True:
min = next_region(rgs)
if min==None: break
beg = min[0]
end = min[1]
smpls = []
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if reg[0] > end: continue
if reg[1] > end:
rgs[smpl][0][0] = end + 1
else:
rgs[smpl] = rgs[smpl][1:]
if smpl not in out: out[smpl] = []
smpls.append(smpl)
if len(smpls)>1:
for smpl in smpls: out[smpl].append([beg,end])
return out
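# Illustrative sketch, not part of the original script: merge_regions()
# keeps only the sub-intervals shared by at least two samples.
def _demo_merge_regions():
    rg = {'A': [[1, 10]], 'B': [[5, 20]]}
    # -> {'A': [[5, 10]], 'B': [[5, 10]]}
    return merge_regions(rg)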
def prune_regions(groups,regions):
regs = {'+':{},'-':{}}
for smpl in regions:
grp = groups[smpl]
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1]) # reg=[beg,end] -> "beg-end"
if key not in regs[grp]: regs[grp][key] = 0
regs[grp][key] += 1
nexp = 0
for smpl in groups:
if groups[smpl]=='+': nexp += 1
for smpl in regions:
rm = []
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1])
if key in regs['-']: rm.append(reg)
elif key not in regs['+'] or regs['+'][key]!=nexp: rm.append(reg)
for reg in rm:
if reg in regions[smpl]:
regions[smpl].remove(reg)
return regions
def parse_samples(fname,highlight):
if fname==None: return (None,None,{})
samples = {}
groups = {}
grp2sgn = {}
smpl2y = {}
# parse "+name" to create a map "name":"+"
if highlight!=None:
for grp in re.split(r',', highlight):
if grp[0]!='+' and grp[0]!='-': usage("Expected + or - before the group name: "+grp)
grp2sgn[grp[1:]] = grp[0]
# read samples, renaming them
with open(fname) as f:
for line in f:
row = re.split(r'\s+', line.rstrip('\n'))
smpl = row[0]
if len(row)==1: samples[smpl] = smpl
else:
samples[smpl] = row[1]
if len(row)==3:
grp = row[2]
if grp in grp2sgn:
grp = grp2sgn[grp]
else:
grp = '+'
groups[smpl] = grp
y = len(smpl2y)
smpl2y[smpl] = y
if highlight==None: groups = None
return (samples,groups,smpl2y)
regs = parse_regions(regs)
(samples,groups,smpl2y) = parse_samples(sample_file,highlight)
dat_gt = {}
dat_rg = {}
chrs = []
for fname in fnames:
f = gzip.open(fname, 'rb')
reader = csv.reader(f, 'tab')
for row in reader:
if row[0]=='GT':
chr = row[1]
pos = int(row[2])
reg = region_overlap(regs,chr,pos,pos)
if reg==None: continue
for i in range(3,len(row),2):
smpl = row[i]
if samples!=None and smpl not in samples: continue
gt = row[i+1]
x = gt.split('/')
if x[0]=='.': continue # missing genotype ./.
dsg = 2
if x[0]!=x[1]: dsg = 1
elif x[0]=='0': continue # skip HomRef 0/0 genotypes
if chr not in dat_gt:
dat_gt[chr] = {}
chrs.append(chr)
if smpl not in dat_gt[chr]:
dat_gt[chr][smpl] = []
if smpl not in smpl2y:
y = len(smpl2y)
smpl2y[smpl] = y
dat_gt[chr][smpl].append([pos,dsg])
elif row[0]=='RG':
smpl = row[1]
if samples!=None and smpl not in samples: continue
chr = row[2]
beg = int(row[3])
end = int(row[4])
length= int(row[5])
nmark = int(row[6])
qual = float(row[7])
if length < min_length: continue
if nmark < min_markers : continue
if qual < min_qual : continue
reg = region_overlap(regs,chr,beg,end)
if chr not in dat_rg: dat_rg[chr] = {}
if smpl not in dat_rg[chr]: dat_rg[chr][smpl] = []
if reg!=None:
if beg<reg[0]: beg = reg[0]
if end>reg[1]: end = reg[1]
dat_rg[chr][smpl].append([beg,end])
if samples==None:
samples = {}
for smpl in smpl2y: samples[smpl] = smpl
# list the samples in the same order as encountered in the file, from top to bottom
for smpl in smpl2y:
smpl2y[smpl] = len(smpl2y) - smpl2y[smpl] - 1
off_list = []
off_hash = {}
off = 0
off_sep = 0
dat_rg1 = {}
for chr in chrs:
if chr in dat_rg:
rg1 = merge_regions(dat_rg[chr])
if groups!=None:
rg1 = prune_regions(groups,rg1)
if len(rg1)!=0: dat_rg1[chr] = rg1
off_hash[chr] = off
max_pos = 0
for smpl in dat_gt[chr]:
if max_pos < dat_gt[chr][smpl][-1][0]: max_pos = dat_gt[chr][smpl][-1][0]
if off_sep==0: off_sep = max_pos*0.1
off += max_pos + off_sep
off_list.append(off)
height = len(smpl2y)
if len(smpl2y)>5: height = 5
wh = 20,height
def bignum(num):
s = str(num); out = ''; slen = len(s)
for i in range(slen):
out += s[i]
if i+1<slen and (slen-i-1)%3==0: out += ','
return out
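# Illustrative check, not part of the original script:
def _demo_bignum():
    # groups digits with commas: 1234567 -> '1,234,567'
    return bignum(1234567)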
def format_coord(x, y):
chr = None
off = 0
for i in range(len(off_list)):
chr = chrs[i]
if off_list[i] > x: break
off = off_list[i]
return 'chr%s:%s'%(chr,bignum(int(x - off)))
fig, ax1 = plt.subplots(1, 1, figsize=wh, num=dir)
ax1.yaxis.set_ticks_position('none')
ax1.format_coord = format_coord
xtick_lbl = []
xtick_pos = []
max_x = 0
for chr in dat_gt:
off = off_hash[chr]
icol = 0
max = 0
for smpl in dat_gt[chr]:
y = smpl2y[smpl]
if chr in dat_rg and smpl in dat_rg[chr]:
for rg in dat_rg[chr][smpl]:
rect = patches.Rectangle((rg[0]+off,3*y+0.5), rg[1]-rg[0]+1, 2, color='#dddddd')
ax1.add_patch(rect)
if chr in dat_rg1 and smpl in dat_rg1[chr]:
for rg in dat_rg1[chr][smpl]:
rect = patches.Rectangle((rg[0]+off,3*y+0.5), rg[1]-rg[0]+1, 2, color='#d9534f')
ax1.add_patch(rect)
ax1.plot([x[0]+off for x in dat_gt[chr][smpl]],[x[1]+3*y for x in dat_gt[chr][smpl]],'.',color=cols[icol])
if max_x < dat_gt[chr][smpl][-1][0]+off: max_x = dat_gt[chr][smpl][-1][0]+off
if max < dat_gt[chr][smpl][-1][0]: max = dat_gt[chr][smpl][-1][0]
        icol += 1
        if icol >= len(cols): icol = 0
xtick_lbl.append(chr)
xtick_pos.append(off)
ytick_lbl = []
ytick_pos = []
for chr in dat_gt:
for smpl in dat_gt[chr]:
ytick_lbl.append(samples[smpl])
ytick_pos.append(3*smpl2y[smpl]+1)
break
if xlim!=0:
ax1.set_xlim(0,max_x+xlim*max_x)
lbl_pos = 3*(len(smpl2y)-1)
ax1.annotate(' HomAlt ',xy=(max_x,lbl_pos-1),xycoords='data',va='center')
ax1.annotate(' Het',xy=(max_x,lbl_pos-2),xycoords='data',va='center')
if not show_xticks:
ax1.set_xticks(xtick_pos)
ax1.set_xticklabels(xtick_lbl)
if xlabel!=None:
ax1.set_xlabel(xlabel)
ax1.set_yticks(ytick_pos)
ax1.set_yticklabels(ytick_lbl)
ax1.set_ylim(0,3*len(smpl2y)+0.5)
plt.subplots_adjust(**adjust)
if interactive:
plt.show()
else:
    for file in parse_outfile(outfile):
        plt.savefig(file,dpi=dpi)
plt.close()
|
wkretzsch/bcftools
|
misc/plot-roh.py
|
Python
|
gpl-3.0
| 13,378
|
# -*- coding: utf-8 -*-
#
# TGiT, Music Tagger for Professionals
# Copyright (C) 2013 Iconoclaste Musique Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
COUNTRIES = {
"AF": "Afghanistan",
"AX": "Aland Islan",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"VG": "British Virgin Islands",
"IO": "British Indian Ocean Territory",
"BN": "Brunei Darussalam",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"HK": "Hong Kong, Special Administrative Region of China",
"MO": "Macao, Special Administrative Region of China",
"CX": "Christmas Island",
"CC": "Cocos (Keeling) Islands",
"CO": "Colombia",
"KM": "Comoros",
"CG": "Congo (Brazzaville)",
"CD": "Congo, Democratic Republic of the",
"CK": "Cook Islands",
"CR": "Costa Rica",
"CI": "Côte d'Ivoire",
"HR": "Croatia",
"CU": "Cuba",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands (Malvinas)",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Territories",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard Island and Mcdonald Islands",
"VA": "Holy See (Vatican City State)",
"HN": "Honduras",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran, Islamic Republic of",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of Man",
"IL": "Israel",
"IT": "Italy",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"KP": "Korea, Democratic People's Republic of",
"KR": "Korea, Republic of",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Lao PDR",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MK": "Macedonia, Republic of",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia, Federated States of",
"MD": "Moldova",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"AN": "Netherlands Antilles",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestinian Territory, Occupied",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"RE": "Réunion",
"RO": "Romania",
"RU": "Russian Federation",
"RW": "Rwanda",
"BL": "Saint-Barthélemy",
"SH": "Saint Helena",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint-Martin (French part)",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and Grenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia",
"SN": "Senegal",
"RS": "Serbia",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and the South Sandwich Islands",
"SS": "South Sudan",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": "Svalbard and Jan Mayen Islands",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syrian Arab Republic (Syria)",
"TW": "Taiwan, Republic of China",
"TJ": "Tajikistan",
"TZ": "Tanzania, United Republic of",
"TH": "Thailand",
"TL": "Timor-Leste",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"US": "United States of America",
"UM": "United States Minor Outlying Islands",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VE": "Venezuela (Bolivarian Republic of)",
"VN": "Viet Nam",
"VI": "Virgin Islands, US",
"WF": "Wallis and Futuna Islands",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe"
}
ISO3166_2_A2_TO_ISO3166_2_A3 = {
"AF": "AFG",
"AX": "ALA",
"AL": "ALB",
"DZ": "DZA",
"AS": "ASM",
"AD": "AND",
"AO": "AGO",
"AI": "AIA",
"AQ": "ATA",
"AG": "ATG",
"AR": "ARG",
"AM": "ARM",
"AW": "ABW",
"AU": "AUS",
"AT": "AUT",
"AZ": "AZE",
"BS": "BHS",
"BH": "BHR",
"BD": "BGD",
"BB": "BRB",
"BY": "BLR",
"BE": "BEL",
"BZ": "BLZ",
"BJ": "BEN",
"BM": "BMU",
"BT": "BTN",
"BO": "BOL",
"BA": "BIH",
"BW": "BWA",
"BV": "BVT",
"BR": "BRA",
"VG": "VGB",
"IO": "IOT",
"BN": "BRN",
"BG": "BGR",
"BF": "BFA",
"BI": "BDI",
"KH": "KHM",
"CM": "CMR",
"CA": "CAN",
"CV": "CPV",
"KY": "CYM",
"CF": "CAF",
"TD": "TCD",
"CL": "CHL",
"CN": "CHN",
"HK": "HKG",
"MO": "MAC",
"CX": "CXR",
"CC": "CCK",
"CO": "COL",
"KM": "COM",
"CG": "COG",
"CD": "COD",
"CK": "COK",
"CR": "CRI",
"CI": "CIV",
"HR": "HRV",
"CU": "CUB",
"CY": "CYP",
"CZ": "CZE",
"DK": "DNK",
"DJ": "DJI",
"DM": "DMA",
"DO": "DOM",
"EC": "ECU",
"EG": "EGY",
"SV": "SLV",
"GQ": "GNQ",
"ER": "ERI",
"EE": "EST",
"ET": "ETH",
"FK": "FLK",
"FO": "FRO",
"FJ": "FJI",
"FI": "FIN",
"FR": "FRA",
"GF": "GUF",
"PF": "PYF",
"TF": "ATF",
"GA": "GAB",
"GM": "GMB",
"GE": "GEO",
"DE": "DEU",
"GH": "GHA",
"GI": "GIB",
"GR": "GRC",
"GL": "GRL",
"GD": "GRD",
"GP": "GLP",
"GU": "GUM",
"GT": "GTM",
"GG": "GGY",
"GN": "GIN",
"GW": "GNB",
"GY": "GUY",
"HT": "HTI",
"HM": "HMD",
"VA": "VAT",
"HN": "HND",
"HU": "HUN",
"IS": "ISL",
"IN": "IND",
"ID": "IDN",
"IR": "IRN",
"IQ": "IRQ",
"IE": "IRL",
"IM": "IMN",
"IL": "ISR",
"IT": "ITA",
"JM": "JAM",
"JP": "JPN",
"JE": "JEY",
"JO": "JOR",
"KZ": "KAZ",
"KE": "KEN",
"KI": "KIR",
"KP": "PRK",
"KR": "KOR",
"KW": "KWT",
"KG": "KGZ",
"LA": "LAO",
"LV": "LVA",
"LB": "LBN",
"LS": "LSO",
"LR": "LBR",
"LY": "LBY",
"LI": "LIE",
"LT": "LTU",
"LU": "LUX",
"MK": "MKD",
"MG": "MDG",
"MW": "MWI",
"MY": "MYS",
"MV": "MDV",
"ML": "MLI",
"MT": "MLT",
"MH": "MHL",
"MQ": "MTQ",
"MR": "MRT",
"MU": "MUS",
"YT": "MYT",
"MX": "MEX",
"FM": "FSM",
"MD": "MDA",
"MC": "MCO",
"MN": "MNG",
"ME": "MNE",
"MS": "MSR",
"MA": "MAR",
"MZ": "MOZ",
"MM": "MMR",
"NA": "NAM",
"NR": "NRU",
"NP": "NPL",
"NL": "NLD",
"AN": "ANT",
"NC": "NCL",
"NZ": "NZL",
"NI": "NIC",
"NE": "NER",
"NG": "NGA",
"NU": "NIU",
"NF": "NFK",
"MP": "MNP",
"NO": "NOR",
"OM": "OMN",
"PK": "PAK",
"PW": "PLW",
"PS": "PSE",
"PA": "PAN",
"PG": "PNG",
"PY": "PRY",
"PE": "PER",
"PH": "PHL",
"PN": "PCN",
"PL": "POL",
"PT": "PRT",
"PR": "PRI",
"QA": "QAT",
"RE": "REU",
"RO": "ROU",
"RU": "RUS",
"RW": "RWA",
"BL": "BLM",
"SH": "SHN",
"KN": "KNA",
"LC": "LCA",
"MF": "MAF",
"PM": "SPM",
"VC": "VCT",
"WS": "WSM",
"SM": "SMR",
"ST": "STP",
"SA": "SAU",
"SN": "SEN",
"RS": "SRB",
"SC": "SYC",
"SL": "SLE",
"SG": "SGP",
"SK": "SVK",
"SI": "SVN",
"SB": "SLB",
"SO": "SOM",
"ZA": "ZAF",
"GS": "SGS",
"SS": "SSD",
"ES": "ESP",
"LK": "LKA",
"SD": "SDN",
"SR": "SUR",
"SJ": "SJM",
"SZ": "SWZ",
"SE": "SWE",
"CH": "CHE",
"SY": "SYR",
"TW": "TWN",
"TJ": "TJK",
"TZ": "TZA",
"TH": "THA",
"TL": "TLS",
"TG": "TGO",
"TK": "TKL",
"TO": "TON",
"TT": "TTO",
"TN": "TUN",
"TR": "TUR",
"TM": "TKM",
"TC": "TCA",
"TV": "TUV",
"UG": "UGA",
"UA": "UKR",
"AE": "ARE",
"GB": "GBR",
"US": "USA",
"UM": "UMI",
"UY": "URY",
"UZ": "UZB",
"VU": "VUT",
"VE": "VEN",
"VN": "VNM",
"VI": "VIR",
"WF": "WLF",
"EH": "ESH",
"YE": "YEM",
"ZM": "ZMB",
"ZW": "ZWE",
}
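# Illustrative helper, not part of the original module: resolving a
# display name from an alpha-3 code by inverting the map above.
ISO3166_2_A3_TO_A2 = dict((a3, a2) for a2, a3 in ISO3166_2_A2_TO_ISO3166_2_A3.items())
def country_name_from_a3(a3):
    # e.g. country_name_from_a3('CAN') -> 'Canada'
    return COUNTRIES.get(ISO3166_2_A3_TO_A2.get(a3))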
|
Iconoclasteinc/tgit
|
tgit/countries.py
|
Python
|
gpl-3.0
| 11,846
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from distutils.util import convert_path
import os
from setuptools import setup
root = os.path.abspath(os.path.dirname(__file__))
path = lambda *p: os.path.join(root, *p)
try:
long_desc = open(path('README.txt')).read()
except Exception:
long_desc = "<Missing README.txt>"
print("Missing README.txt")
def find_packages(where='.', lib_prefix='', exclude=()):
"""
SNAGGED FROM distribute-0.6.49-py2.7.egg/setuptools/__init__.py
"""
out = []
stack=[(convert_path(where), lib_prefix)]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup', 'distribute_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
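# Illustrative sketch, not part of the original setup script: how the
# helper above is typically called; the exclude pattern is hypothetical.
def _demo_find_packages():
    # returns dotted names for every directory carrying an __init__.py
    return find_packages(where='.', exclude=('tests*',))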
setup(
name='mo-hg',
version="2.18.18240",
description='Fast cache for Mozilla\'s Mercurial repository',
long_description=long_desc,
author='Kyle Lahnakoski',
author_email='kyle@lahnakoski.com',
url='https://github.com/klahnakoski/mo-hg',
license='MPL 2.0',
packages=find_packages(),
install_requires=["beautifulsoup4","mo-collections>=2.18.18240","mo-dots>=2.18.18240","mo-future>=2.18.18240","mo-kwargs>=2.18.18240","mo-logs>=2.18.18240","mo-math>=2.18.18240","mo-threads>=2.18.18240","mo-times>=2.18.18240","pyLibrary"],
include_package_data=True,
zip_safe=False,
classifiers=[ #https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
]
)
|
klahnakoski/MoHg
|
setup.py
|
Python
|
mpl-2.0
| 2,261
|
import os
import sys
# Put communityshare in sys
this_directory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(os.path.join(this_directory, '..')))
from community_share import config, store, Base
from community_share.models.user import User, TypedLabel
from community_share.models.search import Label
grade_level_labels = set((
'K-5', '6-8', '9-12', 'College', 'Adult',
'K-3', '4-5', '6-8', '9-12', 'Preschool',
))
engagement_labels = set((
'Guest Speaker', 'Host Field Trip', 'Judge Student Competition',
'Participate in Career Day', 'Collaborate on a Class Project',
'Mentor Students', 'Brainstorm Curriculum Ideas with Educator',
'Hands-On Demonstration',
'Guest', 'Speaker', 'Field Trip Host', 'Student Competition Judge',
'Individual/Group Mentor', 'Share Curriculum Ideas', 'Curriculuum Development',
'Career Day Participant', 'Collaborator on a Class Project',
'Long-term', 'Individual Mentor', 'Short-term',
'Small Group Mentor', 'Classroom Materials Provider',
'Student Competition Judget',
))
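# Illustrative sketch, not part of the original script: how a raw label
# name maps onto a TypedLabel "typ"; it mirrors the classification loop
# in the __main__ block below.
def classify_label(name):
    if name in grade_level_labels:
        return 'gradelevel'
    if name in engagement_labels:
        return 'engagement'
    return 'expertise'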
if __name__ == '__main__':
config.load_from_file()
Base.metadata.create_all(store.engine)
users = store.session.query(User).all()
# Update the is_community_partner and is_educator in the user table.
for user in users:
is_educator = False
search = user.educator_profile_search
if (search and search.active):
is_educator = (len(search.labels) > 0)
is_community_partner = False
search = user.community_partner_profile_search
if (search and search.active):
is_community_partner = (len(search.labels) > 0)
user.is_community_partner = is_community_partner
user.is_educator = is_educator
store.session.add(user)
store.session.commit()
# Update Labels
labels = store.session.query(Label).all()
for label in labels:
if label.active:
if label.name in grade_level_labels:
typ='gradelevel'
elif label.name in engagement_labels:
typ='engagement'
else:
typ='expertise'
check = store.session.query(TypedLabel).filter(TypedLabel.name==label.name, TypedLabel.typ==typ).first()
if not check:
new_label = TypedLabel(
name=label.name,
typ=typ,
)
store.session.add(new_label)
store.session.commit()
# Associate Labels with Users instead of with searches.
for user in users:
cp_search = user.community_partner_profile_search
if cp_search:
for label in cp_search.labels:
typed_label = store.session.query(TypedLabel).filter(TypedLabel.name==label.name).first()
user.labels.append(typed_label)
ed_search = user.educator_profile_search
if ed_search:
for label in ed_search.labels:
typed_label = store.session.query(TypedLabel).filter(TypedLabel.name==label.name).first()
if typed_label.typ == 'gradelevel':
user.labels.append(typed_label)
store.session.commit()
# Make a search string for the Community partners.
for user in users:
user.update_search_text()
store.session.add(user)
store.session.commit()
|
seanastephens/communityshare
|
update_scripts/update_search.py
|
Python
|
mpl-2.0
| 3,386
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""desktop_l10n.py
This script manages Desktop repacks for nightly builds.
In this version, a single partial is supported.
"""
import os
import re
import sys
import subprocess
# load modules from parent dir
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.base.errors import BaseErrorList, MakefileErrorList
from mozharness.base.script import BaseScript
from mozharness.base.vcs.vcsbase import VCSMixin
from mozharness.mozilla.buildbot import BuildbotMixin
from mozharness.mozilla.building.buildbase import MakeUploadOutputParser
from mozharness.mozilla.l10n.locales import LocalesMixin
from mozharness.mozilla.mar import MarMixin
from mozharness.mozilla.mock import MockMixin
from mozharness.mozilla.purge import PurgeMixin
from mozharness.mozilla.release import ReleaseMixin
from mozharness.mozilla.signing import SigningMixin
from mozharness.mozilla.updates.balrog import BalrogMixin
try:
import simplejson as json
assert json
except ImportError:
import json
# needed by summarize
SUCCESS = 0
FAILURE = 1
# when running get_output_from_command, pymake has some extra output
# that needs to be filtered out
PyMakeIgnoreList = [
re.compile(r'''.*make\.py(?:\[\d+\])?: Entering directory'''),
re.compile(r'''.*make\.py(?:\[\d+\])?: Leaving directory'''),
]
# mandatory configuration options, without them, this script will not work
# it's a list of values that are already known before starting a build
configuration_tokens = ('branch',
'platform',
'en_us_binary_url',
'update_platform',
'update_channel')
# some other values such as "%(version)s", "%(buildid)s", ...
# are defined at run time and they cannot be enforced in the _pre_config_lock
# phase
runtime_config_tokens = ('buildid', 'version', 'locale', 'from_buildid',
'abs_objdir', 'abs_merge_dir', 'version', 'to_buildid')
# DesktopSingleLocale {{{1
class DesktopSingleLocale(LocalesMixin, ReleaseMixin, MockMixin, PurgeMixin,
BuildbotMixin, VCSMixin, SigningMixin, BaseScript,
BalrogMixin, MarMixin):
"""Manages desktop repacks"""
config_options = [[
['--balrog-config', ],
{"action": "extend",
"dest": "config_files",
"type": "string",
"help": "Specify the balrog configuration file"}
], [
['--branch-config', ],
{"action": "extend",
"dest": "config_files",
"type": "string",
"help": "Specify the branch configuration file"}
], [
['--environment-config', ],
{"action": "extend",
"dest": "config_files",
"type": "string",
"help": "Specify the environment (staging, production, ...) configuration file"}
], [
['--platform-config', ],
{"action": "extend",
"dest": "config_files",
"type": "string",
"help": "Specify the platform configuration file"}
], [
['--locale', ],
{"action": "extend",
"dest": "locales",
"type": "string",
"help": "Specify the locale(s) to sign and update"}
], [
['--locales-file', ],
{"action": "store",
"dest": "locales_file",
"type": "string",
"help": "Specify a file to determine which locales to sign and update"}
], [
['--tag-override', ],
{"action": "store",
"dest": "tag_override",
"type": "string",
"help": "Override the tags set for all repos"}
], [
['--user-repo-override', ],
{"action": "store",
"dest": "user_repo_override",
"type": "string",
"help": "Override the user repo path for all repos"}
], [
['--release-config-file', ],
{"action": "store",
"dest": "release_config_file",
"type": "string",
"help": "Specify the release config file to use"}
], [
['--keystore', ],
{"action": "store",
"dest": "keystore",
"type": "string",
"help": "Specify the location of the signing keystore"}
], [
['--this-chunk', ],
{"action": "store",
"dest": "this_locale_chunk",
"type": "int",
"help": "Specify which chunk of locales to run"}
], [
['--total-chunks', ],
{"action": "store",
"dest": "total_locale_chunks",
"type": "int",
"help": "Specify the total number of chunks of locales"}
]]
def __init__(self, require_config_file=True):
# fxbuild style:
buildscript_kwargs = {
'all_actions': [
"clobber",
"pull",
"list-locales",
"setup",
"repack",
"upload-repacks",
"submit-to-balrog",
"summary",
],
'config': {
"buildbot_json_path": "buildprops.json",
"ignore_locales": ["en-US"],
"locales_dir": "browser/locales",
"previous_mar_dir": "previous",
"current_mar_dir": "current",
"update_mar_dir": "dist/update",
"previous_mar_filename": "previous.mar",
"current_work_mar_dir": "current.work",
"buildid_section": "App",
"buildid_option": "BuildID",
"application_ini": "application.ini",
"unpack_script": "tools/update-packaging/unwrap_full_update.pl",
"log_name": "single_locale",
"clobber_file": 'CLOBBER',
"appName": "Firefox",
"hashType": "sha512",
},
}
#
LocalesMixin.__init__(self)
BaseScript.__init__(
self,
config_options=self.config_options,
require_config_file=require_config_file,
**buildscript_kwargs
)
self.buildid = None
self.make_ident_output = None
self.repack_env = None
self.upload_env = None
self.revision = None
self.version = None
self.upload_urls = {}
self.locales_property = {}
self.l10n_dir = None
self.package_urls = {}
self.partials = {}
if 'mock_target' in self.config:
self.enable_mock()
def _pre_config_lock(self, rw_config):
"""replaces 'configuration_tokens' with their values, before the
configuration gets locked. If some of the configuration_tokens
are not present, stops the execution of the script"""
        # since values such as branch and platform are mandatory, we can
        # replace them in the configuration before it is locked down
# mandatory tokens
for token in configuration_tokens:
if token not in self.config:
self.fatal('No %s in configuration!' % token)
# all the important tokens are present in our configuration
for token in configuration_tokens:
# token_string '%(branch)s'
token_string = ''.join(('%(', token, ')s'))
# token_value => ash
token_value = self.config[token]
for element in self.config:
# old_value => https://hg.mozilla.org/projects/%(branch)s
old_value = self.config[element]
# new_value => https://hg.mozilla.org/projects/ash
new_value = self.__detokenise_element(self.config[element],
token_string,
token_value)
if new_value and new_value != old_value:
msg = "%s: replacing %s with %s" % (element,
old_value,
new_value)
self.debug(msg)
self.config[element] = new_value
        # now, only runtime_config_tokens should be present in config;
        # we should parse self.config and fail if we spot any other token
tokens_left = set(self._get_configuration_tokens(self.config))
unknown_tokens = set(tokens_left) - set(runtime_config_tokens)
if unknown_tokens:
msg = ['unknown tokens in configuration:']
for t in unknown_tokens:
msg.append(t)
self.fatal(' '.join(msg))
self.info('configuration looks ok')
def _get_configuration_tokens(self, iterable):
"""gets a list of tokens in iterable"""
        regex = re.compile(r'%\(\w+\)s')
results = []
try:
for element in iterable:
if isinstance(iterable, str):
# this is a string, look for tokens
# self.debug("{0}".format(re.findall(regex, element)))
tokens = re.findall(regex, iterable)
for token in tokens:
# clean %(branch)s => branch
# remove %(
token_name = token.partition('%(')[2]
# remove )s
token_name = token_name.partition(')s')[0]
results.append(token_name)
break
elif isinstance(iterable, (list, tuple)):
results.extend(self._get_configuration_tokens(element))
elif isinstance(iterable, dict):
results.extend(self._get_configuration_tokens(iterable[element]))
except TypeError:
# element is a int/float/..., nothing to do here
pass
# remove duplicates, and return results
return list(set(results))
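    def _demo_token_scan(self):
        """Illustrative only, not part of the original class: the scan
        above turns '%(branch)s' placeholders into bare token names."""
        sample = {'repo': 'https://hg.mozilla.org/projects/%(branch)s'}
        return self._get_configuration_tokens(sample)  # -> ['branch']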
def __detokenise_element(self, config_option, token, value):
"""reads config_options and returns a version of the same config_option
replacing token with value recursively"""
# config_option is a string, let's replace token with value
if isinstance(config_option, str):
# if token does not appear in this string,
# nothing happens and the original value is returned
return config_option.replace(token, value)
# it's a dictionary
elif isinstance(config_option, dict):
# replace token for each element of this dictionary
for element in config_option:
config_option[element] = self.__detokenise_element(
config_option[element], token, value)
return config_option
# it's a list
elif isinstance(config_option, list):
# create a new list and append the replaced elements
new_list = []
for element in config_option:
new_list.append(self.__detokenise_element(element, token, value))
return new_list
elif isinstance(config_option, tuple):
# create a new list and append the replaced elements
new_list = []
for element in config_option:
new_list.append(self.__detokenise_element(element, token, value))
return tuple(new_list)
# everything else, bool, number, ...
return None
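    def _demo_detokenise(self):
        """Illustrative only, not part of the original class: replaces a
        single '%(branch)s' token throughout a nested structure."""
        cfg = {'urls': ['https://hg.mozilla.org/projects/%(branch)s']}
        # -> {'urls': ['https://hg.mozilla.org/projects/ash']}
        return self.__detokenise_element(cfg, '%(branch)s', 'ash')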
# Helper methods {{{2
def query_repack_env(self):
"""returns the env for repacks"""
if self.repack_env:
return self.repack_env
config = self.config
replace_dict = self.query_abs_dirs()
repack_env = self.query_env(partial_env=config.get("repack_env"),
replace_dict=replace_dict)
if config.get('en_us_binary_url') and \
config.get('release_config_file'):
repack_env['EN_US_BINARY_URL'] = config['en_us_binary_url']
if 'MOZ_SIGNING_SERVERS' in os.environ:
sign_cmd = self.query_moz_sign_cmd(formats=None)
sign_cmd = subprocess.list2cmdline(sign_cmd)
# windows fix
repack_env['MOZ_SIGN_CMD'] = sign_cmd.replace('\\', '\\\\\\\\')
for binary in self._mar_binaries():
# "mar -> MAR" and 'mar.exe -> MAR' (windows)
name = binary.replace('.exe', '')
name = name.upper()
binary_path = os.path.join(self._mar_tool_dir(), binary)
# windows fix...
binary_path.replace("\\", "/")
repack_env[name] = binary_path
self.repack_env = repack_env
return self.repack_env
def query_upload_env(self):
"""returns the environment used for the upload step"""
if self.upload_env:
return self.upload_env
c = self.config
buildid = self._query_buildid()
version = self.query_version()
upload_env = self.query_env(partial_env=c.get("upload_env"),
replace_dict={'buildid': buildid,
'version': version})
# check if there are any extra option from the platform configuration
# and append them to the env
if 'upload_env_extra' in c:
for extra in c['upload_env_extra']:
upload_env[extra] = c['upload_env_extra'][extra]
if 'MOZ_SIGNING_SERVERS' in os.environ:
upload_env['MOZ_SIGN_CMD'] = subprocess.list2cmdline(self.query_moz_sign_cmd())
self.upload_env = upload_env
return self.upload_env
def _query_make_ident_output(self):
"""Get |make ident| output from the objdir.
Only valid after setup is run.
"""
if self.make_ident_output:
return self.make_ident_output
dirs = self.query_abs_dirs()
self.make_ident_output = self._get_output_from_make(
target=["ident"],
cwd=dirs['abs_locales_dir'],
env=self.query_repack_env())
return self.make_ident_output
def _query_buildid(self):
"""Get buildid from the objdir.
Only valid after setup is run.
"""
if self.buildid:
return self.buildid
r = re.compile(r"buildid (\d+)")
output = self._query_make_ident_output()
for line in output.splitlines():
match = r.match(line)
if match:
self.buildid = match.groups()[0]
return self.buildid
def query_revision(self):
"""Get revision from the objdir.
Only valid after setup is run.
"""
if self.revision:
return self.revision
r = re.compile(r"^(gecko|fx)_revision ([0-9a-f]{12}\+?)$")
output = self._query_make_ident_output()
for line in output.splitlines():
match = r.match(line)
if match:
self.revision = match.groups()[1]
return self.revision
def _query_make_variable(self, variable, make_args=None,
exclude_lines=PyMakeIgnoreList):
"""returns the value of make echo-variable-<variable>
it accepts extra make arguements (make_args)
it also has an exclude_lines from the output filer
exclude_lines defaults to PyMakeIgnoreList because
on windows, pymake writes extra output lines that need
to be filtered out.
"""
dirs = self.query_abs_dirs()
make_args = make_args or []
exclude_lines = exclude_lines or []
target = ["echo-variable-%s" % variable] + make_args
cwd = dirs['abs_locales_dir']
raw_output = self._get_output_from_make(target, cwd=cwd,
env=self.query_repack_env())
        # we want to log all the messages from make/pymake and
        # exclude some messages from the output ("Entering directory...")
output = []
for line in raw_output.split("\n"):
discard = False
for element in exclude_lines:
if element.match(line):
discard = True
continue
if not discard:
output.append(line.strip())
return " ".join(output).strip()
def query_base_package_name(self, locale):
"""Gets the package name from the objdir.
Only valid after setup is run.
"""
# optimization:
# replace locale with %(locale)s
# and store its values.
args = ['AB_CD=%s' % locale]
return self._query_make_variable('PACKAGE', make_args=args)
def query_version(self):
"""Gets the version from the objdir.
Only valid after setup is run."""
if self.version:
return self.version
config = self.config
if config.get('release_config_file'):
release_config = self.query_release_config()
self.version = release_config['version']
else:
self.version = self._query_make_variable("MOZ_APP_VERSION")
return self.version
def upload_repacks(self):
"""iterates through the list of locales and calls make upload"""
self.summarize(self.make_upload, self.query_locales())
def summarize(self, func, items):
"""runs func for any item in items, calls the add_failure() for each
error. It assumes that function returns 0 when successful.
returns a two element tuple with (success_count, total_count)"""
success_count = 0
total_count = len(items)
name = func.__name__
for item in items:
result = func(item)
if result == SUCCESS:
# success!
success_count += 1
else:
# func failed...
message = 'failure: %s(%s)' % (name, item)
self._add_failure(item, message)
return (success_count, total_count)
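    def _demo_summarize(self):
        """Illustrative only, not part of the original class: a function
        returning SUCCESS for every item yields a clean (2, 2) tally."""
        always_ok = lambda locale: SUCCESS
        return self.summarize(always_ok, ['de', 'fr'])  # -> (2, 2)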
def _add_failure(self, locale, message, **kwargs):
"""marks current step as failed"""
self.locales_property[locale] = "Failed"
prop_key = "%s_failure" % locale
prop_value = self.query_buildbot_property(prop_key)
if prop_value:
prop_value = "%s %s" % (prop_value, message)
else:
prop_value = message
self.set_buildbot_property(prop_key, prop_value, write_to_file=True)
BaseScript.add_failure(self, locale, message=message, **kwargs)
def summary(self):
"""generates a summary"""
BaseScript.summary(self)
# TODO we probably want to make this configurable on/off
locales = self.query_locales()
for locale in locales:
self.locales_property.setdefault(locale, "Success")
self.set_buildbot_property("locales",
json.dumps(self.locales_property),
write_to_file=True)
# Actions {{{2
def clobber(self):
"""clobber"""
dirs = self.query_abs_dirs()
config = self.config
objdir = os.path.join(dirs['abs_work_dir'], config['mozilla_dir'],
config['objdir'])
PurgeMixin.clobber(self, always_clobber_dirs=[objdir])
def pull(self):
"""pulls source code"""
config = self.config
dirs = self.query_abs_dirs()
repos = []
        # replacement dictionary for repos
        # we need to interpolate some values:
        # branch, branch_repo
        # and user_repo_override, if it exists
replace_dict = {}
if config.get("user_repo_override"):
replace_dict['user_repo_override'] = config['user_repo_override']
for repository in config['repos']:
current_repo = {}
for key, value in repository.iteritems():
try:
current_repo[key] = value % replace_dict
except KeyError:
self.error('not all the values in "{0}" can be replaced. Check your configuration'.format(value))
raise
repos.append(current_repo)
self.info("repositories: %s" % repos)
self.vcs_checkout_repos(repos, parent_dir=dirs['abs_work_dir'],
tag_override=config.get('tag_override'))
self.pull_locale_source()
def _setup_configure(self, buildid=None):
"""configuration setup"""
# no need to catch failures as _make() halts on failure by default
self._make_configure()
self._make_dirs()
self.make_export(buildid) # not sure we need it
def setup(self):
"""setup step"""
dirs = self.query_abs_dirs()
self._copy_mozconfig()
self._setup_configure()
self.make_wget_en_US()
self.make_unpack()
revision = self.query_revision()
if not revision:
self.fatal("Can't determine revision!")
# TODO do this through VCSMixin instead of hardcoding hg
# self.update(dest=dirs["abs_mozilla_dir"], revision=revision)
hg = self.query_exe("hg")
self.run_command([hg, "update", "-r", revision],
cwd=dirs["abs_mozilla_dir"],
env=self.query_repack_env(),
error_list=BaseErrorList,
halt_on_failure=True, fatal_exit_code=3)
# if checkout updates CLOBBER file with a newer timestamp,
# next make -f client.mk configure will delete archives
# downloaded with make wget_en_US, so just touch CLOBBER file
_clobber_file = self._clobber_file()
if os.path.exists(_clobber_file):
self._touch_file(_clobber_file)
# Configure again since the hg update may have invalidated it.
buildid = self._query_buildid()
self._setup_configure(buildid=buildid)
def _clobber_file(self):
"""returns the full path of the clobber file"""
config = self.config
dirs = self.query_abs_dirs()
return os.path.join(dirs['abs_objdir'], config.get('clobber_file'))
def _copy_mozconfig(self):
"""copies the mozconfig file into abs_mozilla_dir/.mozconfig
and logs the content
"""
config = self.config
dirs = self.query_abs_dirs()
mozconfig = config['mozconfig']
src = os.path.join(dirs['abs_work_dir'], mozconfig)
dst = os.path.join(dirs['abs_mozilla_dir'], '.mozconfig')
self.copyfile(src, dst)
# STUPID HACK HERE
# should we update the mozconfig so it has the right value?
with self.opened(src, 'r') as (in_mozconfig, in_error):
if in_error:
self.fatal('cannot open {0}'.format(src))
with self.opened(dst, open_mode='w') as (out_mozconfig, out_error):
if out_error:
self.fatal('cannot write {0}'.format(dst))
for line in in_mozconfig:
if 'with-l10n-base' in line:
line = 'ac_add_options --with-l10n-base=../../l10n\n'
self.l10n_dir = line.partition('=')[2].strip()
out_mozconfig.write(line)
# now log
with self.opened(dst, 'r') as (mozconfig, in_error):
if in_error:
self.fatal('cannot open {0}'.format(dst))
for line in mozconfig:
self.info(line.strip())
def _make(self, target, cwd, env, error_list=MakefileErrorList,
halt_on_failure=True, output_parser=None):
"""Runs make. Returns the exit code"""
make = self.query_exe("make", return_type="list")
return self.run_command(make + target,
cwd=cwd,
env=env,
error_list=error_list,
halt_on_failure=halt_on_failure,
output_parser=output_parser)
def _get_output_from_make(self, target, cwd, env, halt_on_failure=True):
"""runs make and returns the output of the command"""
make = self.query_exe("make", return_type="list")
return self.get_output_from_command(make + target,
cwd=cwd,
env=env,
silent=True,
halt_on_failure=halt_on_failure)
def _make_configure(self):
"""calls make -f client.mk configure"""
env = self.query_repack_env()
dirs = self.query_abs_dirs()
cwd = dirs['abs_mozilla_dir']
target = ["-f", "client.mk", "configure"]
return self._make(target=target, cwd=cwd, env=env)
def _make_dirs(self):
"""calls make <dirs>
dirs is defined in configuration"""
config = self.config
env = self.query_repack_env()
dirs = self.query_abs_dirs()
target = []
for make_dir in config.get('make_dirs', []):
cwd = os.path.join(dirs['abs_objdir'], make_dir)
self._make(target=target, cwd=cwd, env=env, halt_on_failure=True)
def make_export(self, buildid):
"""calls make export <buildid>"""
# is it really needed ???
if buildid is None:
self.info('buildid is set to None, skipping make export')
return
dirs = self.query_abs_dirs()
cwd = dirs['abs_locales_dir']
env = self.query_repack_env()
target = ["export", 'MOZ_BUILD_DATE=%s' % str(buildid)]
return self._make(target=target, cwd=cwd, env=env)
def make_unpack(self):
"""wrapper for make unpack"""
config = self.config
dirs = self.query_abs_dirs()
env = self.query_repack_env()
cwd = os.path.join(dirs['abs_objdir'], config['locales_dir'])
return self._make(target=["unpack"], cwd=cwd, env=env)
def make_wget_en_US(self):
"""wrapper for make wget-en-US"""
env = self.query_repack_env()
dirs = self.query_abs_dirs()
cwd = dirs['abs_locales_dir']
return self._make(target=["wget-en-US"], cwd=cwd, env=env)
def make_upload(self, locale):
"""wrapper for make upload command"""
config = self.config
env = self.query_upload_env()
dirs = self.query_abs_dirs()
buildid = self._query_buildid()
try:
env['POST_UPLOAD_CMD'] = config['base_post_upload_cmd'] % {'buildid': buildid,
'branch': config['branch']}
except KeyError:
# no base_post_upload_cmd in configuration, just skip it
pass
target = ['upload', 'AB_CD=%s' % (locale)]
cwd = dirs['abs_locales_dir']
parser = MakeUploadOutputParser(config=self.config,
log_obj=self.log_obj)
retval = self._make(target=target, cwd=cwd, env=env,
halt_on_failure=False, output_parser=parser)
if locale not in self.package_urls:
self.package_urls[locale] = {}
self.package_urls[locale].update(parser.matches)
self.info("parser: %s" % parser)
self.info("parser matches: %s" % parser.matches)
if retval == 0:
self.info('Upload successful (%s)' % (locale))
ret = SUCCESS
else:
self.error('failed to upload %s' % (locale))
ret = FAILURE
return ret
def make_installers(self, locale):
"""wrapper for make installers-(locale)"""
env = self.query_repack_env()
self._copy_mozconfig()
env['L10NBASEDIR'] = self.l10n_dir
dirs = self.query_abs_dirs()
cwd = os.path.join(dirs['abs_locales_dir'])
cmd = ["installers-%s" % locale,
"LOCALE_MERGEDIR=%s" % env["LOCALE_MERGEDIR"], ]
return self._make(target=cmd, cwd=cwd,
env=env, halt_on_failure=False)
def generate_complete_mar(self, locale):
"""creates a complete mar file"""
config = self.config
dirs = self.query_abs_dirs()
self._create_mar_dirs()
self.download_mar_tools()
package_basedir = os.path.join(dirs['abs_objdir'],
config['package_base_dir'])
env = self.query_repack_env()
        update_packaging_dir = os.path.join(dirs['abs_objdir'],
                                            config['update_packaging_dir'])
        cmd = ['-C', update_packaging_dir, 'full-update', 'AB_CD=%s' % locale,
               'PACKAGE_BASE_DIR=%s' % package_basedir]
return self._make(target=cmd, cwd=dirs['abs_mozilla_dir'], env=env)
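    # Illustrative note: the target above amounts to running
    # `make -C <objdir>/<update_packaging_dir> full-update AB_CD=<locale>
    #  PACKAGE_BASE_DIR=<package_basedir>` with abs_mozilla_dir as the cwd;
    # `-C` tells make to change into the update packaging directory itself.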
def repack_locale(self, locale):
"""wraps the logic for comapare locale, make installers and generate
partials"""
if self.run_compare_locales(locale) != 0:
self.error("compare locale %s failed" % (locale))
return FAILURE
        # compare locales succeeded, let's run make installers
if self.make_installers(locale) != 0:
self.error("make installers-%s failed" % (locale))
return FAILURE
        # installer succeeded, generate complete mar
if self.generate_complete_mar(locale) != 0:
self.error("generate complete %s mar failed" % (locale))
return FAILURE
# do we need to generate partials?
if self.has_partials():
if self.create_partial_updates(locale) != 0:
self.error("generate partials %s failed" % (locale))
return FAILURE
else:
self.info("partial updates are not enabled, skipping")
return SUCCESS
def has_partials(self):
"""returns True if partials are enabled, False elsewhere"""
config = self.config
return config["enable_partials"]
def repack(self):
"""creates the repacks and udpates"""
self.summarize(self.repack_locale, self.query_locales())
def localized_marfile(self, locale):
"""returns the localized mar file name"""
config = self.config
version = self.query_version()
localized_mar = config['localized_mar'] % {'version': version,
'locale': locale}
localized_mar = os.path.join(self._mar_dir('update_mar_dir'),
localized_mar)
return localized_mar
def create_partial_updates(self, locale):
"""create partial updates for locale"""
        # clean up any leftovers from previous locales
# remove current/ current.work/ previous/ directories
self._delete_mar_dirs()
# and recreate current/ previous/
self._create_mar_dirs()
# download mar and mbsdiff executables
self.download_mar_tools()
# get the previous mar file
previous_marfile = self._get_previous_mar(locale)
# and unpack it
previous_mar_dir = self._previous_mar_dir()
result = self._unpack_mar(previous_marfile, previous_mar_dir)
if result != 0:
self.error('failed to unpack %s to %s' % (previous_marfile,
previous_mar_dir))
return result
current_marfile = self._get_current_mar()
current_mar_dir = self._current_mar_dir()
result = self._unpack_mar(current_marfile, current_mar_dir)
if result != 0:
self.error('failed to unpack %s to %s' % (current_marfile,
current_mar_dir))
return result
# partial filename
config = self.config
version = self.query_version()
previous_mar_buildid = self.get_buildid_from_mar_dir(previous_mar_dir)
current_mar_buildid = self._query_buildid()
partial_filename = config['partial_mar'] % {'version': version,
'locale': locale,
                                                    'from_buildid': previous_mar_buildid,
                                                    'to_buildid': current_mar_buildid}
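        # Illustrative example (hypothetical template and buildids): with
        # partial_mar = "firefox-%(version)s.%(locale)s.partial.%(from_buildid)s-%(to_buildid)s.mar",
        # version "30.0" and locale "de", the interpolation above would
        # produce something like
        # "firefox-30.0.de.partial.20140528030201-20140529030201.mar".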
if locale not in self.package_urls:
self.package_urls[locale] = {}
self.package_urls[locale]['partial_filename'] = partial_filename
self.package_urls[locale]['previous_buildid'] = previous_mar_buildid
self._delete_pgc_files()
result = self.do_incremental_update(previous_mar_dir, current_mar_dir,
partial_filename)
if result == 0:
            # incremental update succeeded
            # prepare partialInfo for balrog submission
partialInfo = {}
p_marfile = self._query_partial_mar_filename(locale)
partialInfo['from_buildid'] = previous_mar_buildid
partialInfo['size'] = self.query_filesize(p_marfile)
partialInfo['hash'] = self.query_sha512sum(p_marfile)
# url will be available only after make upload
# self._query_partial_mar_url(locale)
            # and of course we need to generate partials before uploading them
partialInfo['url'] = None
if locale not in self.partials:
self.partials[locale] = []
# append partialInfo
self.partials[locale].append(partialInfo)
return result
def _query_objdir(self):
"""returns objdir name from configuration"""
return self.config['objdir']
def query_abs_dirs(self):
if self.abs_dirs:
return self.abs_dirs
abs_dirs = super(DesktopSingleLocale, self).query_abs_dirs()
dirs = {}
dirs['abs_tools_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tools')
for key in dirs.keys():
if key not in abs_dirs:
abs_dirs[key] = dirs[key]
self.abs_dirs = abs_dirs
return self.abs_dirs
def submit_to_balrog(self):
"""submit to barlog"""
if not self.config.get("balrog_api_root"):
self.info("balrog_api_root not set; skipping balrog submission.")
return
self.info("Reading buildbot build properties...")
self.read_buildbot_config()
# get platform, appName and hashType from configuration
# common values across different locales
config = self.config
platform = config["platform"]
hashType = config['hashType']
appName = config['appName']
branch = config['branch']
# values from configuration
self.set_buildbot_property("branch", branch)
self.set_buildbot_property("appName", appName)
# it's hardcoded to sha512 in balrog.py
self.set_buildbot_property("hashType", hashType)
self.set_buildbot_property("platform", platform)
# values common to the current repacks
self.set_buildbot_property("buildid", self._query_buildid())
self.set_buildbot_property("appVersion", self.query_version())
# submit complete mar to balrog
# clean up buildbot_properties
self.summarize(self.submit_repack_to_balrog, self.query_locales())
def submit_repack_to_balrog(self, locale):
"""submit a single locale to balrog"""
if not self.query_is_nightly():
# remove this check when we extend this script to non-nightly builds
self.fatal("Not a nightly build")
return FAILURE
# complete mar file
c_marfile = self._query_complete_mar_filename(locale)
c_mar_url = self._query_complete_mar_url(locale)
# Set other necessary properties for Balrog submission. None need to
# be passed back to buildbot, so we won't write them to the properties
# files
# Locale is hardcoded to en-US, for silly reasons
# The Balrog submitter translates this platform into a build target
# via https://github.com/mozilla/build-tools/blob/master/lib/python/release/platforms.py#L23
self.set_buildbot_property("completeMarSize", self.query_filesize(c_marfile))
self.set_buildbot_property("completeMarHash", self.query_sha512sum(c_marfile))
self.set_buildbot_property("completeMarUrl", c_mar_url)
self.set_buildbot_property("locale", locale)
self.set_buildbot_property("partialInfo", self._get_partialInfo(locale))
ret = FAILURE
try:
result = self.submit_balrog_updates()
self.info("balrog return code: %s" % (result))
if result == 0:
ret = SUCCESS
except Exception as error:
self.error("submit repack to balrog failed: %s" % (str(error)))
return ret
def _get_partialInfo(self, locale):
"""we can have 0, 1 or many partials, this method returns the partialInfo
        needed by the balrog submitter"""
if locale not in self.partials:
return []
# we have only a single partial for now
        # MakeUploadOutputParser can only match a single partial
partial_url = self.package_urls[locale]["partialMarUrl"]
self.partials[locale][0]["url"] = partial_url
return self.partials[locale]
def _query_complete_mar_filename(self, locale):
"""returns the full path to a localized complete mar file"""
config = self.config
version = self.query_version()
complete_mar_name = config['localized_mar'] % {'version': version,
'locale': locale}
return os.path.join(self._update_mar_dir(), complete_mar_name)
def _query_complete_mar_url(self, locale):
"""returns the complete mar url taken from self.package_urls[locale]
this value is available only after make_upload"""
if "complete_mar_url" in self.config:
return self.config["complete_mar_url"]
if "completeMarUrl" in self.package_urls[locale]:
return self.package_urls[locale]["completeMarUrl"]
# url = self.config.get("update", {}).get("mar_base_url")
# if url:
# url += os.path.basename(self.query_marfile_path())
# return url.format(branch=self.query_branch())
self.fatal("Couldn't find complete mar url in config or package_urls")
def _query_partial_mar_url(self, locale):
"""returns partial mar url"""
try:
return self.package_urls[locale]["partialMarUrl"]
except KeyError:
msg = "Couldn't find package_urls: %s %s" % (locale, self.package_urls)
self.error("package_urls: %s" % (self.package_urls))
self.fatal(msg)
def _query_partial_mar_filename(self, locale):
"""returns the full path to a partial, it returns a valid path only
after make upload"""
partial_mar_name = self.package_urls[locale]['partial_filename']
return os.path.join(self._update_mar_dir(), partial_mar_name)
def _query_previous_mar_buildid(self, locale):
"""return the partial mar buildid,
        this method returns a valid buildid only after generating partials,
it raises an exception when buildid is not available
"""
try:
return self.package_urls[locale]["previous_buildid"]
except KeyError:
self.error("no previous mar buildid")
raise
def _delete_pgc_files(self):
"""deletes pgc files"""
for directory in (self._previous_mar_dir(),
self._current_mar_dir()):
            for pgc_file in self._pgc_files(directory):
                self.info("removing %s" % pgc_file)
                self.rmtree(pgc_file)
def _current_mar_url(self):
"""returns current mar url"""
config = self.config
base_url = config['current_mar_url']
return "/".join((base_url, self._current_mar_name()))
def _previous_mar_url(self, locale):
"""returns the url for previous mar"""
config = self.config
base_url = config['previous_mar_url']
return "/".join((base_url, self._localized_mar_name(locale)))
def _get_current_mar(self):
"""downloads the current mar file"""
        self.mkdir_p(self._current_mar_dir())
if not os.path.exists(self._current_mar_filename()):
self.download_file(self._current_mar_url(),
self._current_mar_filename())
else:
self.info('%s already exists, skipping download' % (self._current_mar_filename()))
return self._current_mar_filename()
def _get_previous_mar(self, locale):
"""downloads the previous mar file"""
self.mkdir_p(self._previous_mar_dir())
self.download_file(self._previous_mar_url(locale),
self._previous_mar_filename())
return self._previous_mar_filename()
def _current_mar_name(self):
"""returns current mar file name"""
config = self.config
version = self.query_version()
return config["current_mar_filename"] % {'version': version}
def _localized_mar_name(self, locale):
"""returns localized mar name"""
config = self.config
version = self.query_version()
return config["localized_mar"] % {'version': version, 'locale': locale}
def _previous_mar_filename(self):
"""returns the complete path to previous.mar"""
config = self.config
return os.path.join(self._previous_mar_dir(),
config['previous_mar_filename'])
def _current_mar_filename(self):
"""returns the complete path to current.mar"""
return os.path.join(self._current_mar_dir(), self._current_mar_name())
def _create_mar_dirs(self):
"""creates mar directories: previous/ current/"""
for directory in (self._previous_mar_dir(),
self._current_mar_dir()):
self.info("creating: %s" % directory)
self.mkdir_p(directory)
def _delete_mar_dirs(self):
"""delete mar directories: previous, current"""
for directory in (self._previous_mar_dir(),
self._current_mar_dir(),
self._current_work_mar_dir()):
self.info("deleting: %s" % directory)
if os.path.exists(directory):
self.rmtree(directory)
def _unpack_script(self):
"""unpack script full path"""
config = self.config
dirs = self.query_abs_dirs()
return os.path.join(dirs['abs_mozilla_dir'], config['unpack_script'])
def _previous_mar_dir(self):
"""returns the full path of the previous/ directory"""
return self._mar_dir('previous_mar_dir')
def _abs_dist_dir(self):
"""returns the full path to abs_objdir/dst"""
dirs = self.query_abs_dirs()
return os.path.join(dirs['abs_objdir'], 'dist')
def _update_mar_dir(self):
"""returns the full path of the update/ directory"""
return self._mar_dir('update_mar_dir')
def _current_mar_dir(self):
"""returns the full path of the current/ directory"""
return self._mar_dir('current_mar_dir')
def _current_work_mar_dir(self):
"""returns the full path to current.work"""
return self._mar_dir('current_work_mar_dir')
def _mar_binaries(self):
"""returns a tuple with mar and mbsdiff paths"""
config = self.config
return (config['mar'], config['mbsdiff'])
def _mar_dir(self, dirname):
"""returns the full path of dirname;
dirname is an entry in configuration"""
config = self.config
return os.path.join(self._get_objdir(), config.get(dirname))
def _get_objdir(self):
"""returns full path to objdir"""
dirs = self.query_abs_dirs()
return dirs['abs_objdir']
def _pgc_files(self, basedir):
"""returns a list of .pcf files in basedir"""
pgc_files = []
for dirpath, dirnames, filenames in os.walk(basedir):
for pgc in filenames:
if pgc.endswith('.pgc'):
pgc_files.append(os.path.join(dirpath, pgc))
return pgc_files
# main {{{
if __name__ == '__main__':
single_locale = DesktopSingleLocale()
single_locale.run_and_exit()
|
kartikgupta0909/build-mozharness
|
scripts/desktop_l10n.py
|
Python
|
mpl-2.0
| 44,931
|
from .version import version as __version__
from oplus.configuration import CONF
from oplus.epm.api import *
from oplus.weather_data.api import *
from oplus.standard_output.api import *
from oplus.eio import Eio
from oplus.mtd import Mtd
from oplus.err import Err
from oplus.summary_table import SummaryTable
from oplus.output_table import OutputTable
from oplus.simulation import Simulation, simulate
|
Openergy/oplus
|
oplus/__init__.py
|
Python
|
mpl-2.0
| 404
|
import base64
import json
import os
import six
from wptserve.utils import isomorphic_decode, isomorphic_encode
def decodebytes(s):
if six.PY3:
return base64.decodebytes(six.ensure_binary(s))
return base64.decodestring(s)
def main(request, response):
headers = []
headers.append((b'X-ServiceWorker-ServerHeader', b'SetInTheServer'))
if b"ACAOrigin" in request.GET:
for item in request.GET[b"ACAOrigin"].split(b","):
headers.append((b"Access-Control-Allow-Origin", item))
for suffix in [b"Headers", b"Methods", b"Credentials"]:
query = b"ACA%s" % suffix
header = b"Access-Control-Allow-%s" % suffix
if query in request.GET:
headers.append((header, request.GET[query]))
if b"ACEHeaders" in request.GET:
headers.append((b"Access-Control-Expose-Headers", request.GET[b"ACEHeaders"]))
if (b"Auth" in request.GET and not request.auth.username) or b"AuthFail" in request.GET:
status = 401
headers.append((b'WWW-Authenticate', b'Basic realm="Restricted"'))
body = b'Authentication canceled'
return status, headers, body
if b"PNGIMAGE" in request.GET:
headers.append((b"Content-Type", b"image/png"))
body = decodebytes(b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1B"
b"AACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAAhSURBVDhPY3wro/KfgQLABKXJBqMG"
b"jBoAAqMGDLwBDAwAEsoCTFWunmQAAAAASUVORK5CYII=")
return headers, body
if b"VIDEO" in request.GET:
headers.append((b"Content-Type", b"video/webm"))
        with open(os.path.join(request.doc_root, u"media", u"movie_5.ogv"), "rb") as f:
            body = f.read()
length = len(body)
# If "PartialContent" is specified, the requestor wants to test range
# requests. For the initial request, respond with "206 Partial Content"
# and don't send the entire content. Then expect subsequent requests to
# have a "Range" header with a byte range. Respond with that range.
if b"PartialContent" in request.GET:
if length < 1:
return 500, headers, b"file is too small for range requests"
start = 0
end = length - 1
if b"Range" in request.headers:
range_header = request.headers[b"Range"]
prefix = b"bytes="
split_header = range_header[len(prefix):].split(b"-")
# The first request might be "bytes=0-". We want to force a range
# request, so just return the first byte.
if split_header[0] == b"0" and split_header[1] == b"":
end = start
# Otherwise, it is a range request. Respect the values sent.
if split_header[0] != b"":
start = int(split_header[0])
if split_header[1] != b"":
end = int(split_header[1])
else:
# The request doesn't have a range. Force a range request by
# returning the first byte.
end = start
headers.append((b"Accept-Ranges", b"bytes"))
headers.append((b"Content-Length", isomorphic_encode(str(end -start + 1))))
headers.append((b"Content-Range", b"bytes %d-%d/%d" % (start, end, length)))
chunk = body[start:(end + 1)]
return 206, headers, chunk
return headers, body
username = request.auth.username if request.auth.username else b"undefined"
password = request.auth.password if request.auth.username else b"undefined"
cookie = request.cookies[b'cookie'].value if b'cookie' in request.cookies else b"undefined"
files = []
for key, values in request.POST.items():
assert len(values) == 1
value = values[0]
if not hasattr(value, u"file"):
continue
data = value.file.read()
files.append({u"key": isomorphic_decode(key),
u"name": value.file.name,
u"type": value.type,
u"error": 0, #TODO,
u"size": len(data),
u"content": data})
get_data = {isomorphic_decode(key):isomorphic_decode(request.GET[key]) for key, value in request.GET.items()}
post_data = {isomorphic_decode(key):isomorphic_decode(request.POST[key]) for key, value in request.POST.items()
if not hasattr(request.POST[key], u"file")}
headers_data = {isomorphic_decode(key):isomorphic_decode(request.headers[key]) for key, value in request.headers.items()}
data = {u"jsonpResult": u"success",
u"method": request.method,
u"headers": headers_data,
u"body": isomorphic_decode(request.body),
u"files": files,
u"GET": get_data,
u"POST": post_data,
u"username": isomorphic_decode(username),
u"password": isomorphic_decode(password),
u"cookie": isomorphic_decode(cookie)}
return headers, u"report( %s )" % json.dumps(data)
|
KiChjang/servo
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/fetch-access-control.py
|
Python
|
mpl-2.0
| 5,045
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
from django.conf import settings
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
class TimeStampedModel(models.Model):
"""
Replacement for django_extensions.db.models.TimeStampedModel
that updates the modified timestamp by default, but allows
that behavior to be overridden by passing a modified=False
parameter to the save method
"""
created = CreationDateTimeField()
modified = models.DateTimeField(editable=False, blank=True, db_index=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
if kwargs.pop('modified', True):
self.modified = datetime.now()
super(TimeStampedModel, self).save(*args, **kwargs)
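# Illustrative usage (hypothetical instance): release.save(modified=False)
# keeps the stored "modified" timestamp, while a plain release.save() bumps
# it to datetime.now() via the override above.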
class Release(TimeStampedModel):
CHANNELS = ('Nightly', 'Aurora', 'Beta', 'Release', 'ESR')
PRODUCTS = ('Firefox', 'Firefox for Android',
'Firefox Extended Support Release', 'Firefox OS',
'Thunderbird')
product = models.CharField(max_length=255,
choices=[(p, p) for p in PRODUCTS])
channel = models.CharField(max_length=255,
choices=[(c, c) for c in CHANNELS])
version = models.CharField(max_length=255)
release_date = models.DateTimeField()
text = models.TextField(blank=True)
is_public = models.BooleanField(default=False)
bug_list = models.TextField(blank=True)
bug_search_url = models.CharField(max_length=2000, blank=True)
system_requirements = models.TextField(blank=True)
def major_version(self):
return self.version.split('.', 1)[0]
def get_bug_search_url(self):
if self.bug_search_url:
return self.bug_search_url
if self.product == 'Thunderbird':
return (
'https://bugzilla.mozilla.org/buglist.cgi?'
'classification=Client%20Software&query_format=advanced&'
'bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&'
'target_milestone=Thunderbird%20{version}.0&product=Thunderbird'
'&resolution=FIXED'
).format(version=self.major_version())
return (
'https://bugzilla.mozilla.org/buglist.cgi?'
'j_top=OR&f1=target_milestone&o3=equals&v3=Firefox%20{version}&'
'o1=equals&resolution=FIXED&o2=anyexact&query_format=advanced&'
'f3=target_milestone&f2=cf_status_firefox{version}&'
'bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&'
'v1=mozilla{version}&v2=fixed%2Cverified&limit=0'
).format(version=self.major_version())
def equivalent_release_for_product(self, product):
"""
Returns the release for a specified product with the same
channel and major version with the highest minor version,
or None if no such releases exist
"""
releases = self._default_manager.filter(
version__startswith=self.major_version() + '.',
channel=self.channel, product=product).order_by('-version')
if not getattr(settings, 'DEV', False):
releases = releases.filter(is_public=True)
if releases:
return sorted(
sorted(releases, reverse=True,
key=lambda r: len(r.version.split('.'))),
reverse=True, key=lambda r: r.version.split('.')[1])[0]
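    # Worked example (hypothetical versions): given releases "33.0", "33.0.1"
    # and "33.0.2" for the same channel, the queryset arrives ordered by
    # "-version", the inner sort keeps the more-dotted versions first, and
    # the stable outer sort by the second version component leaves "33.0.2"
    # in front, so it is the release returned.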
def equivalent_android_release(self):
if self.product == 'Firefox':
return self.equivalent_release_for_product('Firefox for Android')
def equivalent_desktop_release(self):
if self.product == 'Firefox for Android':
return self.equivalent_release_for_product('Firefox')
def notes(self, public_only=False):
"""
        Retrieve the Note instances that should be shown for this release,
        grouped as either new features or known issues.

        Both groups are sorted by sort_num, highest to lowest. New features
        are additionally sorted by tag in the order specified by Note.TAGS,
        with untagged notes coming first. Finally, any note with the "Fixed"
        tag whose text starts with the release version is moved to the top:
        these are what we call "dot fixes".
"""
tag_index = dict((tag, i) for i, tag in enumerate(Note.TAGS))
notes = self.note_set.order_by('-sort_num')
if public_only:
notes = notes.filter(is_public=True)
known_issues = [n for n in notes if n.is_known_issue_for(self)]
new_features = sorted(
sorted(
(n for n in notes if not n.is_known_issue_for(self)),
key=lambda note: tag_index.get(note.tag, 0)),
key=lambda n: n.tag == 'Fixed' and n.note.startswith(self.version),
reverse=True)
return new_features, known_issues
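    # Illustrative ordering (hypothetical notes): untagged notes and notes
    # tagged 'New' both map to tag index 0, so they sort ahead of 'Changed';
    # a 'Fixed' note whose text starts with e.g. "33.0.1" is then hoisted to
    # the front of new_features as a dot fix.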
def __unicode__(self):
return '{product} {version} {channel}'.format(
product=self.product, version=self.version, channel=self.channel)
class Meta:
# TODO: see if this has a significant performance impact
ordering = ('product', '-version', 'channel')
unique_together = (('product', 'version'),)
class Note(TimeStampedModel):
TAGS = ('New', 'Changed', 'HTML5', 'Feature', 'Language', 'Developer',
'Fixed')
bug = models.IntegerField(null=True, blank=True)
note = models.TextField(blank=True)
releases = models.ManyToManyField(Release, blank=True)
is_known_issue = models.BooleanField(default=False)
fixed_in_release = models.ForeignKey(Release, null=True, blank=True,
related_name='fixed_note_set')
tag = models.CharField(max_length=255, blank=True,
choices=[(t, t) for t in TAGS])
sort_num = models.IntegerField(default=0)
is_public = models.BooleanField(default=True)
    image = models.ImageField(
        upload_to=lambda instance, filename: '/'.join(
            ['screenshot', str(instance.pk), filename]))
def is_known_issue_for(self, release):
return self.is_known_issue and self.fixed_in_release != release
def __unicode__(self):
return self.note
|
sylvestre/rna
|
rna/models.py
|
Python
|
mpl-2.0
| 6,479
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozhttpd
import urllib2
import os
import unittest
import mozunit
here = os.path.dirname(os.path.abspath(__file__))
class RequestLogTest(unittest.TestCase):
def check_logging(self, log_requests=False):
httpd = mozhttpd.MozHttpd(port=0, docroot=here, log_requests=log_requests)
httpd.start(block=False)
url = "http://%s:%s/" % ('127.0.0.1', httpd.httpd.server_port)
f = urllib2.urlopen(url)
f.read()
return httpd.request_log
def test_logging_enabled(self):
request_log = self.check_logging(log_requests=True)
self.assertEqual(len(request_log), 1)
log_entry = request_log[0]
self.assertEqual(log_entry['method'], 'GET')
self.assertEqual(log_entry['path'], '/')
self.assertEqual(type(log_entry['time']), float)
def test_logging_disabled(self):
request_log = self.check_logging(log_requests=False)
self.assertEqual(len(request_log), 0)
if __name__ == '__main__':
mozunit.main()
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozbase/mozhttpd/tests/requestlog.py
|
Python
|
mpl-2.0
| 1,221
|
import cis_profile
import cis_publisher
import boto3
import botocore
import os
import logging
import json
import time
from auth0.v3.authentication import GetToken
from auth0.v3.management import Auth0
from auth0.v3.exceptions import Auth0Error
from datetime import datetime, timezone, timedelta
from traceback import format_exc
# from http.client import HTTPConnection
logger = logging.getLogger(__name__)
# HTTPConnection.debuglevel = 1
class Auth0Publisher:
def __init__(self, context={}):
self.secret_manager = cis_publisher.secret.Manager()
self.context = context
self.report = None
self.config = cis_publisher.common.get_config()
self.s3_cache = None
self.s3_cache_require_update = False
# Only fields we care about for the user entries
# auth0 field->cis field map
self.az_cis_fields = {
"created_at": "created",
"given_name": "first_name",
"family_name": "last_name",
"name": None,
"nickname": None,
"user_id": "user_id",
"email": "primary_email",
"identities": "identities",
"blocked": "active",
}
self.az_blacklisted_connections = ["Mozilla-LDAP", "Mozilla-LDAP-Dev"]
self.az_whitelisted_connections = ["email", "github", "google-oauth2", "firefoxaccounts"]
self.az_users = None
self.all_cis_user_ids = None
self.user_ids_only = None
def get_s3_cache(self):
"""
        If the cache exists and is not older than the configured timedelta, return it, else return None
        return: dict (parsed JSON)
"""
if self.s3_cache is not None:
return self.s3_cache
s3 = boto3.client("s3")
bucket = os.environ.get("CIS_BUCKET_URL")
cache_time = int(os.environ.get("CIS_AUTHZERO_CACHE_TIME_SECONDS", 120))
recent = datetime.now(timezone.utc) - timedelta(seconds=cache_time)
try:
objects = s3.list_objects_v2(Bucket=bucket)
# bucket has zero contents?
if "Contents" not in objects:
logger.info("No S3 cache present")
return None
# Recent file?
for o in objects["Contents"]:
if o["Key"] == "cache.json" and recent > o["LastModified"]:
logger.info(
f"S3 cache too old, not using ({recent} gt {o['LastModified']}"
f", was cached for: {cache_time}s)"
)
return None
response = s3.get_object(Bucket=bucket, Key="cache.json")
data = response["Body"].read()
except botocore.exceptions.ClientError as e:
logger.error("Could not find S3 cache file: {}".format(e))
return None
logger.info("Using S3 cache")
self.s3_cache = json.loads(data)
return self.s3_cache
def save_s3_cache(self, data):
"""
@data dict JSON
"""
if self.s3_cache_require_update is False:
return
s3 = boto3.client("s3")
bucket = os.environ.get("CIS_BUCKET_URL")
s3.put_object(Bucket=bucket, Key="cache.json", Body=json.dumps(data))
logger.info("Wrote S3 cache file")
def publish(self, user_ids=None, chunk_size=100):
"""
Glue to create or fetch cis_profile.User profiles for this publisher
Then pass everything over to the Publisher class
None, ALL profiles are sent.
@user_ids: list of str - user ids to publish. If None, all users are published.
@chunk_size: int when no user_id is selected, this is the size of the chunk/slice we'll create to divide the
work between function calls (to self)
"""
if user_ids is None:
le = "All"
else:
le = len(user_ids)
logger.info("Starting Auth0 Publisher [{} users]".format(le))
# XXX login_method is overridden when posting the user or listing users, i.e. the one here does not matter
publisher = cis_publisher.Publish([], login_method="github", publisher_name="auth0")
# These are the users auth0 knows about
self.az_users = self.fetch_az_users(user_ids)
self.all_cis_user_ids = self.fetch_all_cis_user_ids(publisher)
# Should we fan-out processing to multiple function calls?
if user_ids is None:
# Because we do not care about most attributes update, we only process new users, or users that will be
# deactivated in order to save time. Note that there is (currently) no auth0 hook to notify of new user
# event, so this (the auth0 publisher that is) function needs to be reasonably fast to avoid delays when
# provisioning users
# So first, remove all known users from the requested list
user_ids_to_process_set = set(self.get_az_user_ids()) - set(self.all_cis_user_ids)
az_user_ids_set = set(self.get_az_user_ids())
# Add blocked users so that they get deactivated
logger.info(
"Converting filtering list, size of user_ids_to_process {}".format(len(user_ids_to_process_set))
)
for u in self.az_users:
if u["user_id"] in az_user_ids_set:
if ("blocked" in u.keys()) and (u["blocked"] is True):
user_ids_to_process_set.add(u["user_id"])
logger.info(
"After filtering out known CIS users/in auth0 blocked users, we will process {} users".format(
len(user_ids_to_process_set)
)
)
self.save_s3_cache({"az_users": self.az_users, "all_cis_user_ids": self.all_cis_user_ids})
self.fan_out(publisher, chunk_size, list(user_ids_to_process_set))
else:
# Don't cache auth0 list if we're just getting a single user, so that we get the most up to date data
# and because it's pretty fast for a single user
if len(user_ids) == 1:
os.environ["CIS_AUTHZERO_CACHE_TIME_SECONDS"] = "0"
logger.info("CIS_AUTHZERO_CACHE_TIME_SECONDS was set to 0 (caching disabled) for this run")
self.process(publisher, user_ids)
def fetch_all_cis_user_ids(self, publisher):
"""
Get all known CIS user ids for the whitelisted login methods
This is here because CIS only returns user ids per specific login methods
We also cache this
"""
self.s3_cache = self.get_s3_cache()
if self.s3_cache is not None:
self.all_cis_user_ids = self.s3_cache["all_cis_user_ids"]
return self.all_cis_user_ids
if self.all_cis_user_ids is not None:
return self.all_cis_user_ids
# Not cached, fetch it
self.s3_cache_require_update = True
# These are the users CIS knows about
self.all_cis_user_ids = []
for c in self.az_whitelisted_connections:
# FIXME we're not using the real login method here because
# Code in the CIS Vault matches against the start of `user_id` instead of the actual login method
# This is fine for most methods, except this one... ideally the code should change in the CIS Vault when it
# uses something else than DynamoDB and is able to match efficiently on other attributes
if c == "firefoxaccounts":
c = "oauth2|firefoxaccounts"
publisher.login_method = c
publisher.get_known_cis_users(include_inactive=False)
self.all_cis_user_ids += publisher.known_cis_users_by_user_id.keys()
# Invalidate publisher memory cache
publisher.known_cis_users = None
# XXX in case we got duplicates for some reason, we uniquify
self.all_cis_user_ids = list(set(self.all_cis_user_ids))
logger.info("Got {} known CIS users for all whitelisted login methods".format(len(self.all_cis_user_ids)))
return self.all_cis_user_ids
def get_az_user_ids(self):
"""
Extract a list of user_ids from a dict of auth0 users
return: list of user_ids
"""
if self.user_ids_only is not None:
return self.user_ids_only
self.user_ids_only = []
for u in self.fetch_az_users():
self.user_ids_only.append(u["user_id"])
return self.user_ids_only
def fetch_az_users(self, user_ids=None):
"""
        Fetches ALL valid users from auth0's database
Returns list of user attributes
"""
# Memory cached?
if self.az_users is not None:
return self.az_users
# S3 cached?
self.get_s3_cache()
if self.s3_cache is not None:
self.az_users = self.s3_cache["az_users"]
return self.az_users
# Don't use cache for just one user
if self.az_users is not None and (user_ids is not None and len(user_ids) != 1):
return self.az_users
# Not cached, fetch it
if user_ids is not None and len(user_ids) != 1:
self.s3_cache_require_update = True
az_api_url = self.config("AUTHZERO_API", namespace="cis", default="auth-dev.mozilla.auth0.com")
az_client_id = self.secret_manager.secret("az_client_id")
az_client_secret = self.secret_manager.secret("az_client_secret")
az_fields = self.az_cis_fields.keys()
# Build the connection query (excludes LDAP)
# Excluded: "Mozilla-LDAP", "Mozilla-LDAP-Dev"
# Excluded: Old users without any group
# This can also be retrieved from /api/v2/connections
# Ignore non-verified `email` (such as unfinished passwordless flows) as we don't consider these to be valid
# users
max_date = datetime.utcnow() - timedelta(days=31) # maximum login length + 1 day
max_date_str = max_date.strftime("%Y-%m-%d")
exclusion_query = (
f"logins_count:[2 TO *] AND NOT last_login:[* TO {max_date_str}] AND "
'(groups:(everyone) OR (NOT _exists_:"groups"))'
)
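        # Illustrative result (hypothetical date): the full query assembled
        # below ends up looking like
        #   logins_count:[2 TO *] AND NOT last_login:[* TO 2020-01-01] AND
        #   (groups:(everyone) OR (NOT _exists_:"groups")) AND
        #   email_verified:true AND (identities.connection:"email" OR ...)
        #   AND NOT (user_id:"ad|*")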
az_query = exclusion_query + " AND email_verified:true AND ("
t = ""
for azc in self.az_whitelisted_connections:
az_query = az_query + t + 'identities.connection:"{}"'.format(azc)
t = " OR "
az_query += ")"
# NOTE XXX: There is no way to tell auth0's ES "don't include matches where the first identity.connection is a
# blacklisted connection", so we do this instead. This 100% relies on auth0 user_ids NOT being opaque,
# unfortunately
az_query += ' AND NOT (user_id:"ad|*")'
# Build query for user_ids if some are specified (else it gets all of them)
# NOTE: We can't query all that many users because auth0 uses a GET query which is limited in size by httpd
# (nginx - 8kb by default)
if user_ids and len(user_ids) > 6:
logger.warning(
"Cannot query the requested number of user_ids from auth0, query would be too large. "
"Querying all user_ids instead."
)
user_ids = None
# we had to add this because it gets called by the CIS-New-User hook, where the query wouldn't work
# because exclusion_query excludes users who have only a single login success
        elif user_ids and len(user_ids) == 1:
logger.info("Restricting auth0 user query to single user_id: {}".format(user_ids[0]))
az_query = f'user_id:"{user_ids[0]}"'
elif user_ids:
logger.info("Restricting auth0 user query to user_ids: {}".format(user_ids))
# e.g.: user_id:"email|foo" OR user_id:"email|bar" OR user_id:"ad|Mozilla-LDAP|baz"
or_joined_user_query = " OR ".join([f'user_id:"{u}"' for u in user_ids])
az_query += f" AND ({or_joined_user_query})"
logger.debug("About to get Auth0 user list")
az_getter = GetToken(az_api_url)
az_token = az_getter.client_credentials(az_client_id, az_client_secret, "https://{}/api/v2/".format(az_api_url))
auth0 = Auth0(az_api_url, az_token["access_token"])
# Query the entire thing
logger.info("Querying auth0 user database, query is: {}".format(az_query))
user_list = []
# This is an artificial upper limit of 100*9999 (per_page*page) i.e. 999 900 users max - just in case things
# go wrong
retries = 15
backoff = 20
for p in range(0, 9999):
tmp = None
try:
tmp = auth0.users.list(page=p, per_page=100, fields=az_fields, q=az_query)["users"]
logger.debug("Requesting auth0 user list, at page {}".format(p))
except Auth0Error as e:
# 429 is Rate limit exceeded and we can still retry
if (e.error_code == 429 or e.status_code == 429) and retries > 0:
backoff += 1
logger.debug(
"Rate limit exceeded, backing off for {} seconds, retries left {} error: {}".format(
backoff, retries, e
)
)
retries -= 1
time.sleep(backoff)
else:
logger.warning("Error: {}".format(e))
raise
if tmp == [] or tmp is None:
# stop when our page is empty
logger.debug("Crawled {} pages from auth0 users API".format(p))
break
else:
user_list.extend(tmp)
logger.info("Received {} users from auth0".format(len(user_list)))
self.az_users = user_list
return self.az_users
def convert_az_users(self, az_users):
"""
Convert a list of auth0 user fields to cis_profile Users
@az_users list of dicts with user attributes
Returns [cis_profile.Users]
"""
profiles = []
logger.info("Converting auth0 users into CIS Profiles ({} user(s))".format(len(az_users)))
for u in az_users:
p = cis_profile.User()
# Must have fields
p.user_id.value = u["user_id"]
p.user_id.signature.publisher.name = "access_provider"
p.update_timestamp("user_id")
p.active.value = True
if "blocked" in u.keys():
if u["blocked"]:
p.active.value = False
p.active.signature.publisher.name = "access_provider"
p.update_timestamp("active")
p.primary_email.value = u["email"]
p.primary_email.metadata.display = "private"
p.primary_email.signature.publisher.name = "access_provider"
p.update_timestamp("primary_email")
try:
p.login_method.value = u["identities"][0]["connection"]
p.update_timestamp("login_method")
except IndexError:
logger.critical("Could not find login method for user {}, skipping integration".format(p.user_id.value))
continue
# Should have fields (cannot be "None" or "" but can be " ")
tmp = u.get("given_name", u.get("name", u.get("family_name", u.get("nickname", " "))))
p.first_name.value = tmp
p.first_name.metadata.display = "private"
p.first_name.signature.publisher.name = "access_provider"
p.update_timestamp("first_name")
tmp = u.get("family_name", " ")
p.last_name.value = tmp
p.last_name.metadata.display = "private"
p.last_name.signature.publisher.name = "access_provider"
p.update_timestamp("last_name")
            # May have fields (it's ok if these are not set)
tmp = u.get("node_id", None)
if tmp is not None:
p.identities.github_id_v4.value = tmp
p.identities.github_id_v4.display = "private"
p.identities.github_id_v4.signature.publisher.name = "access_provider"
p.update_timestamp("identities.github_id_v4")
if "identities" in u.keys():
# If blacklisted connection is in the first entry, skip (first entry = "main" user)
if u["identities"][0].get("connection") in self.az_blacklisted_connections:
logger.warning(
"ad/LDAP account returned from search - this should not happen. User will be skipped."
" User_id: {}".format(p.user_id.value)
)
continue
for ident in u["identities"]:
if ident.get("provider") == "google-oauth2":
p.identities.google_oauth2_id.value = ident.get("user_id")
p.identities.google_oauth2_id.metadata.display = "private"
p.identities.google_oauth2_id.signature.publisher.name = "access_provider"
p.update_timestamp("identities.google_oauth2_id")
p.identities.google_primary_email.value = p.primary_email.value
p.identities.google_primary_email.metadata.display = "private"
p.identities.google_primary_email.signature.publisher.name = "access_provider"
p.update_timestamp("identities.google_primary_email")
elif ident.get("provider") == "oauth2" and ident.get("connection") == "firefoxaccounts":
p.identities.firefox_accounts_id.value = ident.get("user_id")
p.identities.firefox_accounts_id.metadata.display = "private"
p.identities.firefox_accounts_id.signature.publisher.name = "access_provider"
p.update_timestamp("identities.firefox_accounts_id")
p.identities.firefox_accounts_primary_email.value = p.primary_email.value
p.identities.firefox_accounts_primary_email.metadata.display = "private"
p.identities.firefox_accounts_primary_email.signature.publisher.name = "access_provider"
p.update_timestamp("identities.firefox_accounts_primary_email")
elif ident.get("provider") == "github":
if ident.get("nickname") is not None:
# Match the hack in
# https://github.com/mozilla-iam/dino-park-whoami/blob/master/src/update.rs#L42 (see
# function definition at the top of the file as well)
p.usernames.value = {"HACK#GITHUB": ident.get("nickname")}
p.usernames.metadata.display = "private"
p.usernames.signature.publisher.name = "access_provider"
p.identities.github_id_v3.value = ident.get("user_id")
p.identities.github_id_v3.metadata.display = "private"
p.identities.github_id_v3.signature.publisher.name = "access_provider"
p.update_timestamp("identities.github_id_v3")
if "profileData" in ident.keys():
p.identities.github_primary_email.value = ident["profileData"].get("email")
p.identities.github_primary_email.metadata.verified = ident["profileData"].get(
"email_verified", False
)
p.identities.github_primary_email.metadata.display = "private"
p.identities.github_primary_email.signature.publisher.name = "access_provider"
p.update_timestamp("identities.github_primary_email")
p.identities.github_id_v4.value = ident["profileData"].get("node_id")
p.identities.github_id_v4.metadata.display = "private"
p.identities.github_id_v4.signature.publisher.name = "access_provider"
p.update_timestamp("identities.github_id_v4")
# Sign and verify everything
try:
p.sign_all(publisher_name="access_provider")
except Exception as e:
logger.critical(
"Profile data signing failed for user {} - skipped signing, verification "
"WILL FAIL ({})".format(p.primary_email.value, e)
)
logger.debug("Profile data {}".format(p.as_dict()))
try:
p.validate()
except Exception as e:
logger.critical(
"Profile schema validation failed for user {} - skipped validation, verification "
"WILL FAIL({})".format(p.primary_email.value, e)
)
logger.debug("Profile data {}".format(p.as_dict()))
try:
p.verify_all_publishers(cis_profile.User())
except Exception as e:
logger.critical(
"Profile publisher verification failed for user {} - skipped signing, verification "
"WILL FAIL ({})".format(p.primary_email.value, e)
)
logger.debug("Profile data {}".format(p.as_dict()))
logger.debug("Profile signed and ready to publish for user_id {}".format(p.user_id.value))
profiles.append(p)
logger.info("All profiles in this request were converted to CIS Profiles")
return profiles
def process(self, publisher, user_ids):
"""
Process profiles and post them
@publisher object the publisher object to operate on
@user_ids list of user ids to process in this batch
"""
# Only process the requested user_ids from the list of all az users
        # as the list often contains all users, not just the ones we requested
todo_user_ids = list(set(self.get_az_user_ids()) & set(user_ids))
todo_users = []
for u in self.az_users:
if u["user_id"] in todo_user_ids:
todo_users.append(u)
profiles = self.convert_az_users(todo_users)
logger.info("Processing {} profiles".format(len(profiles)))
publisher.profiles = profiles
failures = []
try:
failures = publisher.post_all(user_ids=user_ids, create_users=True)
except Exception as e:
logger.error("Failed to post_all() profiles. Trace: {}".format(format_exc()))
raise e
if len(failures) > 0:
logger.error("Failed to post {} profiles: {}".format(len(failures), failures))
def fan_out(self, publisher, chunk_size, user_ids_to_process):
"""
        Slices all users to process into chunks
        and self-invokes as many times as needed to complete all work in parallel lambda functions
        When self-invoking, this will effectively call self.process() instead of self.fan_out()
        Note: chunk_size should never result in the invoke() argument to exceed 128KB (len(Payload.encode('utf-8') <
        128KB) as this is the maximum AWS Lambda payload size.
        @publisher object the cis_publisher object to operate on
        @chunk_size int size of the chunk to process
        @user_ids_to_process list of str - user ids to slice into chunks
"""
sliced = [user_ids_to_process[i : i + chunk_size] for i in range(0, len(user_ids_to_process), chunk_size)]
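        # Illustrative example: chunk_size=2 and user_ids_to_process
        # ["a", "b", "c", "d", "e"] yields [["a", "b"], ["c", "d"], ["e"]].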
logger.info(
"No user_id selected. Creating slices of work, chunk size: {}, slices: {}, total users: {} and "
"faning-out work to self".format(chunk_size, len(sliced), len(user_ids_to_process))
)
lambda_client = boto3.client("lambda")
for s in sliced:
lambda_client.invoke(FunctionName=self.context.function_name, InvocationType="Event", Payload=json.dumps(s))
time.sleep(3) # give api calls a chance, otherwise this storms resources
logger.info("Exiting slicing function successfully")
|
mozilla-iam/cis
|
python-modules/cis_publisher/cis_publisher/auth0.py
|
Python
|
mpl-2.0
| 24,297
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyLibrary.dot import wrap
def es_query_template():
output = wrap({
"query": {"match_all": {}},
"from": 0,
"size": 0,
"sort": []
})
return output
def qb_sort_to_es_sort(sort):
output = []
for s in sort:
if s.sort == 1:
output.append(s.field)
elif s.sort == -1:
output.append({s.field: "desc"})
else:
pass
return output
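# Illustrative example (inputs are dot-accessible wrapped dicts from
# pyLibrary.dot; values are hypothetical):
#   qb_sort_to_es_sort(wrap([{"field": "a", "sort": 1},
#                            {"field": "b", "sort": -1}]))
# returns ["a", {"b": "desc"}].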
# FOR ELASTICSEARCH aggs
aggregates1_4 = {
"none": "none",
"one": "count",
"sum": "sum",
"add": "sum",
"count": "value_count",
"maximum": "max",
"minimum": "min",
"max": "max",
"min": "min",
"mean": "avg",
"average": "avg",
"avg": "avg",
"N": "count",
"X0": "count",
"X1": "sum",
"X2": "sum_of_squares",
"std": "std_deviation",
"stddev": "std_deviation",
"var": "variance",
"variance": "variance"
}
|
klahnakoski/intermittents
|
pyLibrary/queries/es14/util.py
|
Python
|
mpl-2.0
| 1,311
|
import copy
import csv
import datetime
import json
import mock
import os
import re
import shutil
import tempfile
import urllib
import pyquery
from cStringIO import StringIO
from nose.tools import eq_, ok_, assert_raises
from nose.plugins.skip import SkipTest
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import (
User,
AnonymousUser,
Group,
Permission
)
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from crashstats.base.tests.testbase import DjangoTestCase
from crashstats.crashstats import models
from crashstats.crashstats.management import PERMISSIONS
from .test_models import Response
SAMPLE_STATUS = {
"breakpad_revision": "1035",
"hits": [
{
"date_oldest_job_queued": "2012-09-28T20:39:33+00:00",
"date_recently_completed": "2012-09-28T20:40:00+00:00",
"processors_count": 1,
"avg_wait_sec": 16.407,
"waiting_job_count": 56,
"date_created": "2012-09-28T20:40:02+00:00",
"id": 410655,
"avg_process_sec": 0.914149
},
{
"date_oldest_job_queued": "2012-09-28T20:34:33+00:00",
"date_recently_completed": "2012-09-28T20:35:00+00:00",
"processors_count": 1,
"avg_wait_sec": 13.8293,
"waiting_job_count": 48,
"date_created": "2012-09-28T20:35:01+00:00",
"id": 410654,
"avg_process_sec": 1.24177
},
{
"date_oldest_job_queued": "2012-09-28T20:29:32+00:00",
"date_recently_completed": "2012-09-28T20:30:01+00:00",
"processors_count": 1,
"avg_wait_sec": 14.8803,
"waiting_job_count": 1,
"date_created": "2012-09-28T20:30:01+00:00",
"id": 410653,
"avg_process_sec": 1.19637
}
],
"total": 12,
"socorro_revision": "017d7b3f7042ce76bc80949ae55b41d1e915ab62",
"schema_revision": "schema_12345"
}
SAMPLE_META = """ {
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
} """
SAMPLE_UNREDACTED = """ {
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": "%s",
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"json_dump": {
"status": "OK",
"sensitive": {
"exploitability": "high"
},
"threads": []
}
} """
BUG_STATUS = """ {
"hits": [{"id": "222222",
"signature": "FakeSignature1"},
{"id": "333333",
"signature": "FakeSignature1"},
{"id": "444444",
"signature": "Other FakeSignature"}
]
} """
SAMPLE_SIGNATURE_SUMMARY = {
"reports": {
"products": [
{
"version_string": "33.0a2",
"percentage": "57.542",
"report_count": 103,
"product_name": "Firefox"
},
],
"uptime": [
{
"category": "< 1 min",
"percentage": "29.126",
"report_count": 30
}
],
"architecture": [
{
"category": "x86",
"percentage": "100.000",
"report_count": 103
}
],
"flash_version": [
{
"category": "[blank]",
"percentage": "100.000",
"report_count": 103
}
],
"graphics": [
{
"report_count": 24,
"adapter_name": None,
"vendor_hex": "0x8086",
"percentage": "23.301",
"vendor_name": None,
"adapter_hex": "0x0166"
}
],
"distinct_install": [
{
"crashes": 103,
"version_string": "33.0a2",
"product_name": "Firefox",
"installations": 59
}
],
"devices": [
{
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"report_count": 52311,
"percentage": "48.440",
}
],
"os": [
{
"category": "Windows 8.1",
"percentage": "55.340",
"report_count": 57
}
],
"process_type": [
{
"category": "Browser",
"percentage": "100.000",
"report_count": 103
}
],
"exploitability": [
{
"low_count": 0,
"high_count": 0,
"null_count": 0,
"none_count": 4,
"report_date": "2014-08-12",
"medium_count": 0
}
]
}
}
class RobotsTestViews(DjangoTestCase):
@override_settings(ENGAGE_ROBOTS=True)
def test_robots_txt(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Allow: /' in response.content)
@override_settings(ENGAGE_ROBOTS=False)
def test_robots_txt_disengage(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Disallow: /' in response.content)
class FaviconTestViews(DjangoTestCase):
def test_favicon(self):
tmp_static_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmp_static_root)
favicon_dir = os.path.join(tmp_static_root, 'img')
os.makedirs(favicon_dir)
favicon_path = os.path.join(favicon_dir, 'favicon.ico')
with open(favicon_path, 'wb') as icon:
icon.write('totally fake')
with self.settings(STATIC_ROOT=tmp_static_root):
response = self.client.get('/favicon.ico')
eq_(response.status_code, 200)
ok_('image/x-icon' in response['Content-Type'])
class BaseTestViews(DjangoTestCase):
@mock.patch('requests.get')
def setUp(self, rget):
super(BaseTestViews, self).setUp()
# checking settings.CACHES isn't as safe as `cache.__class__`
if 'LocMemCache' not in cache.__class__.__name__:
raise ImproperlyConfigured(
                'These tests require that you use LocMemCache when running'
)
# we do this here so that the current/versions thing
# is cached since that's going to be called later
# in every view more or less
def mocked_get(url, params, **options):
now = datetime.datetime.utcnow()
yesterday = now - datetime.timedelta(days=1)
if '/platforms/' in url:
return Response({
"hits": [
{
'code': 'win',
'name': 'Windows',
},
{
'code': 'mac',
'name': 'Mac OS X',
},
{
'code': 'lin',
'name': 'Linux',
}
],
"total": 6
})
if 'products/' in url:
return Response("""
{"products": [
"WaterWolf",
"NightTrain",
"SeaMonkey",
"LandCrab"
],
"hits": {
"WaterWolf": [
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Beta",
"id": 922},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Stable",
"id": 920},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "2012-03-09",
"start_date": "2012-03-08",
"featured": true,
"version": "19.1",
"release": "Nightly",
"id": 928},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "20.0",
"release": "Nightly",
"id": 923}
],
"NightTrain":[
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Aurora",
"id": 924},
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Nightly",
"id": 925}
],
"SeaMonkey": [
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(yesterday)s",
"start_date": "2012-03-08",
"featured": true,
"version": "9.5",
"release": "Alpha",
"id": 921},
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "10.5",
"release": "nightly",
"id": 926}
],
"LandCrab": [
{"product": "LandCrab",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": false,
"version": "1.5",
"release": "Release",
"id": 927}
]
},
"total": 4
}
""" % {'end_date': now.strftime('%Y-%m-%d'),
'yesterday': yesterday.strftime('%Y-%m-%d')})
if '/supersearch/fields/' in url:
from crashstats.supersearch.tests.test_views import (
SUPERSEARCH_FIELDS_MOCKED_RESULTS
)
results = copy.copy(SUPERSEARCH_FIELDS_MOCKED_RESULTS)
# to be realistic we want to introduce some dupes
# that have a different key but its `in_database_name`
# is one that is already in the hardcoded list (the
# baseline)
assert 'accessibility' not in results
results['accessibility'] = {
'name': 'accessibility',
'query_type': 'string',
'namespace': 'raw_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'in_database_name': 'Accessibility',
}
return Response(results)
raise NotImplementedError(url)
rget.side_effect = mocked_get
# call these here so it gets patched for each test because
# it gets used so often
from crashstats.crashstats.models import CurrentVersions, Platforms
CurrentVersions().get()
Platforms().get()
from crashstats.supersearch.models import SuperSearchFields
SuperSearchFields().get()
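        # The three .get() calls above run inside setUp on purpose so their
        # results land in the LocMemCache; a minimal sketch of the pattern,
        # with a hypothetical cached model class Foo, would be:
        #     Foo().get()   # first call goes through the mocked requests.get
        #     Foo().get()   # subsequent calls are served from the cache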
def tearDown(self):
super(BaseTestViews, self).tearDown()
cache.clear()
def _login(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
assert self.client.login(username='test', password='secret')
return user
def _logout(self):
self.client.logout()
def _add_permission(self, user, codename, group_name='Hackers'):
group = self._create_group_with_permission(codename)
user.groups.add(group)
def _create_group_with_permission(self, codename, group_name='Group'):
appname = 'crashstats'
ct, __ = ContentType.objects.get_or_create(
model='',
app_label=appname,
defaults={'name': appname}
)
permission, __ = Permission.objects.get_or_create(
codename=codename,
name=PERMISSIONS[codename],
content_type=ct
)
group, __ = Group.objects.get_or_create(
name=group_name,
)
group.permissions.add(permission)
return group
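    # A minimal usage sketch for the two helpers above (mirroring what the
    # exploitability tests further down actually do):
    #     user = self._login()
    #     self._add_permission(user, 'view_exploitability')
    #     assert user.has_perm('crashstats.view_exploitability')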
class TestGoogleAnalytics(BaseTestViews):
@override_settings(GOOGLE_ANALYTICS_ID='xyz123')
@override_settings(GOOGLE_ANALYTICS_DOMAIN='test.biz')
@mock.patch('requests.get')
def test_google_analytics(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if 'products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('xyz123' in response.content)
ok_('test.biz' in response.content)
class TestViews(BaseTestViews):
def test_contribute_json(self):
response = self.client.get('/contribute.json')
eq_(response.status_code, 200)
# should be valid JSON
ok_(json.loads(response.content))
eq_(response['Content-Type'], 'application/json')
@mock.patch('requests.get')
def test_handler500(self, rget):
root_urlconf = __import__(
settings.ROOT_URLCONF,
globals(),
locals(),
['urls'],
-1
)
# ...so that we can access the 'handler500' defined in there
par, end = root_urlconf.handler500.rsplit('.', 1)
# ...which is an importable reference to the real handler500 function
views = __import__(par, globals(), locals(), [end], -1)
# ...and finally we have the handler500 function at hand
handler500 = getattr(views, end)
# to make a mock call to the django view functions you need a request
fake_request = RequestFactory().request(**{'wsgi.input': None})
# Need a fake user for the persona bits on crashstats_base
fake_request.user = AnonymousUser()
# the reason for first causing an exception to be raised is because
# the handler500 function is only called by django when an exception
# has been raised which means sys.exc_info() is something.
try:
raise NameError('sloppy code')
except NameError:
# do this inside a frame that has a sys.exc_info()
response = handler500(fake_request)
eq_(response.status_code, 500)
ok_('Internal Server Error' in response.content)
ok_('id="products_select"' not in response.content)
def test_handler404(self):
url = reverse('crashstats:home', args=('Unknown',))
response = self.client.get(url)
eq_(response.status_code, 404)
ok_('Page not Found' in response.content)
ok_('id="products_select"' not in response.content)
def test_homepage_redirect(self):
response = self.client.get('/')
eq_(response.status_code, 302)
destination = reverse('crashstats:home',
args=[settings.DEFAULT_PRODUCT])
ok_(destination in response['Location'])
def test_homepage_products_redirect_without_versions(self):
url = reverse('crashstats:home', args=['WaterWolf'])
# some legacy URLs have this
url += '/versions/'
response = self.client.get(url)
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
destination = reverse('crashstats:home', args=['WaterWolf'])
ok_(destination in response['Location'])
def test_legacy_query_redirect(self):
response = self.client.get('/query/query?foo=bar')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
ok_(reverse('crashstats:query') + '?foo=bar' in response['Location'])
@mock.patch('requests.get')
def test_buginfo(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response('{"bugs": [{"product": "allizom.org"}]}')
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': '123,456'})
eq_(response.status_code, 400)
response = self.client.get(url, {'include_fields': 'product'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': ' 123, 456 ',
'include_fields': ' product'})
eq_(response.status_code, 200)
struct = json.loads(response.content)
ok_(struct['bugs'])
eq_(struct['bugs'][0]['product'], 'allizom.org')
@mock.patch('requests.get')
def test_buginfo_with_caching(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response("""{"bugs": [
{"id": "987",
"product": "allizom.org",
"summary": "Summary 1"},
{"id": "654",
"product": "mozilla.org",
"summary": "Summary 2"}
]}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'bug_ids': '987,654',
'include_fields': 'product,summary'
})
eq_(response.status_code, 200)
struct = json.loads(response.content)
eq_(struct['bugs'][0]['product'], 'allizom.org')
eq_(struct['bugs'][0]['summary'], 'Summary 1')
eq_(struct['bugs'][0]['id'], '987')
eq_(struct['bugs'][1]['product'], 'mozilla.org')
eq_(struct['bugs'][1]['summary'], 'Summary 2')
eq_(struct['bugs'][1]['id'], '654')
# expect to be able to find this in the cache now
cache_key = 'buginfo:987'
eq_(cache.get(cache_key), struct['bugs'][0])
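        # each bug is cached individually under 'buginfo:<id>', so the
        # second bug should, by the same token, be at cache key
        # 'buginfo:654'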
@mock.patch('requests.get')
def test_home(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
# Testing with unknown product
url = reverse('crashstats:home', args=('InternetExplorer',))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with unknown version for product
url = reverse('crashstats:home', args=('WaterWolf', '99'))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with valid version for product
url = reverse('crashstats:home', args=('WaterWolf', '19.0'))
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['product_versions'])
eq_(struct['count'], 1)
@mock.patch('requests.get')
def test_frontpage_json_bad_request(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'Neverheardof'})
eq_(response.status_code, 400)
response = self.client.get(url, {'versions': '999.1'})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '99.9' # mismatch
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '19.0'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '-100'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '10'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'junk'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'build'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'report'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json_no_data_for_version(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '20.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
# Even though there was no data, the product_versions
# property should still exist and be populated.
eq_(struct['count'], 0)
ok_(struct['product_versions'])
selected_product = struct['product_versions'][0]
eq_(selected_product['product'], 'WaterWolf')
eq_(selected_product['version'], '20.0')
@mock.patch('requests.get')
def test_products_list(self, rget):
url = reverse('crashstats:products_list')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"Fennec"
],
"hits": [
{
"sort": "1",
"default_version": "15.0.1",
"release_name": "firefox",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
},
{
"sort": "3",
"default_version": "10.0.6esr",
"release_name": "mobile",
"rapid_release_version": "5.0",
"product_name": "Fennec"
}],
"total": "2"
}
""")
rget.side_effect = mocked_get
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@mock.patch('requests.get')
def test_gccrashes(self, rget):
url = reverse('crashstats:gccrashes', args=('WaterWolf',))
unknown_product_url = reverse('crashstats:gccrashes',
args=('NotKnown',))
invalid_version_url = reverse('crashstats:gccrashes',
args=('WaterWolf', '99'))
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"products": ["WaterWolf"],
"hits": [
{
"product": "WaterWolf",
"version": "20.0",
"release": "Nightly"
}
],
"total": "1"
}
""")
            raise NotImplementedError(options['url'])
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Total Volume of GC Crashes for WaterWolf 19.1'
in response.content)
response = self.client.get(invalid_version_url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
eq_(doc('.django-form-error li b')[0].text, 'Version:')
response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_gccrashes_json(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, params, **options):
if '/gccrashes' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-01-27',
'end_date': '2014-02-04'
})
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
@mock.patch('requests.get')
def test_gccrashes_json_bad_request(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, **options):
if 'gccrashes/' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': 'XXXXXX', # not even close
'end_date': '2014-02-04'
})
        eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-33', # crazy date
'end_date': '2014-02-04'
})
        eq_(response.status_code, 400)
# same but on the end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-13',
'end_date': '2014-02-44' # crazy date
})
        eq_(response.status_code, 400)
# start_date > end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-02',
'end_date': '2014-01-01' # crazy date
})
        eq_(response.status_code, 400)
def test_crash_trends(self):
url = reverse('crashstats:crash_trends', args=('WaterWolf',))
no_nightly_url = reverse('crashstats:crash_trends', args=('LandCrab',))
inconsistent_case_url = reverse('crashstats:crash_trends',
args=('SeaMonkey',))
        unknown_product_url = reverse('crashstats:crash_trends',
                                      args=('NotKnown',))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For WaterWolf' in response.content)
        response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
# This used to cause a 500 because there is no Nightly associated
# with this product, should 200 now.
response = self.client.get(no_nightly_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For LandCrab' in response.content)
# This used to cause a 500 because of inconsistent case for
# release names in the DB, causing some releases to be returned
# as 'nightly' instead of 'Nightly'. This should now return 200.
response = self.client.get(inconsistent_case_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For SeaMonkey' in response.content)
@mock.patch('requests.get')
def test_get_nightlies_for_product_json(self, rget):
url = reverse('crashstats:get_nightlies_for_product_json')
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"hits": [
{
"sort": "1",
"default_version": "5.0a1",
"release_name": "waterwolf",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
}],
"total": "1"
}
""")
            raise NotImplementedError(options['url'])
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
ok_('application/json' in response['content-type'])
eq_(response.status_code, 200)
        # ok_()'s second argument is only an assertion message, so check
        # the payload explicitly
        ok_('20.0' in response.content)
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
        ok_('18.0' in response.content)
        ok_('19.0' in response.content)
response = self.client.get(url, {'product': 'Unknown'})
        eq_(json.loads(response.content), [])
@mock.patch('requests.get')
def test_crashtrends_json(self, rget):
url = reverse('crashstats:crashtrends_json')
def mocked_get(url, params, **options):
ok_('start_date' in params)
eq_('2012-10-01', params['start_date'])
ok_('end_date' in params)
eq_('2012-10-10', params['end_date'])
if '/crashtrends' in url:
return Response("""
{
"crashtrends": [{
"build_date": "2012-10-10",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 6,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
},
{
"build_date": "2012-10-06",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 2,
"report_count": 162,
"report_date": "2012-10-08",
"product_name": "WaterWolf"
},
{
"build_date": "2012-09-29",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 5,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
}]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['total'], 2)
# Test with product that does not have a nightly
response = self.client.get(url, {
'product': 'LandCrab',
'version': '9.5',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 400)
ok_('text/html' in response['content-type'])
ok_(
'LandCrab is not one of the available choices'
in response.content
)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_ranks_bybug(self, rget, rpost):
url = reverse('crashstats:topcrasher_ranks_bybug')
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789", "signature": "FakeSignature 1"},
{"id": "123456789", "signature": "FakeSignature 3"}]}
""")
def mocked_get(url, params, **options):
signature_summary_data = copy.deepcopy(SAMPLE_SIGNATURE_SUMMARY)
if '/signaturesummary' in url:
signature_summary_data['reports']['products'] = [
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "WaterWolf",
},
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "NightTrain",
},
{
"version_string": "13.0b4",
"percentage": "9.244",
"report_count": 9983,
"product_name": "WaterWolf",
}
]
return Response(signature_summary_data)
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 1",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
},
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 2",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 2}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url, {'bug_number': '123456789'})
ok_('FakeSignature 1' in response.content)
ok_('FakeSignature 2' not in response.content)
ok_('FakeSignature 3' in response.content)
report_list_url = reverse('crashstats:report_list')
report_list_url1 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 1')
)
)
ok_(report_list_url1 in response.content)
report_list_url3 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 3')
)
)
ok_(report_list_url3 in response.content)
# ensure that multiple products appear
doc = pyquery.PyQuery(response.content)
eq_(doc('td[class=product]')[0].text, 'WaterWolf')
eq_(doc('td[class=product]')[1].text, 'NightTrain')
eq_(response.status_code, 200)
# we also have a signature with no active product+version
ok_('Not found in active topcrash lists' in response.content)
response = self.client.get(url, {'bug_number': '123bad'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_number': '1234564654564646'})
eq_(response.status_code, 400)
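        # for reference: urllib.quote_plus('FakeSignature 1') encodes the
        # space as '+', i.e. 'FakeSignature+1', which is the form the two
        # report_list URLs asserted above take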
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
reports_count_default = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
reports_count_100 = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', None, None,
None, '100'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""{
"hits": [
{"id": 123456789,
"signature": "Something"},
{"id": 22222,
"signature": "FakeSignature1 \u7684 Japanese"},
{"id": 33333,
"signature": "FakeSignature1 \u7684 Japanese"}
]
}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1 \u7684 Japanese",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
response = self.client.get(reports_count_default)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '50')
# there's actually only one such TD
bug_ids = [x.text for x in doc('td.bug_ids_more > a')]
# higher bug number first
eq_(bug_ids, ['33333', '22222'])
response = self.client.get(reports_count_100)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '100')
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
# we should be able unpack it
reader = csv.reader(StringIO(response.content))
line1, line2 = reader
eq_(line1[0], 'Rank')
try:
eq_(int(line2[0]), 1)
except Exception:
raise SkipTest
# bytestring when exported as CSV with UTF-8 encoding
eq_(line2[4], 'FakeSignature1 \xe7\x9a\x84 Japanese')
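        # sanity note: '\xe7\x9a\x84' is simply the UTF-8 encoding of the
        # character asserted above, i.e.
        #     u'\u7684'.encode('utf-8') == '\xe7\x9a\x84'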
def test_topcrasher_with_invalid_version(self):
# 0.1 is not a valid release version
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
def test_topcrasher_with_product_sans_release(self):
# SnowLion is not a product at all
url = reverse('crashstats:topcrasher',
args=('SnowLion', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
# SeaMonkey is a product but has no active releases
url = reverse('crashstats:topcrasher',
args=('SeaMonkey', '9.5'))
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_without_any_signatures(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
        # no signatures, the CSV is empty apart from the header
eq_(len(response.content.splitlines()), 1)
reader = csv.reader(StringIO(response.content))
line1, = reader
eq_(line1[0], 'Rank')
def test_topcrasher_without_versions_redirect(self):
response = self.client.get('/topcrasher/products/WaterWolf/versions/')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topcrasher',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_exploitable_crashes_without_product(self, rget):
url = reverse('crashstats:exploitable_crashes_legacy')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 301)
correct_url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
ok_(response['location'].endswith(correct_url))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "2.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
response = self.client.get(url)
        eq_(response.status_code, 302)
        ok_(settings.LOGIN_URL in response['Location'])
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
        ok_(settings.LOGIN_URL in response['Location'])
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
# only this bug ID should be shown
ok_('101010101' in response.content)
# not these bug IDs
ok_('222222222' not in response.content)
ok_('111111111' not in response.content)
# if you try to mess with the paginator it should just load page 1
response = self.client.get(url, {'page': 'meow'})
        eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes_by_product_and_version(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '19.0')
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
ok_('version' in params)
eq_('19.0', params['version'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "123.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
@mock.patch('requests.get')
def test_exploitable_crashes_by_unknown_version(self, rget):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '999.0')
)
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_daily(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0": {
"2012-09-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:19.0": {
"2012-08-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "19.0"
}
},
"WaterWolf:18.0": {
"2012-08-13": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "18.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0']
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
ok_('18.0' in response.content.split('id="version3"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version2"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version1"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version0"')[1].
split("</select>")[0])
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'format': 'csv'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, I should be able to read it
reader = csv.reader(response)
        # because response is an iterator that will return a blank line
        # first, we skip that first row
rows = list(reader)[1:]
ok_(rows)
head_row = rows[0]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 Crashes',
'WaterWolf 20.0 ADI',
'WaterWolf 20.0 Throttle',
'WaterWolf 20.0 Ratio',
'WaterWolf 19.0 Crashes',
'WaterWolf 19.0 ADI',
'WaterWolf 19.0 Throttle',
'WaterWolf 19.0 Ratio'
]
)
first_row = rows[1]
eq_(first_row[0], '2012-09-23')
# Test dates don't cause problems
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'date_start': '2010-01-01'
})
eq_(response.status_code, 200)
@mock.patch('crashstats.crashstats.models.Platforms')
@mock.patch('requests.get')
def test_daily_by_os(self, rget, platforms_get):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
ok_('separated_by' in params)
eq_('os', params['separated_by'])
ok_('os' in params)
eq_(['Windows', 'Amiga'], params['os'])
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0:win": {
"2012-09-23": {
"os": "Windows",
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:20.0:ami": {
"2012-09-23": {
"os": "Amiga",
"adu": 7377,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 871,
"throttle": 0.1,
"version": "20.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_platforms_get():
return [
{'code': 'win', 'name': 'Windows', 'display': True},
{'code': 'ami', 'name': 'Amiga', 'display': True},
{'code': 'win', 'name': 'Windows95'}, # not displayed
]
platforms_get().get.side_effect = mocked_platforms_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'format': 'csv',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, we should be able to read it
reader = csv.reader(response)
        # because response is an iterator that will return a blank line
        # first, we skip that first row
rows = list(reader)[1:]
head_row = rows[0]
first_row = rows[1]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 on Windows Crashes',
'WaterWolf 20.0 on Windows ADI',
'WaterWolf 20.0 on Windows Throttle',
'WaterWolf 20.0 on Windows Ratio',
'WaterWolf 20.0 on Amiga Crashes',
'WaterWolf 20.0 on Amiga ADI',
'WaterWolf 20.0 on Amiga Throttle',
'WaterWolf 20.0 on Amiga Ratio'
]
)
eq_(first_row[0], '2012-09-23')
def test_daily_legacy_redirect(self):
url = reverse('crashstats:daily')
response = self.client.get(url + '?p=WaterWolf&v[]=Something')
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('v=Something' in response['Location'].split('?')[1])
response = self.client.get(
url + '?p=WaterWolf&os[]=Something&os[]=Else'
)
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('os=Something' in response['Location'].split('?')[1])
ok_('os=Else' in response['Location'].split('?')[1])
@mock.patch('requests.get')
def test_daily_with_bad_input(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'date_start': u' \x00'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'date_range_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'hang_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'format': 'csv',
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# last sanity check
response = self.client.get(url, {
'p': 'WaterWolf',
})
eq_(response.status_code, 200)
def test_quick_search(self):
url = reverse('crashstats:quick_search')
# Test with no parameter.
response = self.client.get(url)
eq_(response.status_code, 302)
target = reverse('supersearch.search')
ok_(response['location'].endswith(target))
# Test with a signature.
response = self.client.get(
url,
{'query': 'moz'}
)
eq_(response.status_code, 302)
target = reverse('supersearch.search') + '?signature=%7Emoz'
ok_(response['location'].endswith(target))
# Test with a crash_id.
crash_id = '1234abcd-ef56-7890-ab12-abcdef130802'
response = self.client.get(
url,
{'query': crash_id}
)
eq_(response.status_code, 302)
target = reverse(
'crashstats:report_index',
kwargs=dict(crash_id=crash_id)
)
ok_(response['location'].endswith(target))
# Test a simple search containing a crash id and spaces
crash_id = ' 1234abcd-ef56-7890-ab12-abcdef130802 '
response = self.client.get(
url,
{'query': crash_id}
)
eq_(response.status_code, 302)
ok_(response['location'].endswith(target))
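        # note on the '%7Emoz' target above: '%7E' is the URL-encoded '~',
        # so a bare search term becomes a '~moz' signature filter
        # (presumably Super Search's substring-style match)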
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query(self, rget, rpost):
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [
{
"id": "123456",
"signature": "nsASDOMWindowEnumerator::GetNext()"
}
],
"total": 1
}
""")
def mocked_get(url, params, **options):
assert '/search/signatures' in url
if 'products' in params and 'WaterWolf' in params['products']:
return Response("""{
"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 56,
"is_mac": 0,
"numhang": 0
},
{
"count": 13,
"signature": "mySignatureIsCool",
"numcontent": 0,
"is_windows": 10,
"is_linux": 2,
"numplugin": 0,
"is_mac": 1,
"numhang": 0
},
{
"count": 2,
"signature": "mineIsCoolerThanYours",
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
},
{
"count": 2,
"signature": null,
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
}
],
"total": 4
} """)
elif 'products' in params and 'NightTrain' in params['products']:
return Response('{"hits": [], "total": 0}')
elif 'products' in params and 'SeaMonkey' in params['products']:
ok_('plugin_search_mode' in params)
eq_(params['plugin_search_mode'], 'is_exactly')
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 533,
"is_mac": 0,
"numhang": 0,
"pluginname": "superAddOn",
"pluginfilename": "addon.dll",
"pluginversion": "1.2.3"
}],
"total": 1
}
""")
else:
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}],
"total": 1
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
# Verify that the passed product is selected in search form
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('value="NightTrain" selected' in response.content)
# Verify that the passed version is selected in nav
response = self.client.get(url, {
'product': 'NightTrain',
'version': 'NightTrain:18.0'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
# Because versions in the search form only gets set on DOM ready,
# we here ensure that the version was passed and set by checking
# that the correct version is selected in the versions drop-down.
ok_('option value="18.0" selected' in response.content)
response = self.client.get(url, {
'product': 'WaterWolf',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('mySignatureIsCool' in response.content)
ok_('mineIsCoolerThanYours' in response.content)
ok_('(null signature)' in response.content)
# Test that the default value for query_type is 'contains'
ok_('<option value="contains" selected' in response.content)
# Test with empty results
response = self.client.get(url, {
'product': 'NightTrain',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('The maximum query date' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('Results within' in response.content)
ok_('No results were found' in response.content)
response = self.client.get(url, {'query': 'nsASDOMWindowEnumerator'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('123456' in response.content)
# Test that the signature parameter is used as default value
response = self.client.get(url, {'signature': 'myFunctionIsCool'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatures-list"' not in response.content)
ok_('value="myFunctionIsCool"' in response.content)
# Test that null bytes break the page cleanly
response = self.client.get(url, {'date': u' \x00'})
eq_(response.status_code, 400)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('Enter a valid date/time' in response.content)
# Test that do_query forces the query
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
# Test that old query types are changed
# Test that plugin data is displayed
response = self.client.get(url, {
'do_query': 1,
'product': 'SeaMonkey',
'plugin_query_type': 'exact',
'process_type': 'plugin',
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('Plugin Filename' in response.content)
ok_('Plugin Name/Ver' in response.content)
ok_('addon.dll' in response.content)
ok_('superAddOn 1.2.3' in response.content)
# Test 'all' is an accepted value for report_type and hang_type
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf',
'hang_type': 'all',
'process_type': 'all',
})
eq_(response.status_code, 200)
ok_('table id="signatureList"' in response.content)
ok_('value="any" checked' in response.content)
        # Test default date
expected = datetime.datetime.utcnow()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(expected.strftime('%m/%d/%Y %H:00:00') in response.content)
# Test passed date
response = self.client.get(url, {
'date': '11/27/2031 10:10:10'
})
eq_(response.status_code, 200)
ok_('11/27/2031 10:10:10' in response.content)
# Test value of build ids
response = self.client.get(url, {
'build_id': '12345'
})
eq_(response.status_code, 200)
ok_('value="12345"' in response.content)
response = self.client.get(url, {
'build_id': '12345,54321'
})
eq_(response.status_code, 200)
ok_('value="12345, 54321"' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_range(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
            hits = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
# Test an out-of-range date range
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
ok_('Admins may log in' in response.content)
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
# Test an out-of-range date range for a logged in user
user = self._login()
group = self._create_group_with_permission('run_long_queries')
user.groups.add(group)
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
# we're logged in, that works now
ok_('The maximum query date' not in response.content)
# ... but this doesn't
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 30
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
# an admin won't see that message
ok_('Admins may log in' not in response.content)
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_pagination(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
            hits = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {'do_query': 1})
eq_(response.status_code, 200)
next_page_url = '%s?do_query=1&page=2' % url
ok_(next_page_url in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_summary(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'query': 'test',
'query_type': 'contains'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature contains 'test'" in response.content)
ok_('the crashing process was of any type' in response.content)
response = self.client.get(url, {
'query': 'test',
'query_type': 'is_exactly',
'build_id': '1234567890',
'product': ['WaterWolf', 'NightTrain'],
'version': ['WaterWolf:18.0'],
'platform': ['mac'],
'process_type': 'plugin',
'plugin_query_type': 'starts_with',
'plugin_query_field': 'filename',
'plugin_query': 'lib'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature is exactly 'test'" in response.content)
ok_('product is one of WaterWolf, NightTrain' in response.content)
ok_('version is one of WaterWolf:18.0' in response.content)
ok_('platform is one of Mac OS X' in response.content)
ok_('for build 1234567890' in response.content)
ok_('the crashing process was a plugin' in response.content)
ok_('and its filename starts with lib' in response.content)
@override_settings(SEARCH_MIDDLEWARE_IMPL='elasticsearch')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_settings(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('elasticsearch', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
})
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('postgres', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'postgres'
})
eq_(response.status_code, 200)
@override_settings(SEARCH_MIDDLEWARE_IMPL='mongodb')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url_over_settings(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('mysql', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'mysql'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_plot_signature(self, rget):
def mocked_get(url, params, **options):
if '/crashes/signature_history' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
# missing signature
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02', ''))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid start date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-33', '2012-12-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid end date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-28', '2012-13-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# valid dates
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['signature'])
@mock.patch('requests.get')
def test_explosive_view_without_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [], "total": 0}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'No explosive crashes found' in resp.content
@mock.patch('requests.get')
def test_explosive_view_with_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [
{"date": "2013-09-01",
"signatures": ["signature1", "signature2"]
}
], "total": 1}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'is explosive' in resp.content
@mock.patch('requests.get')
def test_explosive_data(self, rget):
url = reverse('crashstats:explosive_data',
args=('signature', '2013-03-05'))
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
return Response("""{
"hits": {
"2013-02-26": 100,
"2013-02-27": 100,
"2013-02-28": 100,
"2013-03-01": 100,
"2013-03-02": 100,
"2013-03-03": 100,
"2013-03-04": 100,
"2013-03-05": 100,
"2013-03-06": 100,
"2013-03-07": 100,
"2013-03-08": 100
}
}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
ok_('counts' in resp)
# returns 11 days of data since the requested date is in the past:
# the first day is 7 days prior, the last is 3 days after.
eq_(len(resp['counts']), 11)
eq_(resp['counts'][0][0], '2013-02-26')
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], '2013-03-08')
eq_(resp['counts'][-1][1], 100)
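# Same endpoint, but with the explosion date set to today: the window
# can't extend into the future, so it runs from 10 days ago through
# today instead of the usual 7-before/3-after spread.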
@mock.patch('requests.get')
def test_explosive_data_today(self, rget):
now = datetime.datetime.utcnow()
start = now - datetime.timedelta(10)
now = now.strftime('%Y-%m-%d')
start = start.strftime('%Y-%m-%d')
url = reverse('crashstats:explosive_data', args=('signature', now))
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
dates = []
current = datetime.datetime.strptime(start, "%Y-%m-%d")
end = datetime.datetime.strptime(now, "%Y-%m-%d")
while current <= end:
dates.append(current.strftime("%Y-%m-%d"))
current += datetime.timedelta(1)
return Response("""{
"hits": {
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100
}
}""" % tuple(dates))
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
eq_(resp['counts'][0][0], start)
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], now)
eq_(resp['counts'][-1][1], 100)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topchangers(self, rget, rpost):
url = reverse('crashstats:topchangers',
args=('WaterWolf', '19.0'))
bad_url = reverse('crashstats:topchangers',
args=('SeaMonkey', '19.0'))
bad_url2 = reverse('crashstats:topchangers',
args=('WaterWolf', '19.999'))
url_wo_version = reverse('crashstats:topchangers',
args=('WaterWolf',))
def mocked_post(**options):
assert 'by=signatures' in options['url'], options['url']
return Response("""
{"bug_associations": [{"bug_id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url_wo_version)
eq_(response.status_code, 200)
# invalid version for the product name
response = self.client.get(bad_url)
eq_(response.status_code, 404)
# invalid version for the product name
response = self.client.get(bad_url2)
eq_(response.status_code, 404)
response = self.client.get(url)
eq_(response.status_code, 200)
def test_topchangers_without_versions_redirect(self):
response = self.client.get('/topchangers/products/WaterWolf/versions/')
redirect_code = 301 if settings.PERMANENT_LEGACY_REDIRECTS else 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topchangers',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_signature_summary(self, rget):
def mocked_get(url, params, **options):
if '/signaturesummary' in url:
assert params['report_types']
return Response({
"reports": {
"products": [
{
"version_string": "33.0a2",
"percentage": "57.542",
"report_count": 103,
"product_name": "Firefox"
},
],
"uptime": [
{
"category": "< 1 min",
"percentage": "29.126",
"report_count": 30
}
],
"architecture": [
{
"category": "x86",
"percentage": "100.000",
"report_count": 103
}
],
"flash_version": [
{
"category": "[blank]",
"percentage": "100.000",
"report_count": 103
}
],
"graphics": [
{
"report_count": 24,
"adapter_name": None,
"vendor_hex": "0x8086",
"percentage": "23.301",
"vendor_name": None,
"adapter_hex": "0x0166"
}
],
"distinct_install": [
{
"crashes": 103,
"version_string": "33.0a2",
"product_name": "Firefox",
"installations": 59
}
],
"devices": [
{
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"report_count": 52311,
"percentage": "48.440",
}
],
"os": [
{
"category": "Windows 8.1",
"percentage": "55.340",
"report_count": 57
}
],
"process_type": [
{
"category": "Browser",
"percentage": "100.000",
"report_count": 103
}
],
"exploitability": [
{
"low_count": 0,
"high_count": 0,
"null_count": 0,
"none_count": 4,
"report_date": "2014-08-12",
"medium_count": 0
}
]
}
})
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
# first try without the necessary parameters
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {
'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['architectures'])
ok_(struct['flashVersions'])
ok_(struct['percentageByOs'])
ok_(struct['processTypes'])
ok_(struct['productVersions'])
ok_(struct['uptimeRange'])
ok_(struct['distinctInstall'])
ok_(struct['devices'])
ok_(struct['graphics'])
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
# percentages are turned into strings as they're fed straight into
# a mustache template; for example:
eq_(struct['uptimeRange'][0]['percentage'], '29.13')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url, {'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
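# With only the view_flash_exploitability permission, the
# exploitability score is shown when every reported Flash version is
# a real version (sig1) and hidden when any of them is "[blank]"
# (sig2).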
@mock.patch('requests.get')
def test_signature_summary_flash_exploitability(self, rget):
def mocked_get(url, params, **options):
signature_summary_data = copy.deepcopy(SAMPLE_SIGNATURE_SUMMARY)
if '/signaturesummary' in url:
if 'sig1' in params['signature']:
signature_summary_data['reports']['flash_version'] = [
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "11.9.900.152",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
elif 'sig2' in params['signature']:
signature_summary_data['reports']['flash_version'] = [
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "[blank]",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
return Response(signature_summary_data)
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_flash_exploitability')
user.groups.add(group)
response = self.client.get(url, {
'range_value': '1',
'signature': 'sig1',
'version': 'WaterWolf:19.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
response = self.client.get(url, {'range_value': '1',
'signature': 'sig2', # different
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
@mock.patch('requests.get')
def test_status(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('schema_12345' in response.content)
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('Sep 28 2012 20:30:01' in response.content)
@mock.patch('requests.get')
def test_status_revision(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_revision')
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response.content, '017d7b3f7042ce76bc80949ae55b41d1e915ab62')
ok_('text/plain' in response['content-type'])
def test_login_required(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
eq_(response.status_code, 302)
ok_(settings.LOGIN_URL in response['Location'])
ok_('?next=%s' % url in response['Location'])
@mock.patch('requests.get')
def test_status_json(self, rget):
def mocked_get(**options):
assert '/server_status' in options['url'], options['url']
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_json')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(response.content.strip().startswith('{'))
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('2012-09-28T20:30:01+00:00' in response.content)
ok_('application/json' in response['Content-Type'])
eq_('*', response['Access-Control-Allow-Origin'])
def test_crontabber_state(self):
url = reverse('crashstats:crontabber_state')
response = self.client.get(url)
eq_(response.status_code, 200)
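# /your_crashes lists the signed-in user's own crash reports by
# querying supersearch on their email address, so it requires login.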
@mock.patch('requests.get')
def test_your_crashes(self, rget):
url = reverse('crashstats:your_crashes')
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'string',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == ['test@mozilla.com']
return Response({
'hits': [
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130801',
'date': '2000-01-01T00:00:00'
},
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130802',
'date': '2000-01-02T00:00:00'
}
],
'total': 2
})
rget.side_effect = mocked_get
# A user needs to be signed in to see this page.
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('1234abcd-ef56-7890-ab12-abcdef130801' in response.content)
ok_('1234abcd-ef56-7890-ab12-abcdef130802' in response.content)
ok_('test@mozilla.com' in response.content)
@mock.patch('requests.get')
def test_your_crashes_no_data(self, rget):
url = reverse('crashstats:your_crashes')
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'string',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == ['test@mozilla.com']
return Response({
'hits': [],
'total': 0
})
rget.side_effect = mocked_get
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('test@mozilla.com' in response.content)
ok_('no crash report' in response.content)
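# report_index scrubs PII (email addresses and URLs) out of user
# comments for anonymous visitors; the raw values only appear for
# users with the view_pii permission.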
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# which bug IDs appear is important and the order matters too
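# ok_(-1 == a < b < c) is one chained comparison: it asserts that
# '444444' is absent (find() returns -1) and that '333333' occurs
# before '222222' in the response body.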
ok_(
-1 ==
response.content.find('444444') <
response.content.find('333333') <
response.content.find('222222')
)
ok_('FakeSignature1' in response.content)
ok_('11cb72f5-eb28-41e1-a8e4-849982120611' in response.content)
comment_transformed = (
comment0
.replace('\\n', '<br>')
.replace('peterbe@mozilla.com', '(email removed)')
.replace('www.p0rn.com', '(URL removed)')
)
ok_(comment_transformed in response.content)
# but the email should have been scrubbed
ok_('peterbe@mozilla.com' not in response.content)
ok_(email0 not in response.content)
ok_(url0 not in response.content)
ok_(
'You need to be signed in to be able to download raw dumps.'
in response.content
)
# Should not be able to see sensitive key from stackwalker JSON
ok_('"sensitive"' not in response.content)
ok_('"exploitability"' not in response.content)
# the email address will appear if we log in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
assert user.has_perm('crashstats.view_pii')
response = self.client.get(url)
ok_('peterbe@mozilla.com' in response.content)
ok_(email0 in response.content)
ok_(url0 in response.content)
ok_('"sensitive"' in response.content)
ok_('"exploitability"' in response.content)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_additional_raw_dump_links(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response({
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "secret@email.com",
"Vendor": "Mozilla",
"URL": "farmville.com",
"additional_minidumps": "foo, bar,",
})
if params['datatype'] == 'unredacted':
return Response({
"client_crash_date": "2012-06-11T06:08:45",
"dump": dump,
"signature": "FakeSignature1",
"user_comments": None,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": None,
"distributor_version": None,
"truncated": True,
"process_type": None,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": None,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": True,
"exploitability": "Unknown Exploitability"
})
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
url = reverse('crashstats:report_index', args=(crash_id,))
response = self.client.get(url)
eq_(response.status_code, 200)
# first of all, expect these basic URLs
raw_json_url = reverse('crashstats:raw_data', args=(crash_id, 'json'))
raw_dmp_url = reverse('crashstats:raw_data', args=(crash_id, 'dmp'))
# not visible yet; anonymous users get no raw data links
ok_(raw_json_url not in response.content)
ok_(raw_dmp_url not in response.content)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
# still they don't appear; signing in isn't enough without the permission
ok_(raw_json_url not in response.content)
ok_(raw_dmp_url not in response.content)
group = self._create_group_with_permission('view_rawdump')
user.groups.add(group)
response = self.client.get(url)
eq_(response.status_code, 200)
# finally they appear
ok_(raw_json_url in response.content)
ok_(raw_dmp_url in response.content)
# also, check that the other links are there
foo_dmp_url = reverse(
'crashstats:raw_data_named',
args=(crash_id, 'upload_file_minidump_foo', 'dmp')
)
ok_(foo_dmp_url in response.content)
bar_dmp_url = reverse(
'crashstats:raw_data_named',
args=(crash_id, 'upload_file_minidump_bar', 'dmp')
)
ok_(bar_dmp_url in response.content)
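# settings.BUG_PRODUCT_MAP translates an internal product name (the
# fictional 'WinterSun' here) into the Bugzilla product used when
# building "file a bug" links.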
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_fennecandroid_report(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
raw_crash_json = SAMPLE_UNREDACTED % (
dump,
comment0
)
raw_crash_json = json.loads(raw_crash_json)
raw_crash_json['product'] = 'WinterSun'
return Response(raw_crash_json)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
bug_product_map = {
'WinterSun': 'Winter Is Coming'
}
with self.settings(BUG_PRODUCT_MAP=bug_product_map):
response = self.client.get(url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
link = doc('#bugzilla a[target="_blank"]').eq(0)
eq_(link.text(), 'Winter Is Coming')
ok_('product=Winter+Is+Coming' in link.attr('href'))
# also, the "More Reports" link should have WinterSun in it
link = doc('a.sig-overview').eq(0)
ok_('product=WinterSun' in link.attr('href'))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_odd_product_and_version(self, rget, rpost):
"""If the processed JSON references an unfamiliar product and
version it should not use that to make links in the nav to
reports for that unfamiliar product and version."""
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
processed_json = SAMPLE_UNREDACTED % (dump, comment0)
assert '"WaterWolf"' in processed_json
assert '"5.0a1"' in processed_json
processed_json = processed_json.replace(
'"WaterWolf"', '"SummerWolf"'
)
processed_json = processed_json.replace(
'"5.0a1"', '"99.9"'
)
return Response(processed_json)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# the title should have the "SummerWolf 99.9" in it
doc = pyquery.PyQuery(response.content)
title = doc('title').text()
ok_('SummerWolf' in title)
ok_('99.9' in title)
# there shouldn't be any links to reports for the product
# mentioned in the processed JSON
bad_url = reverse('crashstats:home', args=('SummerWolf',))
ok_(bad_url not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_correlations_failed(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
if 'correlations/signatures' in url:
raise models.BadStatusCodeError(500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_no_dump(self, rget, rpost):
dump = ""
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
data = json.loads(
SAMPLE_UNREDACTED % (dump, comment0)
)
del data['dump']
del data['json_dump']
return Response(data)
if 'correlations/signatures' in url:
raise models.BadStatusCodeError(500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('No dump available' in response.content)
def test_report_index_invalid_crash_id(self):
# the last 6 digits indicate 30th Feb 2012, which doesn't exist
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120230'])
response = self.client.get(url)
eq_(response.status_code, 400)
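# The last six digits of a crash ID encode its date as YYMMDD: a
# crash dated today that isn't processed yet renders the pending
# page, while an older unprocessed crash is treated as not found.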
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_pending_today(self, rget, rpost):
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
today = datetime.datetime.utcnow().strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % today])
response = self.client.get(url)
ok_('pendingStatus' in response.content)
eq_(response.status_code, 200)
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
yesterday = yesterday.strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % yesterday])
response = self.client.get(url)
ok_('Crash Not Found' in response.content)
eq_(response.status_code, 200)
url = reverse('crashstats:report_index',
args=['blablabla'])
response = self.client.get(url)
eq_(response.status_code, 400)
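# A HangID in the raw crash metadata marks this report as one half
# of a hang pair; the view looks up the twin via /crashes/paireduuid
# and labels the report a "Hang Minidump".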
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_hangid_in_raw_data(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('Hang Minidump' in response.content)
# the HangID in the fixture above
ok_('123456789' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_invalid_InstallTime(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('<th>Install Time</th>' not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_invalid_parsed_dump(self, rget, rpost):
json_dump = {
u'crash_info': {
u'address': u'0x88',
u'type': u'EXCEPTION_ACCESS_VIOLATION_READ'
},
u'main_module': 0,
u'modules': [
{
u'base_addr': u'0x980000',
u'debug_file': u'FlashPlayerPlugin.pdb',
u'debug_id': u'5F3C0D3034CA49FE9B94FC97EBF590A81',
u'end_addr': u'0xb4d000',
u'filename': u'FlashPlayerPlugin_13_0_0_214.exe',
u'version': u'13.0.0.214'},
],
u'sensitive': {u'exploitability': u'none'},
u'status': u'OK',
u'system_info': {
u'cpu_arch': u'x86',
u'cpu_count': 8,
u'cpu_info': u'GenuineIntel family 6 model 26 stepping 4',
u'os': u'Windows NT',
u'os_ver': u'6.0.6002 Service Pack 2'
},
u'thread_count': 1,
u'threads': [{u'frame_count': 0, u'frames': []}]
}
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"json_dump": %s,
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % json.dumps(json_dump))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('<th>Install Time</th>' not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_sparse_json_dump(self, rget, rpost):
json_dump = {u'status': u'ERROR_NO_MINIDUMP_HEADER', u'sensitive': {}}
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"json_dump": %s,
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % json.dumps(json_dump))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_crash_exploitability(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if '/correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
ok_('Exploitability</th>' not in response.content)
# you must be signed in to see exploitability
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url)
ok_('Exploitability</th>' in response.content)
ok_('Unknown Exploitability' in response.content)
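# The following tests map middleware failure codes to user-facing
# states (assuming BadStatusCodeError carries the upstream HTTP
# status): 404 -> "Crash Not Found", 408 -> still being fetched,
# 410 -> archived report expired, anything else -> re-raised.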
@mock.patch('requests.get')
def test_report_index_processed_crash_not_found(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_("Crash Not Found" in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_raw_crash_not_found(self, rget, rpost):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
def mocked_get(url, params, **options):
assert '/crash_data/' in url
assert 'datatype' in params
if params['datatype'] == 'unredacted':
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
elif params['datatype'] == 'meta': # raw crash json!
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_("Crash Not Found" in response.content)
@mock.patch('requests.get')
def test_report_index_pending(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Fetching this archived report' in response.content)
@mock.patch('requests.get')
def test_report_index_too_old(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(410)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('This archived report has expired' in response.content)
@mock.patch('requests.get')
def test_report_index_other_error(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response('Scary Error', status_code=500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
assert_raises(
models.BadStatusCodeError,
self.client.get,
url
)
# Let's also check that we get the response in the exception
# message.
try:
self.client.get(url)
assert False # shouldn't get here
except models.BadStatusCodeError as exception:
ok_('Scary Error' in str(exception))
# and it should include the URL it used
mware_url = models.UnredactedCrash.base_url + '/crash_data/'
ok_(mware_url in str(exception))
@mock.patch('requests.get')
def test_report_pending_json(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_pending',
args=[crash_id])
response = self.client.get(url)
expected = {
'status': 'error',
'status_message': ('The report for %s'
' is not available yet.' % crash_id),
'url_redirect': ''
}
eq_(response.status_code, 200)
eq_(expected, json.loads(response.content))
def test_report_index_and_pending_missing_crash_id(self):
url = reverse('crashstats:report_index', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
url = reverse('crashstats:report_pending', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
def test_report_list(self):
url = reverse('crashstats:report_list')
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Crash Reports for sig' in response.content)
def test_report_list_all_link(self):
url = reverse('crashstats:report_list')
sig = 'js::jit::EnterBaselineMethod(JSContext*, js::RunState&)'
response = self.client.get(url, {
'product': 'WaterWolf',
'signature': sig
})
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
for link in doc('a'):
if link.text and 'View ALL' in link.text:
ok_(urllib.quote_plus(sig) in link.attrib['href'])
def test_report_list_columns_offered(self):
url = reverse('crashstats:report_list')
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
# The "user_comments" field is a choice
ok_('<option value="user_comments">' in response.content)
# The "URL" field is not a choice
ok_('<option value="URL">' not in response.content)
# also, all fields in models.RawCrash.API_WHITELIST should
# be there
for field in models.RawCrash.API_WHITELIST:
html = '<option value="%s">' % field
ok_(html in response.content)
# but it's different if you're logged in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
ok_('<option value="user_comments">' in response.content)
ok_('<option value="URL">' in response.content)
# and a column from the Raw Crash
ok_('<option value="Accessibility">' in response.content)
# and it's only supposed to appear once
eq_(response.content.count('<option value="Accessibility">'), 1)
@mock.patch('requests.get')
def test_report_list_partial_correlations(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version="5.0a1"' in response.content)
ok_('data-correlation_os="Mac OS X"' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_correlations_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version=""' in response.content)
ok_('data-correlation_os=""' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_sigurls(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
# since no specific product was specified, it should default to all products
ok_('products' in params)
ok_(settings.DEFAULT_PRODUCT not in params['products'])
ok_('ALL' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Must be signed in to see signature URLs' in response.content)
ok_('http://farm.ville' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# <a href="HERE" title="HERE">HERE</a>
eq_(response.content.count('http://farm.ville'), 3)
# because the label is truncated
# <a href="HERE" title="HERE">HE...</a>
eq_(response.content.count(really_long_url), 2)
@mock.patch('requests.get')
def test_report_list_partial_sigurls_specific_product(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
# 'NightTrain' was specifically requested
ok_('products' in params)
ok_('NightTrain' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'product': 'NightTrain'
})
eq_(response.status_code, 200)
eq_(response.content.count('http://farm.ville'), 3)
@mock.patch('requests.get')
def test_report_list_partial_comments(self, rget):
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "I LOVE CHEESE cheese@email.com",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' in response.content)
ok_('bob@uncle.com' not in response.content)
ok_('cheese@email.com' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' not in response.content)
ok_('bob@uncle.com' in response.content)
ok_('cheese@email.com' in response.content)
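# Pagination of the comments partial is driven by a 'result_offset'
# parameter forwarded to the middleware; requesting page 2 triggers a
# second call with an offset and renders only the second batch.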
@mock.patch('requests.get')
def test_report_list_partial_comments_paginated(self, rget):
called_with_params = []
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
called_with_params.append(params)
if params.get('result_offset'):
return Response({
"hits": [{
"user_comments": "I LOVE HAM",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}],
"total": 2
})
else:
return Response({
"hits": [{
"user_comments": "I LOVE CHEESE",
"date_processed": "2011-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120829"
}],
"total": 2
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('I LOVE HAM' not in response.content)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'page': 2,
})
eq_(response.status_code, 200)
ok_('I LOVE HAM' in response.content)
ok_('I LOVE CHEESE' not in response.content)
eq_(len(called_with_params), 2)
@mock.patch('requests.get')
def test_report_list_partial_reports(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('0xdeadbeef' in response.content)
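# The 'sort' and 'reverse' query string parameters are passed through
# to the report/list middleware call; sorting defaults to
# date_processed with no reverse flag.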
@mock.patch('requests.get')
def test_report_list_partial_reports_with_sorting(self, rget):
mock_calls = []
def mocked_get(url, params, **options):
mock_calls.append(params)
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T22:19:59+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'FakeSignature2',
'range_value': 3
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
assert len(mock_calls) == 1
eq_(mock_calls[-1]['sort'], 'date_processed')
ok_('reverse' not in mock_calls[-1])
response = self.client.get(url, dict(
data,
sort='build'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 2
eq_(mock_calls[-1]['sort'], 'build')
ok_('reverse' not in mock_calls[-1])
response = self.client.get(url, dict(
data,
sort='build',
reverse='True'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 3
eq_(mock_calls[-1]['sort'], 'build')
eq_(mock_calls[-1]['reverse'], True)
@mock.patch('requests.get')
def test_report_list_partial_reports_columns_override(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['crap', 'date_processed', 'reason', 'os_and_version']
})
eq_(response.status_code, 200)
# 'reason' in _columns
ok_('reason7' in response.content)
# 'address' not in _columns
ok_('0xdeadbeef' not in response.content)
# 'cpu_name' not in _columns
ok_('x86' not in response.content)
# 'os_and_version' not in _columns
ok_('Mac OS X' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_with_rawcrash(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": {
"Winsock_LSP": "Peter",
"SecondsSinceLastCrash": "Bengtsson"
}
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['date_processed', 'Winsock_LSP', 'SecondsSinceLastCrash']
})
eq_(response.status_code, 200)
ok_('Peter' in response.content)
ok_('Bengtsson' in response.content)
# and also the table headers should be there
ok_('Winsock_LSP*' in response.content)
ok_('SecondsSinceLastCrash*' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_page_2(self, rget):
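# build 300 fake crash IDs, one per day, so the mocked result set
# spans several pages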
uuids = []
_date = datetime.datetime.now()
for i in range(300):
uuids.append(
'441017f4-e006-4eea-8451-dc20e' +
_date.strftime('%Y%m%d')
)
_date += datetime.timedelta(days=1)
def mocked_get(url, params, **options):
if 'report/list' in url:
result_number = int(params['result_number'])
try:
result_offset = int(params['result_offset'])
except KeyError:
result_offset = 0
first = {
"user_comments": None,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": None,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": None
}
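# slice the fake result set according to the requested offset and
# number, mimicking the paging behaviour of the real service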
hits = []
for i in range(result_offset, result_offset + result_number):
try:
item = dict(first, uuid=uuids[i])
hits.append(item)
except IndexError:
break
return Response(json.dumps({
"hits": hits,
"total": len(uuids)
}))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
})
eq_(response.status_code, 200)
ok_(uuids[0] in response.content)
ok_(uuids[-1] not in response.content)
# expect there to be a link with `page=2` in there
report_list_url = reverse('crashstats:report_list')
report_list_url += '?signature=sig'
ok_(report_list_url + '&page=2' in response.content)
# we'll need a copy of this for later
response_first = response
response = self.client.get(url, {
'signature': 'sig',
'page': 2
})
eq_(response.status_code, 200)
ok_(uuids[0] not in response.content)
ok_(uuids[-1] in response.content)
# try to be a smartass
response_zero = self.client.get(url, {
'signature': 'sig',
'page': 0
})
eq_(response_zero.status_code, 200)
# because with page < 1 you get page=1
tbody_zero = response_zero.content.split('<tbody')[1]
tbody_first = response_first.content.split('<tbody')[1]
eq_(hash(tbody_zero), hash(tbody_first))
response = self.client.get(url, {
'signature': 'sig',
'page': 'xx'
})
eq_(response.status_code, 400)
@mock.patch('requests.get')
def test_report_list_partial_reports_non_defaults(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'sig',
'range_unit': settings.RANGE_UNITS[-1],
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
def test_report_list_partial_reports_invalid_range_value(self):
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'sig',
'range_unit': 'days',
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 400)
response = self.client.get(url, dict(data, range_unit='weeks'))
eq_(response.status_code, 400)
response = self.client.get(url, dict(
data,
range_unit='hours',
range_value=24 * 48
))
eq_(response.status_code, 400)
@mock.patch('requests.post')
def test_report_list_partial_bugzilla(self, rpost):
def mocked_post(url, **options):
if '/bugs/' in url:
return Response({
"hits": [
{"id": 111111,
"signature": "Something"},
{"id": 123456789,
"signature": "Something"}
]
})
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_list_partial', args=('bugzilla',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# not the right signature so it's part of "Related Crash Signatures"
ok_(
response.content.find('Related Crash Signatures') <
response.content.find('123456789')
)
response = self.client.get(url, {
'signature': 'Something',
'range_value': 3
})
eq_(response.status_code, 200)
# now the right signature
ok_('123456789' in response.content)
ok_('111111' in response.content)
# because bug id 123456789 is > than 111111 we expect that order
# in the rendered output
ok_(
response.content.find('123456789') <
response.content.find('111111') <
response.content.find('Related Crash Signatures')
)
@mock.patch('requests.get')
def test_report_list_partial_table(self, rget):
def mocked_get(url, params, **options):
if '/crashes/frequency' in url:
# these fixtures exercise the possibility that the build_date
# might be invalid or simply null.
return Response("""
{
"hits": [
{
"count": 1050,
"build_date": "20130806030203",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1050,
"frequency": 1.0,
"count_linux": 0,
"total": 1050,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1150,
"build_date": "notadate",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1150,
"frequency": 1.0,
"count_linux": 0,
"total": 1150,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1250,
"build_date": null,
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1250,
"frequency": 1.0,
"count_linux": 0,
"total": 1250,
"frequency_linux": 0.0,
"frequency_mac": 0.0
}
]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('table',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('1050 - 100.0%' in response.content)
ok_('1150 - 100.0%' in response.content)
ok_('1250 - 100.0%' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_redirect_by_prefix(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
}
""" % (email0, url0))
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
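# a crash ID carrying the configured prefix should redirect to the
# canonical, prefix-less report URL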
base_crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
crash_id = settings.CRASH_ID_PREFIX + base_crash_id
assert len(crash_id) > 36
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
correct_url = reverse('crashstats:report_index', args=[base_crash_id])
self.assertRedirects(response, correct_url)
@mock.patch('requests.get')
def test_report_list_with_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
# it sucks to depend on the output like this but it'll do for now since
# it's quite a rare occurrence.
ok_('</html>' not in response.content) # it's a partial
ok_('no reports in the time period specified' in response.content)
@mock.patch('requests.get')
def test_raw_data(self, rget):
def mocked_get(url, params, **options):
assert '/crash_data' in url
if 'datatype' in params and params['datatype'] == 'raw':
return Response("""
bla bla bla
""".strip())
else:
# default is datatype/meta
return Response("""
{"foo": "bar",
"stuff": 123}
""")
rget.side_effect = mocked_get
crash_id = '176bcd6c-c2ec-4b0c-9d5f-dadea2120531'
json_url = reverse('crashstats:raw_data', args=(crash_id, 'json'))
response = self.client.get(json_url)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % json_url
)
eq_(response.status_code, 302)
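# grant the user the raw-dump permission and try again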
user = self._login()
group = self._create_group_with_permission('view_rawdump')
user.groups.add(group)
assert user.has_perm('crashstats.view_rawdump')
response = self.client.get(json_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
eq_(json.loads(response.content),
{"foo": "bar", "stuff": 123})
dump_url = reverse('crashstats:raw_data', args=(crash_id, 'dmp'))
response = self.client.get(dump_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/octet-stream')
ok_('bla bla bla' in response.content, response.content)
# dump files are cached.
# check the mock function and expect no change
def different_mocked_get(url, **options):
if '/crash_data' in url and 'datatype=raw' in url:
return Response("""
SOMETHING DIFFERENT
""".strip())
raise NotImplementedError(url)
rget.side_effect = different_mocked_get
response = self.client.get(dump_url)
eq_(response.status_code, 200)
ok_('bla bla bla' in response.content) # still. good.
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_remembered_date_range_type(self, rget, rpost):
# if you visit the home page, the default date_range_type will be
# 'report' but if you switch to 'build' it'll remember that
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:home', args=('WaterWolf',))
response = self.client.get(url)
eq_(response.status_code, 200)
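# capture each date_range_type link in the response and check which
# one is marked as selected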
regex = re.compile(r'(<a\s+href="\?date_range_type=(\w+)[^>]+)')
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# now, like the home page does, fire off an AJAX request to frontpage
# for 'build' instead
frontpage_json_url = reverse('crashstats:frontpage_json')
frontpage_response = self.client.get(frontpage_json_url, {
'product': 'WaterWolf',
'date_range_type': 'build'
})
eq_(frontpage_response.status_code, 200)
# load the home page again, and it should be on build date instead
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'report'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'report'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
# now, go back to the home page, and 'report' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'build'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'build'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
# now, go back to the home page, and 'build' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
@mock.patch('requests.get')
def test_correlations_json(self, rget):
url = reverse('crashstats:correlations_json')
def mocked_get(url, params, **options):
if '/correlations/' in url:
ok_('report_type' in params)
eq_(params['report_type'], 'core-counts')
return Response({
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"count": 13,
"load": "36% (4/11) vs. 26% (47/180) amd64 with 2 cores"
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(
url,
{'correlation_report_type': 'core-counts',
'product': 'WaterWolf',
'version': '19.0',
'platform': 'Windows NT',
'signature': 'FakeSignature'}
)
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['reason'], 'EXC_BAD_ACCESS / KERN_INVALID_ADDRESS')
@mock.patch('requests.get')
def test_correlations_signatures_json(self, rget):
url = reverse('crashstats:correlations_signatures_json')
def mocked_get(url, params, **options):
if '/correlations/' in url:
return Response({
"hits": ["FakeSignature1",
"FakeSignature2"],
"total": 2
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(
url,
{'correlation_report_type': 'core-counts',
'product': 'WaterWolf',
'version': '19.0',
'platforms': 'Windows NT,Linux'}
)
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['total'], 2)
def test_unauthenticated_user_redirected_from_protected_page(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
self.assertRedirects(
response,
'%s?%s=%s' % (
reverse('crashstats:login'),
REDIRECT_FIELD_NAME,
url,
)
)
def test_login_page_renders(self):
url = reverse('crashstats:login')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' in response.content)
ok_('Insufficient Privileges' not in response.content)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' not in response.content)
ok_('Insufficient Privileges' in response.content)
def test_your_permissions_page(self):
url = reverse('crashstats:permissions')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(user.email in response.content)
# make some groups and attach permissions
self._create_group_with_permission(
'view_pii', 'Group A'
)
groupB = self._create_group_with_permission(
'view_exploitability', 'Group B'
)
user.groups.add(groupB)
assert not user.has_perm('crashstats.view_pii')
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(PERMISSIONS['view_pii'] in response.content)
ok_(PERMISSIONS['view_exploitability'] in response.content)
doc = pyquery.PyQuery(response.content)
for row in doc('table.permissions tbody tr'):
cells = []
for td in doc('td', row):
cells.append(td.text.strip())
if cells[0] == PERMISSIONS['view_pii']:
eq_(cells[1], 'No')
elif cells[0] == PERMISSIONS['view_exploitability']:
eq_(cells[1], 'Yes!')
|
rhelmer/socorro-webapp
|
crashstats/crashstats/tests/test_views.py
|
Python
|
mpl-2.0
| 222,462
|
from django.conf import settings as django_settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.cache import never_cache
from helfertool.utils import nopermission
from registration.models import Event
from registration.permissions import has_access, ACCESS_BADGES_EDIT
from ..forms import BadgeSettingsForm, BadgeDefaultsForm, BadgeJobDefaultsForm
from .utils import notactive
@login_required
@never_cache
def settings(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_BADGES_EDIT):
return nopermission(request)
# check if badge system is active
if not event.badges:
return notactive(request)
# roles
roles = event.badge_settings.badgerole_set.all()
# designs
designs = event.badge_settings.badgedesign_set.all()
# forms for defaults
defaults_form = BadgeDefaultsForm(request.POST or None,
instance=event.badge_settings.defaults,
settings=event.badge_settings,
prefix='event')
job_defaults_form = BadgeJobDefaultsForm(request.POST or None, event=event,
prefix='jobs')
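# both forms are bound to the same POST data; only save when both
# validate so the defaults stay consistent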
if defaults_form.is_valid() and job_defaults_form.is_valid():
defaults_form.save()
job_defaults_form.save()
return redirect('badges:settings', event_url_name=event.url_name)
context = {'event': event,
'roles': roles,
'designs': designs,
'defaults_form': defaults_form,
'job_defaults_form': job_defaults_form}
return render(request, 'badges/settings.html', context)
@login_required
@never_cache
def settings_advanced(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_BADGES_EDIT):
return nopermission(request)
# check if badge system is active
if not event.badges:
return notactive(request)
# form for settings
form = BadgeSettingsForm(request.POST or None, request.FILES or None,
instance=event.badge_settings)
# form for the permissions
permissions = event.badge_settings.badgepermission_set.all()
if form.is_valid():
form.save()
return redirect('badges:settings_advanced', event_url_name=event.url_name)
# render
context = {'event': event,
'form': form,
'permissions': permissions}
return render(request, 'badges/settings_advanced.html',
context)
@login_required
@never_cache
def default_template(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_BADGES_EDIT):
return nopermission(request)
# check if badge system is active
if not event.badges:
return notactive(request)
# output
response = HttpResponse(content_type='application/x-tex')
response['Content-Disposition'] = 'attachment; filename="template.tex"'
# send file
with open(django_settings.BADGE_DEFAULT_TEMPLATE, 'rb') as f:
response.write(f.read())
return response
@login_required
@never_cache
def current_template(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_BADGES_EDIT):
return nopermission(request)
# check if badge system is active
if not event.badges:
return notactive(request)
# check if file is there
if not event.badge_settings.latex_template:
raise Http404()
# output
response = HttpResponse(content_type='application/x-tex')
response['Content-Disposition'] = 'attachment; filename="template_{}.tex"'.format(event.url_name)
# send file
with event.badge_settings.latex_template.open('rb') as f:
response.write(f.read())
return response
|
helfertool/helfertool
|
src/badges/views/settings.py
|
Python
|
agpl-3.0
| 4,316
|
import os
from sqlalchemy import Column, ForeignKey, Integer, String
from werkzeug.security import generate_password_hash, check_password_hash
from . import db, app
class Meter(db.Model):
""" A list of meters """
__tablename__ = "meter"
meter_id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer, ForeignKey("user.user_id"))
sharing = Column(String(7)) # Public / Private
api_key = Column(String(36))
meter_name = Column(String(20))
def delete_meter_data(meter_id):
""" Delete meter and all data """
Meter.query.filter(Meter.meter_id == meter_id).delete()
db.session.commit()
db_loc = f"data/meter_{meter_id}.db"
if os.path.isfile(db_loc):
os.remove(db_loc)
def get_meter_name(meter_id):
""" Return a list of meters that the user manages """
meter = Meter.query.filter(Meter.meter_id == meter_id).first()
return meter.meter_name
def get_meter_api_key(meter_id):
""" Return the API key for the meter """
meter = Meter.query.filter(Meter.meter_id == meter_id).first()
return meter.api_key
def get_user_meters(user_id):
""" Return a list of meters that the user manages """
meters = Meter.query.filter(Meter.user_id == user_id)
for meter in meters:
user_name = User.query.filter_by(user_id=meter.user_id).first().username
yield (meter.meter_id, meter.meter_name, user_name)
def get_public_meters():
""" Return a list of publicly viewable meters """
meters = Meter.query.filter(Meter.sharing == "public")
for meter in meters:
user_name = User.query.filter_by(user_id=meter.user_id).first().username
yield (meter.meter_id, meter.meter_name, user_name)
def visible_meters(user_id):
""" Return a list of meters that the user can view """
if user_id:
meters = Meter.query.filter(
(Meter.user_id == user_id) | (Meter.sharing == "public")
)
else:
meters = Meter.query.filter(Meter.sharing == "public")
for meter in meters:
user_name = User.query.filter_by(user_id=meter.user_id).first().username
yield (meter.meter_id, meter.meter_name, user_name)
class User(db.Model):
""" A user account """
__tablename__ = "user"
user_id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
apikey = Column(String(128))
def __repr__(self):
return "<User {}>".format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def is_active(self):
"""True, as all users are active."""
return True
def get_id(self):
"""Return the email address to satisfy Flask-Login's requirements."""
return self.user_id
def is_authenticated(self):
"""Return True if the user is authenticated."""
return True
def is_anonymous(self):
"""False, as anonymous users aren't supported."""
return False
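# A minimal usage sketch (illustrative assumption, not part of this
# module): it presumes an app context and a configured ``db`` session;
# the names and values below are hypothetical.
#
#     user = User(username='alice', email='alice@example.com')
#     user.set_password('s3cret')
#     db.session.add(user)
#     db.session.commit()
#     assert user.check_password('s3cret')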
|
aguinane/energyusage
|
energy/models.py
|
Python
|
agpl-3.0
| 3,513
|
from django.conf.urls.defaults import patterns, url
from django.conf import settings
import views
urlpatterns = patterns('',
# CRUD
url(r'^new$', views.new, name='create'),
# Search
url(r'^search$', views.search, name='search'),
url(r'^search/cantfind$', views.search_cantfind, name='search-cantfind'),
# dashboard
url(r'^(?P<venue_slug>[-\w]+)/dashboard$', views.dashboard, name='dashboard'),
url(r'^(?P<venue_slug>[-\w]+)/myprofile$', views.my_public_view, name='myprofile'),
url(r'^(?P<venue_slug>[-\w]+)/profile$', views.public_view, name='profile'),
# Presskit tracker
url(r'^(?P<venue_slug>[-\w]+)/presskit/viewrequest/(?P<viewrequest_id>\d+)$', views.presskit_viewrequest_venue, name='presskit-viewrequest-venue'),
url(r'^(?P<venue_slug>[-\w]+)/presskit/viewrequest/(?P<viewrequest_id>\d+)/venue_comment$', views.presskit_viewrequest_venue_comment, name='presskit-viewrequest-venue-comment'),
url(r'^(?P<venue_slug>[-\w]+)/presskit/viewrequest/(?P<viewrequest_id>\d+)/accept$', views.presskit_viewrequest_venue_accept, name='presskit-viewrequest-venue-accept'),
url(r'^(?P<venue_slug>[-\w]+)/presskit/viewrequest/(?P<viewrequest_id>\d+)/refuse$', views.presskit_viewrequest_venue_refuse, name='presskit-viewrequest-venue-refuse'),
# pictures
url(r'^(?P<venue_slug>[-\w]+)/pictures$', views.picture_list, name='venue-pictures'),
url(r'^(?P<venue_slug>[-\w]+)/pictures/new$', views.picture_new, name='venue-picture-new'),
url(r'^(?P<venue_slug>[-\w]+)/pictures/delete/(?P<picture_id>\d+)$', views.picture_delete, name='venue-picture-delete'),
# membership
# url(r'^(?P<venue_slug>[-\w]+)/members/request$', views.members.membership_request, name='membership-request'),
url(r'^(?P<venue_slug>[-\w]+)/members/(?P<member_id>\d+)/remove$', views.membership_remove, name='membership-remove'),
url(r'^(?P<venue_slug>[-\w]+)/members/manage$', views.membership_manage, name='membership-manage'),
url(r'^(?P<venue_slug>[-\w]+)$', views.detail, name='detail'),
url(r'^(?P<venue_slug>[-\w]+)/edit$', views.edit, name='edit'),
)
if settings.DEBUG:
urlpatterns += patterns('django.views.generic.simple',
(r'^$', 'redirect_to', {'url': 'list'}),
)
urlpatterns += patterns('',
url(r'^list$', views.venue_list, name='list'),
)
|
SpreadBand/SpreadBand
|
apps/venue/urls.py
|
Python
|
agpl-3.0
| 2,471
|
'''
Created on 30 Nov 2010
@author: peio
It tests the mapping module of franklin
'''
import unittest, os, StringIO
from os.path import join, exists
from tempfile import NamedTemporaryFile
from franklin.utils.misc_utils import TEST_DATA_DIR, NamedTemporaryDir
from franklin.mapping import map_reads_with_gmap, map_reads_with_bwa
from franklin.sam import bam2sam
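# FASTQ-formatted Solexa/Illumina reads used as input for the mapper
# tests below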
SOLEXA = '@seq1\n'
SOLEXA += 'TCATTGAAAGTTGAAACTGATAGTAGCAGAGTTTTTTCCTCTGTTTGG\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIIIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq2\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq14\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq15\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGGCTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq12\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq13\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq16\n'
SOLEXA += 'ATATGATTGAAGATATTTCTGGACTTTAAGGGTTCTTGAGGATTTATA\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
SOLEXA += '@seq17\n'
SOLEXA += 'atgtcgtacatattggcattgcagtcagcggtatctagtgctaggtaa\n'
SOLEXA += '+\n'
SOLEXA += 'IIIIIIHIIIIIIIIIIIIIIIZIIUJUAUGJUUJUDFAOUDJOFSUD\n'
class GmapTest(unittest.TestCase):
'It tests the gmap mapper'
@staticmethod
def test_gmap_mapper():
'It tests the gmap mapper'
mappers_dir = join(TEST_DATA_DIR, 'mappers')
gmap_dir = join(TEST_DATA_DIR, 'mappers', 'gmap')
work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fpath = join(gmap_dir, 'lb_lib1.pl_sanger.sm_sam1.fa')
out_bam_fhand = NamedTemporaryFile(suffix='.bam')
parameters = {'threads':None, 'kmer':13}
map_reads_with_gmap(temp_genome, reads_fpath, out_bam_fhand.name,
parameters)
sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fhand.name, sam_fhand.name, header=True)
result = open(sam_fhand.name).read()
assert exists(out_bam_fhand.name)
assert '36M2I204M' in result
assert 'SN:SL2.30ch00' in result
assert 'seq9_rev_MOD' in result
work_dir.close()
out_bam_fhand.close()
sam_fhand.close()
work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fpath = join(gmap_dir, 'lb_lib1.pl_sanger.sm_sam1.sfastq')
out_bam_fhand = NamedTemporaryFile(suffix='.bam')
unmapped_fhand = StringIO.StringIO()
parameters = {'threads':None, 'kmer':13,
'unmapped_fhand':unmapped_fhand}
map_reads_with_gmap(temp_genome, reads_fpath, out_bam_fhand.name,
parameters)
sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fhand.name, sam_fhand.name, header=True)
result = open(sam_fhand.name).read()
assert exists(out_bam_fhand.name)
assert '36M2I204M' in result
assert 'SN:SL2.30ch00' in result
assert 'seq9_rev_MOD' in result
assert '?????????????????' in result
work_dir.close()
out_bam_fhand.close()
sam_fhand.close()
@staticmethod
def test_gmap_without_mapping_output():
'''It tests that gmap doesn't map anything'''
mappers_dir = join(TEST_DATA_DIR, 'mappers')
gmap_dir = join(TEST_DATA_DIR, 'mappers', 'gmap')
work_dir = NamedTemporaryDir()
temp_genome = join(work_dir.name, 'genome.fa')
os.symlink(join(mappers_dir, 'genome.fa'), temp_genome)
reads_fhand = NamedTemporaryFile()
reads_fhand.write('>seq\natgtgatagat\n')
reads_fhand.flush()
out_bam_fhand = NamedTemporaryFile()
out_bam_fpath = out_bam_fhand.name
out_bam_fhand.close()
parameters = {'threads':None, 'kmer':13}
map_reads_with_gmap(temp_genome, reads_fhand.name, out_bam_fpath,
parameters)
reads_fhand.close()
temp_sam_fhand = NamedTemporaryFile(suffix='.sam')
bam2sam(out_bam_fpath, temp_sam_fhand.name, True)
result = open(temp_sam_fhand.name).read()
assert 'seq\t4\t*\t0\t0' in result
class BwaTest(unittest.TestCase):
'It tests the bwa mapper'
@staticmethod
def test_bwa_mapping():
'''It tests the bwa mapping'''
reference = join(TEST_DATA_DIR, 'blast/arabidopsis_genes')
work_dir = NamedTemporaryDir()
reference_fpath = join(work_dir.name, 'arabidopsis_genes')
os.symlink(reference, reference_fpath)
reads_fhand = NamedTemporaryFile(suffix='.sfastq')
reads_fhand.write(SOLEXA)
reads_fhand.flush()
out_bam_fhand = NamedTemporaryFile()
out_bam_fpath = out_bam_fhand.name
out_bam_fhand.close()
parameters = {'colorspace': False, 'reads_length':'short',
'threads':None, 'java_conf':None}
map_reads_with_bwa(reference_fpath, reads_fhand.name, out_bam_fpath,
parameters)
test_sam_fhand = NamedTemporaryFile(suffix='sam')
bam2sam(out_bam_fpath, test_sam_fhand.name)
result = open(test_sam_fhand.name).read()
assert 'seq17' in result
unmapped_fhand = StringIO.StringIO()
parameters = {'colorspace': False, 'reads_length':'short',
'threads':None, 'java_conf':None,
'unmapped_fhand':unmapped_fhand}
map_reads_with_bwa(reference_fpath, reads_fhand.name, out_bam_fpath,
parameters)
assert 'seq17' in unmapped_fhand.getvalue()
test_sam_fhand = NamedTemporaryFile(suffix='sam')
bam2sam(out_bam_fpath, test_sam_fhand.name)
result = open(test_sam_fhand.name).read()
assert 'seq17' not in result
if __name__ == "__main__":
import sys;sys.argv = ['', 'BwaTest.test_bwa_mapping']
unittest.main()
|
JoseBlanca/franklin
|
test/mapping_test.py
|
Python
|
agpl-3.0
| 6,577
|
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
from django.conf.urls import patterns, url
from piston.authentication import HttpBasicAuthentication
from piston.resource import Resource
from identityprovider.auth import (
basic_authenticate,
SSOOAuthAuthentication,
)
import api.v10.handlers as v10
import api.v11.handlers as v11
import api.v20.handlers as v20
from api.v20.utils import ApiOAuthAuthentication
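# one piston Resource per handler; endpoints that need authentication
# wrap their handler with Basic auth or an OAuth authenticator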
v10root = Resource(handler=v10.RootHandler)
v10captcha = Resource(handler=v10.CaptchaHandler)
v10registration = Resource(handler=v10.RegistrationHandler)
v10auth = Resource(
handler=v10.AuthenticationHandler,
authentication=HttpBasicAuthentication(auth_func=basic_authenticate)
)
v10accounts = Resource(
handler=v10.AccountsHandler,
authentication=SSOOAuthAuthentication()
)
v11root = Resource(handler=v11.RootHandler)
v11auth = Resource(
handler=v11.AuthenticationHandler,
authentication=HttpBasicAuthentication(auth_func=basic_authenticate)
)
v2accounts = Resource(
handler=v20.AccountsHandler, authentication=ApiOAuthAuthentication())
v2emails = Resource(handler=v20.EmailsHandler)
v2login = Resource(handler=v20.AccountLoginHandler)
v2login_phone = Resource(handler=v20.AccountPhoneLoginHandler)
v2registration = Resource(handler=v20.AccountRegistrationHandler)
v2requests = Resource(handler=v20.RequestsHandler)
v2password_reset = Resource(handler=v20.PasswordResetTokenHandler)
urlpatterns = patterns(
'',
# v1.0
url(r'^1.0/$', v10root, name='api-10-root',
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.0/captchas$', v10captcha, name='api-10-captchas',
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.0/registration$', v10registration, name='api-10-registration',
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.0/authentications$', v10auth, name='api-10-authentications',
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.0/accounts$', v10accounts, name='api-10-accounts',
kwargs={'emitter_format': 'lazr.restful'}),
# v1.1
url(r'^1.1/$', v11root,
kwargs={'emitter_format': 'lazr.restful'}),
# add backwards compatible endpoints
url(r'^1.1/captchas$', v10captcha,
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.1/registration$', v10registration,
kwargs={'emitter_format': 'lazr.restful'}),
url(r'^1.1/accounts$', v10accounts,
kwargs={'emitter_format': 'lazr.restful'}),
# add overriding endpoints
url(r'^1.1/authentications$', v11auth,
kwargs={'emitter_format': 'lazr.restful'}),
# v2
url(r'^v2/accounts$', v2registration, name='api-registration'),
url(r'^v2/tokens/oauth$', v2login, name='api-login'),
url(r'^v2/tokens/password$', v2password_reset, name='api-password-reset'),
url(r'^v2/accounts/(\w+)$', v2accounts, name='api-account'),
url(r'^v2/requests/validate$', v2requests, name='api-requests'),
# login from phone, with a phone user id
url(r'^v2/tokens/phone$', v2login_phone, name='api-login-phone'),
# temporarily hooked up so we can do reverse()
url(r'^v2/emails/(.*)$', v2emails, name='api-email'),
url(r'^v2/tokens/oauth/(.*)$', v2login, name='api-token'),
url(r'^v2/tokens/password/(.*)$', v2password_reset,
name='api-password-reset'),
)
|
miing/mci_migo
|
api/urls.py
|
Python
|
agpl-3.0
| 3,433
|
#
# Generated by the OpenERP module recorder!
#
|
avanzosc/avanzosc6.1
|
steel_quality_test/__init__.py
|
Python
|
agpl-3.0
| 50
|