| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_marooned_pirate_hum_f.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_marooned_pirate_hum_f.py | Python | mit | 456 |
#!/usr/bin/env python
'''
Created by Samvel Khalatyan, May 01, 2012
Copyright 2012, All rights reserved
'''
from __future__ import division
import time
class Timer(object):
def __init__(self):
self._calls = 0
self._elapsed = 0
self._start = None
def start(self):
if not self._start:
self._start = time.clock()
def stop(self):
if self._start:
self._elapsed += time.clock() - self._start
self._calls += 1
self._start = None
def calls(self):
return self._calls
def elapsed(self):
return self._elapsed
def __str__(self):
return "avg: {avg:.3f} mkS calls: {calls} total: {total:.6f} S".format(
avg=self.elapsed() / self.calls() * 1e6 if self.calls() else 0,
calls=self.calls(),
total=self.elapsed())
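# A minimal usage sketch (not part of the original module; `do_work` is a
# hypothetical workload):
#
#   timer = Timer()
#   for _ in range(100):
#       timer.start()
#       do_work()
#       timer.stop()
#   print(timer)  # -> "avg: ... mkS calls: 100 total: ... S"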
| skhal/performance | python/utility/timer.py | Python | mit | 901 |
def resizeApp(app, dx, dy):
switchApp(app)
corner = find(Pattern("1273159241516.png").targetOffset(3,14))
dragDrop(corner, corner.getCenter().offset(dx, dy))
resizeApp("Safari", 50, 50)
# exists("1273159241516.png")
# click(Pattern("1273159241516.png").targetOffset(3,14).similar(0.7).firstN(2))
# with Region(10,100,300,300):
# pass
# click("__SIKULI-CAPTURE-BUTTON__")
| bx5974/sikuli | sikuli-ide/sample-scripts/resize-app.sikuli/resize-app.py | Python | mit | 380 |
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import palettes
class Swirl(Circle):
COLOR_DEFAULTS = ('palette', palettes.get('three_sixty')),
def __init__(self, layout, angle=12, **kwds):
super().__init__(layout, **kwds)
self.angle = angle
def pre_run(self):
self._step = 0
def step(self, amt=1):
for a in range(0, 360, self.angle):
c = self.palette(self._step)
for i in range(self.ringCount):
self.layout.set(i, a, c)
self._step += amt
| ManiacalLabs/BiblioPixelAnimations | BiblioPixelAnimations/circle/swirl.py | Python | mit | 586 |
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, dict_output
@click.command('get_comments')
@click.argument("feature_id", type=str)
@click.option(
"--organism",
help="Organism Common Name",
type=str
)
@click.option(
"--sequence",
help="Sequence Name",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, feature_id, organism="", sequence=""):
"""Get a feature's comments
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.get_comments(feature_id, organism=organism, sequence=sequence)
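# Illustrative shell invocation (the "annotations" command group is an assumption
# based on this file's location under arrow/commands/annotations/):
#
#   arrow annotations get_comments <feature_id> --organism "Organism Name"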
| erasche/python-apollo | arrow/commands/annotations/get_comments.py | Python | mit | 652 |
# Pylot | mardix/pylot | pylot/app_templates/default/__init__.py | Python | mit | 8 |
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="y", parent_name="volume.caps", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Y"),
data_docs=kwargs.pop(
"data_docs",
"""
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the y `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
""",
),
**kwargs
)
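# Illustrative sketch of setting the attributes documented above through the
# public graph_objects API (assumed usage; not part of the validator module):
#
#   import plotly.graph_objects as go
#   vol = go.Volume(caps=dict(y=dict(show=True, fill=0.7)))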
| plotly/python-api | packages/python/plotly/plotly/validators/volume/caps/_y.py | Python | mit | 1,174 |
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext import declarative as legacy_decl
from sqlalchemy.ext.declarative import instrument_declarative
from sqlalchemy.orm import Mapper
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_deprecated_20
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
class TestInstrumentDeclarative(fixtures.TestBase):
def test_ok(self):
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
meta = sa.MetaData()
reg = {}
with expect_deprecated_20(
"the instrument_declarative function is deprecated"
):
instrument_declarative(Foo, reg, meta)
mapper = sa.inspect(Foo)
is_true(isinstance(mapper, Mapper))
is_(mapper.class_, Foo)
class DeprecatedImportsTest(fixtures.TestBase):
def _expect_warning(self, name):
return expect_deprecated_20(
r"The ``%s\(\)`` function is now available as "
r"sqlalchemy.orm.%s\(\)" % (name, name)
)
def test_declarative_base(self):
with self._expect_warning("declarative_base"):
Base = legacy_decl.declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
assert inspect(Foo).mapper
def test_as_declarative(self):
with self._expect_warning("as_declarative"):
@legacy_decl.as_declarative()
class Base(object):
pass
class Foo(Base):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
assert inspect(Foo).mapper
def test_has_inherited_table(self, registry):
@registry.mapped
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
@registry.mapped
class Bar(Foo):
__tablename__ = "bar"
id = sa.Column(sa.ForeignKey("foo.id"), primary_key=True)
with self._expect_warning("has_inherited_table"):
is_true(legacy_decl.has_inherited_table(Bar))
with self._expect_warning("has_inherited_table"):
is_false(legacy_decl.has_inherited_table(Foo))
def test_synonym_for(self, registry):
with self._expect_warning("synonym_for"):
@registry.mapped
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
@legacy_decl.synonym_for("id")
@property
def id_prop(self):
return self.id
f1 = Foo(id=5)
eq_(f1.id_prop, 5)
| monetate/sqlalchemy | test/ext/declarative/test_deprecations.py | Python | mit | 2,870 |
#
import time
from flask import Flask, json, request
from flask.app import setupmethod
from threading import Thread
class DaemonThread(Thread):
def start(self):
self.daemon = True
super(DaemonThread, self).start()
class WSConnection(object):
def __init__(self, app, wsid, fid):
self.app = app
self.fid = fid
self.wsid = wsid
def __repr__(self):
return '<WSConnection %s fid=..%s>' % (self.wsid, self.fid[-4:])
def created(self):
# do something when first connected
print "%r: created" % self
pass
def closed(self):
# do something after being closed, but cannot send messages at this point
print "%r: closed" % self
pass
def tx(self, msg):
self.app.tx(msg, conn=self)
class CFCContextVars(object):
# This class is put into the context of all templates as "CFC"
#
@property
def WEBSOCKET_URL(self):
" Provide a URL for the websocket to be used "
scheme = request.environ['wsgi.url_scheme']
return '%s://%s/__W__' % ('ws' if scheme == 'http' else 'wss', request.host)
class CFCFlask(Flask):
''' Extensions to Flask() object to support app needs for CFC frontend '''
#
# override this -- what is the domain we're associated with
# in the front end?
# lh = localhost/127.0.0.1
# none = no host given (ie. default)
# example.com = traffic for example.com
#
# Your app can still handle other traffic, but websocket stuff should be on these domains.
#
# how often to do websocket-level keepalive on sockets.
ping_rate = 15 # seconds
def __init__(self, *a, **kws):
# List of functions that want to receive data from websocket clients
# Extend this using the decorator app.ws_rx_handler
self.ws_rx_handlers = []
# map of all current connections
self.ws_connections = {}
# Domains we are implementing today; lowercase, canonical names only.
# you can still redirect www. variations and such, but don't include them
# in this list.
self.my_vhosts = kws.pop('vhosts', ['lh', 'none'])
# We need some threads. You can add yours too, by decorating with
# app.background_task
self.ws_background_tasks = [ self.pinger, self.rxer ]
super(CFCFlask, self).__init__(*a, **kws)
@self.context_processor
def extra_ctx():
return dict(CFC = CFCContextVars())
def pinger(self):
# Keep all connections alive with some minimal traffic
RDB = self.redis
#RDB.publish('bcast', 'RESTART')
while 1:
RDB.publish('bcast', 'PING')
time.sleep(self.ping_rate)
def rxer(self):
# Listen for all traffic from the clients to us. Forward upwards to app
RDB = self.redis
endpoints = ['rx|'+v for v in self.my_vhosts]
while 1:
# block on read from a few lists...
vhost, here = RDB.blpop(endpoints)
# name of list which provides the value is the vhost source
assert vhost.startswith('rx|')
vhost = vhost[3:]
            assert vhost in self.my_vhosts, "Unexpected hostname: %s" % vhost
# This data from WS is already wrapped as JSON by LUA code. Trustable.
try:
here = json.loads(here)
except:
self.logger.error('Badly wrapped WS message? %s' % here, exc_info=1)
continue
assert 'fid' in here
assert 'wsid' in here
wsid = here['wsid']
fid = here['fid']
            # Socket state changes will have "state" set but not "msg"
if 'state' in here:
sc = here['state']
if sc == 'OPEN':
self.ws_new_connection(wsid, fid)
elif sc == 'CLOSE':
conn = self.ws_connections.pop(wsid, None)
if conn:
conn.closed()
# end of processing.
continue
assert 'msg' in here
conn = self.ws_connections.get(wsid, None)
if not conn:
# this will happen if you restart python while the nginx/lua stays up
self.logger.warn('Existing/unexpected WSID')
conn = self.ws_new_connection(wsid, fid)
# Important: do not trust "msg" here as it comes
# unverified from browser-side code. Could be nasty junk.
msg = here.get('msg', None)
            if msg and msg[0] == '{' and msg[-1] == '}':
# looks like json
try:
msg = json.loads(msg)
except:
self.logger.debug('RX[%s] got bad JSON: %r' % (vhost, msg))
for handler in self.ws_rx_handlers:
handler(vhost, conn, msg)
if not self.ws_rx_handlers:
self.logger.debug('RX[%s] %r' % (vhost, msg))
def ws_new_connection(self, wsid, fid):
''' New WS connection, track it.
'''
self.ws_connections[wsid] = rv = WSConnection(self, wsid, fid)
rv.created()
return rv
def tx(self, msg, conn=None, fid=None, wsid=None, bcast=False):
'''
        Send a message via websocket to a specific browser (fid), a specific tab (wsid), or all clients (bcast).
'msg' can be text, but should probably be JSON in most applications.
'''
assert conn or fid or wsid or bcast, "Must provide a destination"
if conn:
chan = 'wsid|' + conn.wsid
elif wsid:
chan = 'wsid|' + wsid
elif fid:
chan = 'fid|' + fid
elif bcast:
chan = 'bcast'
if not isinstance(msg, basestring):
# convert into json, if not already
msg = json.dumps(msg)
self.redis.publish(chan, msg)
def ws_close(self, wsid_or_conn):
'''
Close a specific web socket from server side.
        Lua code detects this message and kills its connection.
'''
self.tx('CLOSE', wsid=getattr(wsid_or_conn, 'wsid', wsid_or_conn))
def ws_kill(self, conn):
'''
        Close all web sockets from server side because the user misbehaved, and
        also kill its session on CFC. User will have to wait for javascript POW.
'''
self.tx('KILL', fid=conn.fid)
@setupmethod
def ws_rx_handler(self, f):
"""
Registers a function to be called when traffic is received via web sockets
"""
self.ws_rx_handlers.append(f)
return f
@setupmethod
def background_task(self, f):
"""
Registers a function to be run as a background thread
"""
self.ws_background_tasks.append(f)
return f
def start_bg_tasks(self):
''' start long-lived background threads '''
for fn in self.ws_background_tasks:
DaemonThread(name=fn.__name__, target=fn, args=[]).start()
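# A minimal usage sketch (assumptions: a Redis client is attached to the app as
# `app.redis` elsewhere, since the methods above rely on it; `on_ws_message` is a
# hypothetical handler):
#
#   app = CFCFlask(__name__, vhosts=['lh', 'example.com'])
#
#   @app.ws_rx_handler
#   def on_ws_message(vhost, conn, msg):
#       conn.tx({'echo': msg})      # reply to the originating browser tab
#
#   app.start_bg_tasks()            # launch the pinger/rxer daemon threads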
| sckoh/cloudfire | python/cfcapp.py | Python | mit | 5,964 |
from django.test.client import Client
from django.test import TestCase
from django.urls import reverse
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.servers.models import server_model
class TestServerViews(TestCase):
def setUp(self):
User.objects.all().delete()
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
def tearDown(self):
self.c.logout()
self.user.delete()
server_model.collection.remove()
def all_servers_test(self):
url = reverse('servers')
response = self.c.get(url)
assert response.status_code == 200
def add_server_test(self):
server_model.collection.remove()
url = reverse('add_server')
response = self.c.get(url)
assert response.status_code == 200
response = self.c.post(url, {'name': 'test', 'check_every': 60,'keep_data': 30})
created_server = server_model.collection.find_one()
eq_(created_server['name'], 'test')
response_url = "{0}#{1}".format(reverse('servers'), created_server['_id'])
self.assertRedirects(response, response_url)
server_model.collection.remove()
def edit_server_test(self):
server_model.collection.remove()
server_model.collection.insert({'name': 'test' , 'check_every': 60,'keep_data': 30, "key": "test"})
server = server_model.collection.find_one()
url = reverse('edit_server', kwargs={'server_id': server['_id']})
response = self.c.get(url)
assert response.status_code == 200
response = self.c.post(url, {'name': 'changetest', 'check_every': 300,'keep_data': 30})
updated_server = server_model.collection.find_one()
self.assertRedirects(response, reverse('servers'))
eq_(updated_server['name'], 'changetest')
eq_(updated_server['check_every'], 300)
server_model.collection.remove()
def delete_server_test(self):
server_model.collection.remove()
server_model.collection.insert({'name': 'test'})
server = server_model.collection.find_one()
url = reverse('delete_server', kwargs={'server_id': server['_id']})
response = self.c.get(url)
self.assertRedirects(response, reverse('servers'))
deleted_server = server_model.collection.find().count()
eq_(deleted_server, 0)
server_model.collection.remove()
| martinrusev/amonone | amon/apps/servers/tests/views_tests.py | Python | mit | 2,693 |
from egat.testset import UnorderedTestSet
class Test2(UnorderedTestSet):
def testStep1(self):
pass
def testStep2(self):
pass
def testStep3(self):
pass
| egineering-llc/egat | examples/config_example/test2.py | Python | mit | 188 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import platform
if platform.system() == 'Windows':
from compat import *
from puke.Error import *
from puke.Task import *
from puke.Tools import *
from puke.ToolsExec import *
from puke.FileList import *
from puke.Sed import *
from puke.Console import *
from puke.Env import *
from puke.Cache import *
from puke.Require import *
from puke.Yak import *
from puke.VirtualEnv import *
from puke.Std import *
from puke.SSH import SSH
import puke.System
import puke.FileSystem
import puke.Utils
import requests as http
VERSION = 0.1
__all__ = [
"main", "VERSION", "Error", "FileList", "Sed", "Std", "Env", "Require", "Load", "Yak", "VirtualEnv", "System", "FileSystem", "Utils",
"combine", "sh", "minify", "jslint", "http", "jsdoc", "jsdoc3", "patch", "prompt", "deepcopy", "stats", "pack", "unpack", "hsizeof", "console", "SSH"
]
import sys, logging, os, traceback
import pkg_resources
from optparse import OptionParser
from colorama import *
try:
sys.path.insert(1, os.getcwd())
except:
pass
def run():
""" Main routine which should be called on startup """
#
# Parse options
#
parser = OptionParser()
parser.add_option("-c", "--clear", action="store_true", dest="clearcache", help="Spring time, clean all the vomit")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose", help="don't print status messages to stdout")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="print more detailed status messages to stdout")
parser.add_option("-t", "--tasks",action="store_true", dest="list_tasks", help="list tasks")
parser.add_option("-l", "--log", dest="logfile", help="Write debug messages to given logfile")
parser.add_option("-f", "--file", dest="file", help="Use the given build script")
parser.add_option("-p", "--patch",action="store_true", dest="patch", help="Patch closure")
parser.add_option("-i", "--info",action="store_true", dest="info", help="puke task --info show task informations")
if sys.platform.lower() == "darwin":
parser.add_option("-s", "--speak", action="store_true", dest="speak",
help="puke speaks on fail/success")
(options, args) = parser.parse_args()
if hasattr(options, 'speak'):
Console.SPEAK_ENABLED = True
rLog = logging.getLogger('requests')
rLog.setLevel(logging.WARNING)
#
# Configure logging
#
if options.logfile:
logging.basicConfig(filename=options.logfile, level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
elif not logging.root.handlers:
if options.verbose is True:
logging.getLogger().setLevel(logging.DEBUG)
rLog.setLevel(logging.DEBUG)
elif options.verbose is False:
logging.getLogger().setLevel(logging.WARN)
else:
logging.getLogger().setLevel(logging.INFO)
# Define a Handler which writes INFO messages or higher to the sys.stderr
consoleCfg = logging.StreamHandler()
if options.verbose is True:
consoleCfg.setLevel(logging.DEBUG)
elif options.verbose is False:
consoleCfg.setLevel(logging.WARN)
else:
consoleCfg.setLevel(logging.INFO)
if os.environ.get("NOCOLOR"):
consoleCfg.setFormatter(logging.Formatter( ' %(message)s' , '%H:%M:%S'))
else:
consoleCfg.setFormatter(logging.Formatter( ' %(message)s' + Style.RESET_ALL, '%H:%M:%S'))
logging.getLogger().addHandler(consoleCfg)
#Patch closure
try:
closure = pkg_resources.get_distribution('closure_linter').location
closure_lock = os.path.join(closure, 'closure_linter', 'puke.lock')
if options.patch or not os.path.isfile(closure_lock):
closure = os.path.join(closure, 'closure_linter', 'ecmalintrules.py')
try:
handle = source = destination = None
import shutil
shutil.move( closure, closure+"~" )
destination= open( closure, "w" )
source= open( closure+"~", "r" )
content = source.read()
content = content.replace('MAX_LINE_LENGTH = 80', 'MAX_LINE_LENGTH = 120')
destination.write(content)
source.close()
destination.close()
os.remove(closure+"~" )
handle = file(closure_lock, 'a')
                handle.close()
if options.patch:
console.confirm('Patch successful')
sys.exit(0)
except Exception as e:
console.warn(">>> you should consider running \"sudo puke --patch\"")
if handle:
handle.close()
if source:
source.close()
if destination:
destination.close()
if options.patch:
sys.exit(0)
except Exception as e:
console.error('Closure linter not found %s' % e)
#
# Handle .pukeignore
#
if os.path.isfile('.pukeignore'):
try:
f = open('.pukeignore', 'r')
for line in f:
FileList.addGlobalExclude(line.strip())
except Exception as e:
console.warn('Puke ignore error : %s' % e)
#
# Find and execute build script
#
pukefiles = ["pukefile", "pukeFile", "pukefile", "pukefile.py", "pukeFile.py", "pukefile.py"]
script = None
if options.file:
if os.path.isfile(options.file):
script = options.file
else:
for name in pukefiles:
if os.path.isfile(name):
script = name
if script is None:
if options.file:
raise PukeError("No generate file '%s' found!" % options.file)
else:
raise PukeError("No generate file found!")
retval = execfile(script)
#
# Execute tasks
#
if options.list_tasks:
console.confirm("Please choose from: ")
printTasks()
sys.exit(0)
if options.clearcache:
console.header("Spring time, cleaning all the vomit around ...")
console.log("...")
if Cache.clean():
console.confirm("You're good to go !\n")
else:
console.confirm("Your room is already tidy, good boy :-) \n")
sys.exit(0)
try:
args = args.strip()
except:
pass
if not args:
if hasDefault():
executeTask('default')
else:
logging.error("No tasks to execute. Please choose from: ")
printTasks()
sys.exit(1)
else:
name = args.pop(0)
if options.info:
printHelp(name.strip())
else:
executeTask(name.strip(), *args)
def gettraceback(level = 0):
trace = ""
exception = ""
exc_list = traceback.format_exception_only (sys.exc_type, sys.exc_value)
reverse = -1 - level
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list[reverse]:
trace += entry
return trace
def main():
try:
run()
except Exception as error:
console.fail("\n\n :puke: \n PUKE %s \n %s \n" % (error, gettraceback()))
sys.exit(1)
except KeyboardInterrupt:
console.warn("\n\n :puke: \nBuild interrupted!\n")
sys.exit(2)
if Console.SPEAK_ENABLED and Console.SPEAK_MESSAGE_ON_SUCCESS:
console.say(Console.SPEAK_MESSAGE_ON_SUCCESS)
sys.exit(0) | webitup/puke | puke/__init__.py | Python | mit | 7,759 |
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (
six.text_type(user.pk) + six.text_type(timestamp) +
six.text_type(user.profile.email_confirmed)
)
account_activation_token = AccountActivationTokenGenerator() | shashank-sharma/mythical-learning | mysite/login/tokens.py | Python | mit | 422 |
import sublime
import sublime_plugin
import json
from os.path import dirname, realpath, join
from .node_bridge import node_bridge
# monkeypatch `Region` to be iterable
sublime.Region.totuple = lambda self: (self.a, self.b)
sublime.Region.__iter__ = lambda self: self.totuple().__iter__()
BIN_PATH = join(sublime.packages_path(), dirname(realpath(__file__)), 'babel-transform.js')
class BabelCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
selected_text = self.get_text()
code = self.babelify(selected_text)
if code:
w = sublime.Window.new_file(view.window())
w.settings().set('default_extension', 'js')
w.set_syntax_file(view.settings().get('syntax'))
w.set_scratch(True)
w.insert(edit, 0, code)
def babelify(self, data):
try:
return node_bridge(data, BIN_PATH, [json.dumps({
'filename': self.view.file_name(),
'debug': self.get_setting('debug'),
'ensure_newline_at_eof': self.get_setting('ensure_newline_at_eof'),
'use_local_babel': self.get_setting('use_local_babel'),
'node_modules': self.get_setting('node_modules'),
'options': self.get_setting('options')
})])
except Exception as e:
return str(e)
def get_text(self):
if not self.has_selection():
region = sublime.Region(0, self.view.size())
return self.view.substr(region)
selected_text = ''
for region in self.view.sel():
selected_text = selected_text + self.view.substr(region) + '\n'
return selected_text
def has_selection(self):
for sel in self.view.sel():
start, end = sel
if start != end:
return True
return False
def get_setting(self, key):
settings = self.view.settings().get('Babel')
if settings is None:
settings = sublime.load_settings('Babel.sublime-settings')
return settings.get(key)
| okoala/sublime-bak | Backup/20150720091700/Babel/Babel.py | Python | mit | 1,796 |
""" A component that designates a wire. """
from graph import Node, Edge
from constraint import Constraint
class Wire(object):
""" Wire component """
def __init__(self, graph, node_a=None, node_b=None, edge_i=None):
""" Initializes a wire with two nodes. Current goes from
A to B. If nodes / edges aren't supplied, new ones are created.
Supplied nodes / edges should be part of the supplied graph.
Args:
graph : Graph object
node_a : Node object
node_b : Node object
edge_i : Edge object
Returns:
Wire object
"""
if not node_a:
node_a = Node(graph)
if not node_b:
node_b = Node(graph)
if not edge_i:
edge_i = Edge(graph, node_a, node_b)
self._node_a = node_a
self._node_b = node_b
self._edge_i = edge_i
def node_a(self):
""" Returns node A.
Returns:
Node object
"""
return self._node_a
def node_b(self):
""" Returns node B.
Returns:
Node object
"""
return self._node_b
def edge_i(self):
""" Returns the edge that stores current from A to B.
Returns:
Edge object
"""
return self._edge_i
def substitutions(self):
""" Return a dictionary mapping each symbol to a value. Return
an empty dictionary if no substitutions exist
Returns:
dictionary from sympy variable to value
"""
return {}
def variables(self):
""" Returns a set of variables under constraints.
Returns:
set of Nodes, Edges, tuples, or strings
"""
return set([self._node_a, self._node_b, self._edge_i])
def constraints(self):
""" Returns a list of constraints that must be solved.
A constraint is a tuple (coefficients, variables), where
coefficients is a list of numbers corresponding to the linear
equation:
A_0 * x_0 + A_1 * x_1 + ... + A_{n-1} * x_{n-1} = 0,
and variables is a list of the Node and Edge objects.
Returns:
List of Constraint objects
"""
cs = [1, -1]
xs = [self._node_a, self._node_b]
constraint = Constraint(cs, xs)
return [constraint]
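# Worked example of the representation described in constraints() above: for a
# wire between nodes A and B, the single constraint is Constraint([1, -1], [A, B]),
# i.e. 1*V_A + (-1)*V_B = 0, so both endpoints of an ideal wire are held at the
# same potential. (Illustrative note; not part of the original module.)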
| ThatSnail/impede | impede-app/server/py/wire.py | Python | mit | 2,414 |
"""Test that boolean conditions simplify to a constant value"""
# pylint: disable=pointless-statement
from unknown import Unknown # pylint: disable=import-error
def func(_):
"""Pointless function"""
CONSTANT = 100
OTHER = 200
# Simplifies any boolean expression that is coerced into a True/False value
bool(CONSTANT or True) # [condition-evals-to-constant]
assert CONSTANT or True # [condition-evals-to-constant]
if CONSTANT and False: # [condition-evals-to-constant]
pass
elif CONSTANT and False: # [condition-evals-to-constant]
pass
while CONSTANT and False: # [condition-evals-to-constant]
break
1 if CONSTANT or True else 2 # [condition-evals-to-constant]
z = [x for x in range(10) if x or True] # [condition-evals-to-constant]
# Simplifies recursively
assert True or CONSTANT or OTHER # [condition-evals-to-constant]
assert (CONSTANT or True) or (CONSTANT or True) # [condition-evals-to-constant]
# Will try to infer the truthiness of an expression as long as it doesn't contain any variables
assert 3 + 4 or CONSTANT # [condition-evals-to-constant]
assert Unknown or True # [condition-evals-to-constant]
assert True or True # [condition-evals-to-constant]
assert False or False # [condition-evals-to-constant]
assert True and True # [condition-evals-to-constant]
assert False and False # [condition-evals-to-constant]
# A bare constant that's not inside of a boolean operation will emit `using-constant-test` instead
if True: # pylint: disable=using-constant-test
pass
# Expressions not in one of the above situations will not emit a message
CONSTANT or True
bool(CONSTANT or OTHER)
bool(func(CONSTANT or True))
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/c/condition_evals_to_constant.py | Python | mit | 1,666 |
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
for name,data in out_data.items():
if name == "seq_file2":
data.dbkey = param_dict['dbkey_2']
app.model.context.add( data )
app.model.context.flush()
break | volpino/Yeps-EURAC | tools/filters/axt_to_lav_code.py | Python | mit | 306 |
from ampadb.support import Forms
from django import forms
from django.core.exceptions import ValidationError
from . import ies_format
from .ampacsv import InvalidFormat
from .import_fmts import IEFormats
class ExportForm(Forms.Form):
FORMAT_CHOICES = [(IEFormats.CSV, 'CSV (E-mail)'), (IEFormats.AMPACSV,
'CSV (Importació)'),
(IEFormats.JSON, 'JSON'), (IEFormats.PICKLE, 'Pickle')]
format = forms.ChoiceField(
required=True, choices=FORMAT_CHOICES, widget=forms.RadioSelect)
classe = forms.CharField(required=False, widget=forms.HiddenInput)
contrasenya = forms.CharField(required=False, widget=forms.PasswordInput)
repeteix_la_contrasenya = forms.CharField(
required=False, widget=forms.PasswordInput)
def clean(self):
cleaned_data = super().clean()
contrasenya = cleaned_data.get('contrasenya')
if contrasenya and (contrasenya !=
cleaned_data.get('repeteix_la_contrasenya')):
self.add_error('repeteix_la_contrasenya',
ValidationError('La contrasenya no coincideix'))
class ImportForm(Forms.Form):
FORMAT_CHOICES = [(IEFormats.AUTO, 'Autodetectar'),
(IEFormats.AMPACSV, 'CSV'), (IEFormats.EXCELCSV,
'CSV (Excel)'),
(IEFormats.JSON, 'JSON'), (IEFormats.PICKLE, 'Pickle')]
PREEXISTENT_CHOICES = [('', 'Conservar'), ('DEL',
'Eliminar no mencionades'),
('DEL_ALL', 'Eliminar tot (no recomanat)')]
format = forms.ChoiceField(
required=False, choices=FORMAT_CHOICES, widget=forms.RadioSelect)
contrasenya = forms.CharField(
required=False,
widget=forms.PasswordInput,
help_text=("Si és un arxiu Pickle xifrat, s'intentarà desxifrar amb"
" aquesta contrasenya. Si el format no és Pickle,"
" aquest camp s'ignorarà."))
preexistents = forms.ChoiceField(
        required=False,  # 'Conservar' (keep) is the default
choices=PREEXISTENT_CHOICES,
label='Entrades preexistents',
widget=forms.RadioSelect,
help_text=(
"Què fer amb les entrades preexistents que no es mencionen a "
"l'arxiu. \"Conservar\" no les modifica; \"Eliminar no "
"mencionades\" les elimina, però, si la entrada existeix i conté "
"dades que l'arxiu no té, aquestes es conserven (ex. si un alumne "
"té el correu de l'alumne però l'arxiu no té aquest camp, es "
"conserva el que ja tenia); \"Eliminar tot\" només deixa les "
"dades que hi ha a l'arxiu."))
ifile = forms.FileField(required=True, label="Arxiu d'importació")
class Ies: # pylint: disable=too-few-public-methods
class UploadForm(Forms.Form):
ifile = forms.FileField(
required=True,
label="Arxiu d'importació",
widget=forms.FileInput(attrs={
'accept': '.csv'
}))
| ampafdv/ampadb | importexport/forms.py | Python | mit | 3,163 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import math
import sys
from thumbor.point import FocalPoint
from thumbor.utils import logger
trim_enabled = True
try:
from thumbor.ext.filters import _bounding_box
except ImportError:
logger.warn("Error importing bounding_box filter, trimming won't work")
trim_enabled = False
class Transformer(object):
def __init__(self, context):
self.context = context
self.engine = self.context.request.engine
def calculate_target_dimensions(self):
source_width, source_height = self.engine.size
source_width = float(source_width)
source_height = float(source_height)
if not self.context.request.width and not self.context.request.height:
self.target_width = source_width
self.target_height = source_height
else:
if self.context.request.width:
if self.context.request.width == "orig":
self.target_width = source_width
else:
self.target_width = float(self.context.request.width)
else:
self.target_width = self.engine.get_proportional_width(self.context.request.height)
if self.context.request.height:
if self.context.request.height == "orig":
self.target_height = source_height
else:
self.target_height = float(self.context.request.height)
else:
self.target_height = self.engine.get_proportional_height(self.context.request.width)
def adjust_focal_points(self):
source_width, source_height = self.engine.size
self.focal_points = None
if self.context.request.focal_points:
if self.context.request.should_crop:
self.focal_points = []
crop = self.context.request.crop
for point in self.context.request.focal_points:
if point.x < crop['left'] or point.x > crop['right'] or point.y < crop['top'] or point.y > crop['bottom']:
continue
point.x -= crop['left'] or 0
point.y -= crop['top'] or 0
self.focal_points.append(point)
else:
self.focal_points = self.context.request.focal_points
if not self.focal_points:
self.focal_points = [
FocalPoint.from_alignment(self.context.request.halign,
self.context.request.valign,
source_width,
source_height)
]
self.engine.focus(self.focal_points)
def transform(self, callback):
self.done_callback = callback
if self.context.config.RESPECT_ORIENTATION:
self.engine.reorientate()
self.trim()
self.smart_detect()
def trim(self):
is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
if self.context.request.trim is None or not trim_enabled or is_gifsicle:
return
mode, data = self.engine.image_data_as_rgb()
box = _bounding_box.apply(
mode,
self.engine.size[0],
self.engine.size[1],
self.context.request.trim_pos,
self.context.request.trim_tolerance,
data
)
if box[2] < box[0] or box[3] < box[1]:
logger.warn("Ignoring trim, there wouldn't be any image left, check the tolerance.")
return
self.engine.crop(box[0], box[1], box[2] + 1, box[3] + 1)
if self.context.request.should_crop:
self.context.request.crop['left'] -= box[0]
self.context.request.crop['top'] -= box[1]
self.context.request.crop['right'] -= box[0]
self.context.request.crop['bottom'] -= box[1]
@property
def smart_storage_key(self):
return self.context.request.image_url
def smart_detect(self):
is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
if (not (self.context.modules.detectors and self.context.request.smart)) or is_gifsicle:
self.do_image_operations()
return
try:
# Beware! Boolean hell ahead.
#
# The `running_smart_detection` flag is needed so we can know
# whether `after_smart_detect()` is running synchronously or not.
#
# If we're running it in a sync fashion it will set
# `should_run_image_operations` to True so we can avoid running
# image operation inside the try block.
self.should_run_image_operations = False
self.running_smart_detection = True
self.do_smart_detection()
self.running_smart_detection = False
except Exception:
if not self.context.config.IGNORE_SMART_ERRORS:
raise
logger.exception("Ignored error during smart detection")
if self.context.config.USE_CUSTOM_ERROR_HANDLING:
self.context.modules.importer.error_handler.handle_error(
context=self.context,
handler=self.context.request_handler,
exception=sys.exc_info()
)
self.context.request.prevent_result_storage = True
self.context.request.detection_error = True
self.do_image_operations()
if self.should_run_image_operations:
self.do_image_operations()
def do_smart_detection(self):
focal_points = self.context.modules.storage.get_detector_data(self.smart_storage_key)
if focal_points is not None:
self.after_smart_detect(focal_points, points_from_storage=True)
else:
detectors = self.context.modules.detectors
detectors[0](self.context, index=0, detectors=detectors).detect(self.after_smart_detect)
def after_smart_detect(self, focal_points=[], points_from_storage=False):
for point in focal_points:
self.context.request.focal_points.append(FocalPoint.from_dict(point))
if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
storage = self.context.modules.storage
points = []
for point in self.context.request.focal_points:
points.append(point.to_dict())
storage.put_detector_data(self.smart_storage_key, points)
if self.running_smart_detection:
self.should_run_image_operations = True
return
self.do_image_operations()
def do_image_operations(self):
if '.gif' == self.context.request.engine.extension and 'cover()' in self.context.request.filters:
self.extract_cover()
self.manual_crop()
self.calculate_target_dimensions()
self.adjust_focal_points()
if self.context.request.debug:
self.debug()
else:
if self.context.request.fit_in:
self.fit_in_resize()
else:
self.auto_crop()
self.resize()
self.flip()
self.done_callback()
def extract_cover(self):
self.engine.extract_cover()
def manual_crop(self):
if self.context.request.should_crop:
limit = lambda dimension, maximum: min(max(dimension, 0), maximum)
source_width, source_height = self.engine.size
crop = self.context.request.crop
crop['left'] = limit(crop['left'], source_width)
crop['top'] = limit(crop['top'], source_height)
crop['right'] = limit(crop['right'], source_width)
crop['bottom'] = limit(crop['bottom'], source_height)
if crop['left'] >= crop['right'] or crop['top'] >= crop['bottom']:
self.context.request.should_crop = False
crop['left'] = crop['right'] = crop['top'] = crop['bottom'] = 0
return
self.engine.crop(crop['left'], crop['top'], crop['right'], crop['bottom'])
def auto_crop(self):
source_width, source_height = self.engine.size
target_height = self.target_height or 1
target_width = self.target_width or 1
source_ratio = round(float(source_width) / source_height, 2)
target_ratio = round(float(target_width) / target_height, 2)
if source_ratio == target_ratio:
return
focal_x, focal_y = self.get_center_of_mass()
if self.target_width / source_width > self.target_height / source_height:
crop_width = source_width
crop_height = int(round(source_width * self.target_height / target_width, 0))
else:
crop_width = int(round(math.ceil(self.target_width * source_height / target_height), 0))
crop_height = source_height
crop_left = int(round(min(max(focal_x - (crop_width / 2), 0.0), source_width - crop_width)))
crop_right = min(crop_left + crop_width, source_width)
crop_top = int(round(min(max(focal_y - (crop_height / 2), 0.0), source_height - crop_height)))
crop_bottom = min(crop_top + crop_height, source_height)
self.engine.crop(crop_left, crop_top, crop_right, crop_bottom)
def flip(self):
if self.context.request.horizontal_flip:
self.engine.flip_horizontally()
if self.context.request.vertical_flip:
self.engine.flip_vertically()
def get_center_of_mass(self):
total_weight = 0.0
total_x = 0.0
total_y = 0.0
for focal_point in self.focal_points:
total_weight += focal_point.weight
total_x += focal_point.x * focal_point.weight
total_y += focal_point.y * focal_point.weight
x = total_x / total_weight
y = total_y / total_weight
return x, y
def resize(self):
source_width, source_height = self.engine.size
if self.target_width == source_width and self.target_height == source_height:
return
self.engine.resize(self.target_width or 1, self.target_height or 1) # avoiding 0px images
def fit_in_resize(self):
source_width, source_height = self.engine.size
        # invert width and height if the image orientation differs from the request orientation and adaptive mode is requested
if self.context.request.adaptive and (
(source_width - source_height < 0 and self.target_width - self.target_height > 0) or
(source_width - source_height > 0 and self.target_width - self.target_height < 0)
):
tmp = self.context.request.width
self.context.request.width = self.context.request.height
self.context.request.height = tmp
tmp = self.target_width
self.target_width = self.target_height
self.target_height = tmp
sign = 1
if self.context.request.full:
sign = -1
if sign == 1 and self.target_width >= source_width and self.target_height >= source_height:
return
if source_width / self.target_width * sign >= source_height / self.target_height * sign:
resize_height = round(source_height * self.target_width / source_width)
resize_width = self.target_width
else:
resize_height = self.target_height
resize_width = round(source_width * self.target_height / source_height)
self.engine.resize(resize_width, resize_height)
def debug(self):
if not self.context.request.focal_points:
return
for point in self.context.request.focal_points:
if point.width <= 1:
point.width = 10
if point.height <= 1:
point.height = 10
self.engine.draw_rectangle(int(point.x - (point.width / 2)),
int(point.y - (point.height / 2)),
point.width,
point.height)
| food52/thumbor | thumbor/transformer.py | Python | mit | 12,511 |
from widget import Widget
from gui_util import intersect
# Simple container, list of children is not meant to be mutable
class Container(Widget):
def __init__(self, width, height):
super(Container, self).__init__(width, height)
self.children = []
self.curFocus = 0
def draw(self, canvas, offsetx, offsety, minx, miny, maxx, maxy):
for (child_x, child_y, child_widget) in self.children:
(child_width, child_height) = child_widget.size()
regions = intersect((minx, miny, maxx, maxy),
(child_x, child_y, child_x+child_width, child_y+child_height))
for (cminx, cminy, cmaxx, cmaxy) in regions:
child_widget.draw(canvas, offsetx+child_x, offsety+child_y,
cminx-child_x, cminy-child_y, cmaxx-child_x, cmaxy-child_y)
def addChild(self, child_x, child_y, child_widget):
self.children.append((child_x, child_y, child_widget))
return len(self.children)-1
def getChild(self, index):
return self.children[index]
def remChild(self, index):
self.children.pop(index)
def setChildPos(self, index, child_x, child_y):
self.children[index] = (child_x, child_y, self.children[index][2])
def numChildren(self):
return len(self.children)
def onFocus(self):
while self.curFocus < len(self.children):
if self.children[self.curFocus][2].onFocus():
return True
self.curFocus += 1
self.curFocus = 0
return False
def changeFocus(self):
        # the focused child gets a chance to handle the focus change itself first
if self.children[self.curFocus][2].changeFocus():
return True
# Then we handle it
self.curFocus += 1
while self.curFocus < len(self.children):
if self.children[self.curFocus][2].onFocus():
return True
self.curFocus += 1
self.curFocus = 0
return False
def offFocus(self):
self.children[self.curFocus][2].offFocus()
def keyEvent(self, key):
if self.curFocus < len(self.children):
return self.children[self.curFocus][2].keyEvent(key)
| thijsmie/madmin | gui_lib/container.py | Python | mit | 2,228 |
import pandas as pd
def handler(event, context):
return ':)'
| moesy/AWS-Lambda-ML-Microservice-Skeleton | app/main.py | Python | mit | 69 |
from functools import partial
import time
from ukt import KT_NONE
from ukt import KyotoTycoon
from huey.api import Huey
from huey.constants import EmptyData
from huey.storage import BaseStorage
from huey.utils import decode
class KyotoTycoonStorage(BaseStorage):
priority = True
def __init__(self, name='huey', host='127.0.0.1', port=1978, db=None,
timeout=None, max_age=3600, queue_db=None, client=None,
blocking=False, result_expire_time=None):
super(KyotoTycoonStorage, self).__init__(name)
if client is None:
client = KyotoTycoon(host, port, timeout, db, serializer=KT_NONE,
max_age=max_age)
self.blocking = blocking
self.expire_time = result_expire_time
self.kt = client
self._db = db
self._queue_db = queue_db if queue_db is not None else db
self.qname = self.name + '.q'
self.sname = self.name + '.s'
self.q = self.kt.Queue(self.qname, self._queue_db)
self.s = self.kt.Schedule(self.sname, self._queue_db)
def enqueue(self, data, priority=None):
self.q.add(data, priority)
def dequeue(self):
if self.blocking:
return self.q.bpop(timeout=30)
else:
return self.q.pop()
def queue_size(self):
return len(self.q)
def enqueued_items(self, limit=None):
return self.q.peek(n=limit or -1)
def flush_queue(self):
return self.q.clear()
def convert_ts(self, ts):
return int(time.mktime(ts.timetuple()))
def add_to_schedule(self, data, ts, utc):
self.s.add(data, self.convert_ts(ts))
def read_schedule(self, ts):
return self.s.read(self.convert_ts(ts))
def schedule_size(self):
return len(self.s)
def scheduled_items(self, limit=None):
return self.s.items(limit)
def flush_schedule(self):
return self.s.clear()
def prefix_key(self, key):
return '%s.%s' % (self.qname, decode(key))
def put_data(self, key, value, is_result=False):
xt = self.expire_time if is_result else None
self.kt.set(self.prefix_key(key), value, self._db, expire_time=xt)
def peek_data(self, key):
result = self.kt.get_bytes(self.prefix_key(key), self._db)
return EmptyData if result is None else result
def pop_data(self, key):
if self.expire_time is not None:
return self.peek_data(key)
result = self.kt.seize(self.prefix_key(key), self._db)
return EmptyData if result is None else result
def delete_data(self, key):
return self.kt.seize(self.prefix_key(key), self._db) is not None
def has_data_for_key(self, key):
return self.kt.exists(self.prefix_key(key), self._db)
def put_if_empty(self, key, value):
return self.kt.add(self.prefix_key(key), value, self._db)
def result_store_size(self):
return len(self.kt.match_prefix(self.prefix_key(''), db=self._db))
def result_items(self):
prefix = self.prefix_key('')
keys = self.kt.match_prefix(prefix, db=self._db)
result = self.kt.get_bulk(keys, self._db)
plen = len(prefix)
return {key[plen:]: value for key, value in result.items()}
def flush_results(self):
prefix = self.prefix_key('')
keys = self.kt.match_prefix(prefix, db=self._db)
return self.kt.remove_bulk(keys, self._db)
def flush_all(self):
self.flush_queue()
self.flush_schedule()
self.flush_results()
class KyotoTycoonHuey(Huey):
storage_class = KyotoTycoonStorage
| rsalmaso/huey | huey/contrib/kyototycoon.py | Python | mit | 3,659 |
from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from filesystem.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import filesystem
import filesystem.copyonwrite
def test_mkdir():
tmp = maketemp()
filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert not os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
| nailor/filesystem | filesystem/test/test_copyonwrite_mkdir.py | Python | mit | 686 |
import chimera.auth as auth
from chimera.auth import User
from flask import Blueprint, abort, redirect, render_template, request, flash, json, url_for
module = Blueprint('users', __name__, template_folder='templates')
def make_json(data, status=200, headers={}):
default_headers = {"Content-Type": "application/json"}
default_headers.update(headers)
return json.dumps(data), status, default_headers
@module.record_once
def on_load(state):
global config
config = state.app.config
@module.route('/')
@auth.permission_required('users:index')
def index():
return render_template('index.html', users=User.all_ids())
@module.route('/', methods=['POST'])
@module.route('/new')
@auth.permission_required('users:create')
def create():
if request.method == 'POST':
return "TODO"
else:
return render_template('new.html')
@module.route('/<id>')
@auth.login_required
def edit(id):
if id != auth.current_user.get_id() and not(auth.current_user.has_permission('users:show')):
flash("You don't have permission for that.", 'danger')
return redirect('/')
user = User.get(id)
if not(user.is_authenticated()):
return abort(404)
if auth.current_user.has_permission('users:edit'):
return render_template('edit.html', user=user)
else:
return render_template('show.html', user=user)
@module.route('/<id>', methods=['PUT'])
@auth.permission_required('users:edit')
def update(id):
return "update "+id
@module.route('/<id>', methods=['DELETE'])
@auth.permission_required('users:delete')
def delete(id):
return "delete "+id
| sjklein92/senior-design | chimera/users/__init__.py | Python | mit | 1,620 |
#FLM: Save Files for MakeInstances
###################################################
### THE VALUES BELOW CAN BE EDITED AS NEEDED ######
###################################################
kDefaultMMFontFileName = "mmfont.pfa"
kInstancesDataFileName = "instances"
kCompositeDataName = "temp.composite.dat"
###################################################
__copyright__ = """
Copyright 2014-2016 Adobe Systems Incorporated (http://www.adobe.com/). All Rights Reserved.
This software is licensed as OpenSource, under the Apache License, Version 2.0. This license is available at: http://opensource.org/licenses/Apache-2.0.
"""
__doc__ = """
Save Files for MakeInstances v2.0 - April 12 2016
This script will do part of the work to create a set of single-master fonts
("instances") from a Multiple Master (MM) FontLab font. It will save a
Type 1 MM font (needed by the makeInstances program) and, in some cases,
a text file named 'temp.composite.dat' that contains data related to
composite glyphs.
You must then run the makeInstances program to actually build the instance Type 1
fonts. makeInstances can remove working glyphs, and rename MM-exception glyphs.
It will also do overlap removal, and autohint the instance fonts. This last is
desirable, as autohinting which is specific to an instance font is usually
significantly better than the hinting from interpolating the MM font hints.
As always with overlap removal, you should check all affected glyphs - it
doesn't always do the right thing.
Note that the makeInstances program can be run alone, given an MM Type1 font
file. However, if you use the ExceptionSuffixes keyword, then you must run
this script first. The script will make a file that identifies composite glyphs,
and allows makeInstances to correctly substitute contours in the composite glyph
from the exception glyph. This is necessary because FontLab cannot write all the
composite glyphs as Type 1 composites (also known as SEAC glyphs). This script
must be run again to renew this data file whenever changes are made to composite
glyphs.
Both this script and the "makeInstances" program depend on info provided by an
external text file named "instances", which contains all the instance-specific
values. The "instances" file must be a simple text file, located in the same
folder as the MM FontLab file.
For information on how to format the "instances" file, please read the
documentation in the InstanceGenerator.py script.
==================================================
Versions:
v2.0 - Apr 12 2016 - Added step to fix the MM FontBBox values of the mmfont.pfa file,
when the VFB's UPM value is not 1000 (long-standing FontLab bug).
v1.0 - Feb 15 2010 - Initial release
"""
import copy
import re
import os
kFieldsKey = "#KEYS:"
kFamilyName = "FamilyName"
kFontName = "FontName"
kFullName = "FullName"
kWeight = "Weight"
kCoordsKey = "Coords"
kIsBoldKey = "IsBold" # This is changed to kForceBold in the instanceDict when reading in the instance file.
kForceBold = "ForceBold"
kIsItalicKey = "IsItalic"
kExceptionSuffixes = "ExceptionSuffixes"
kExtraGlyphs = "ExtraGlyphs"
kFixedFieldKeys = {
# field index: key name
0:kFamilyName,
1:kFontName,
2:kFullName,
3:kWeight,
4:kCoordsKey,
5:kIsBoldKey,
}
kNumFixedFields = len(kFixedFieldKeys)
kBlueScale = "BlueScale"
kBlueShift = "BlueShift"
kBlueFuzz = "BlueFuzz"
kBlueValues = "BlueValues"
kOtherBlues = "OtherBlues"
kFamilyBlues = "FamilyBlues"
kFamilyOtherBlues = "FamilyOtherBlues"
kStdHW = "StdHW"
kStdVW = "StdVW"
kStemSnapH = "StemSnapH"
kStemSnapV = "StemSnapV"
kAlignmentZonesKeys = [kBlueValues, kOtherBlues, kFamilyBlues, kFamilyOtherBlues]
kTopAlignZonesKeys = [kBlueValues, kFamilyBlues]
kMaxTopZonesSize = 14 # 7 zones
kBotAlignZonesKeys = [kOtherBlues, kFamilyOtherBlues]
kMaxBotZonesSize = 10 # 5 zones
kStdStemsKeys = [kStdHW, kStdVW]
kMaxStdStemsSize = 1
kStemSnapKeys = [kStemSnapH, kStemSnapV]
kMaxStemSnapSize = 12 # including StdStem
class ParseError(ValueError):
pass
def validateArrayValues(arrayList, valuesMustBePositive):
for i in range(len(arrayList)):
try:
arrayList[i] = eval(arrayList[i])
except (NameError, SyntaxError):
return
if valuesMustBePositive:
if arrayList[i] < 0:
return
return arrayList
def readInstanceFile(instancesFilePath):
f = open(instancesFilePath, "rt")
data = f.read()
f.close()
lines = data.splitlines()
i = 0
parseError = 0
keyDict = copy.copy(kFixedFieldKeys)
numKeys = kNumFixedFields
numLines = len(lines)
instancesList = []
for i in range(numLines):
line = lines[i]
# Skip over blank lines
line2 = line.strip()
if not line2:
continue
# Get rid of all comments. If we find a key definition comment line, parse it.
commentIndex = line.find('#')
if commentIndex >= 0:
if line.startswith(kFieldsKey):
if instancesList:
print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
raise ParseError
# parse the line with the field names.
line = line[len(kFieldsKey):]
line = line.strip()
keys = line.split('\t')
keys = map(lambda name: name.strip(), keys)
numKeys = len(keys)
k = kNumFixedFields
while k < numKeys:
keyDict[k] = keys[k]
k +=1
continue
else:
line = line[:commentIndex]
continue
# Must be a data line.
fields = line.split('\t')
fields = map(lambda datum: datum.strip(), fields)
numFields = len(fields)
if (numFields != numKeys):
print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
parseError = 1
continue
instanceDict= {}
#Build a dict from key to value. Some kinds of values needs special processing.
for k in range(numFields):
key = keyDict[k]
field = fields[k]
if not field:
continue
if field in ["Default", "None", "FontBBox"]:
# FontBBox is no longer supported - I calculate the real
# instance fontBBox from the glyph metrics instead,
continue
if key == kFontName:
value = field
elif key in [kExtraGlyphs, kExceptionSuffixes]:
value = eval(field)
elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
try:
value = eval(field) # this works for all three fields.
if key == kIsBoldKey: # need to convert to Type 1 field key.
instanceDict[key] = value
# add kForceBold key.
key = kForceBold
if value == 1:
value = "true"
else:
value = "false"
elif key == kIsItalicKey:
if value == 1:
value = "true"
else:
value = "false"
elif key == kCoordsKey:
if type(value) == type(0):
value = (value,)
except (NameError, SyntaxError):
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
if key in kAlignmentZonesKeys:
if len(value) % 2 != 0:
print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
parseError = 1
continue
if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
if len(value) > kMaxTopZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kBotAlignZonesKeys: # The Type 1 spec only allows 5 top zones (5 pairs of values)
if len(value) > kMaxBotZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxBotZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kStdStemsKeys:
if len(value) > kMaxStdStemsSize:
print "ERROR: In line %s, the %s field can only have %d value." % (i+1, key, kMaxStdStemsSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
if key in kStemSnapKeys: # The Type 1 spec only allows 12 stem widths, including 1 standard stem
if len(value) > kMaxStemSnapSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxStemSnapSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
else:
# either a single number or a string.
if re.match(r"^[-.\d]+$", field):
value = field #it is a Type 1 number. Pass as is, as a string.
else:
value = field
instanceDict[key] = value
if (kStdHW in instanceDict and kStemSnapH not in instanceDict) or (kStdHW not in instanceDict and kStemSnapH in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdHW, kStemSnapH)
parseError = 1
elif (kStdHW in instanceDict and kStemSnapH in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapH][0] != instanceDict[kStdHW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapH, kStdHW)
parseError = 1
if (kStdVW in instanceDict and kStemSnapV not in instanceDict) or (kStdVW not in instanceDict and kStemSnapV in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdVW, kStemSnapV)
parseError = 1
elif (kStdVW in instanceDict and kStemSnapV in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapV][0] != instanceDict[kStdVW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapV, kStdVW)
parseError = 1
instancesList.append(instanceDict)
if parseError or len(instancesList) == 0:
raise(ParseError)
return instancesList
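# Illustrative sketch, not part of the macro's control flow: readInstanceFile()
# above requires the first (sorted) StemSnapH value to equal the StdHW value,
# and likewise StemSnapV[0] to equal StdVW. The hypothetical helper below
# restates that rule in isolation; its name, arguments and sample values are
# assumptions, not something the macro defines.
def _stemSnapMatchesStd(stdStem, stemSnap):
	# stdStem: the single standard stem value, as a one-item list of strings
	# stemSnap: the sorted list of stem widths, e.g. ["67", "71", "80"]
	return bool(stdStem) and bool(stemSnap) and stemSnap[0] == stdStem[0]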
def saveCompositeInfo(fontMM, mmParentDir):
filePath = os.path.join(mmParentDir, kCompositeDataName)
numGlyphs = len(fontMM)
glyphDict = {}
numMasters = fontMM.glyphs[0].layers_number
for gid in range(numGlyphs):
glyph = fontMM.glyphs[gid]
lenComps = len(glyph.components)
if lenComps == 0:
continue
compList = []
glyphDict[glyph.name] = compList
numBaseContours = glyph.GetContoursNumber()
pathIndex = numBaseContours
for cpi in range(lenComps):
component = glyph.components[cpi]
compGlyph = fontMM.glyphs[component.index]
			compName = compGlyph.name
compEntry = [compName, numBaseContours + cpi]
metricsList = [None]*numMasters
seenAnyChange = 0
for mi in range(numMasters):
shift = component.deltas[mi]
scale = component.scales[mi]
shiftEntry = scaleEntry = None
if (shift.x != 0) or (shift.y != 0):
shiftEntry = (shift.x, shift.y)
if (scale.x != 1.0) or (scale.y !=1.0 ):
scaleEntry = (scale.x, scale.y)
if scaleEntry or shiftEntry:
metricsEntry = (shiftEntry, scaleEntry)
seenAnyChange = 1
else:
metricsEntry = None
metricsList[mi] = metricsEntry
compName = fontMM.glyphs[component.index].name
if seenAnyChange:
compList.append([compName, pathIndex, metricsList])
else:
compList.append([compName, pathIndex, None])
pathIndex += compGlyph.GetContoursNumber()
fp = open(filePath, "wt")
fp.write(repr(glyphDict))
fp.close()
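# For reference, the composite data file written above holds a repr() of a
# dict mapping each composite glyph name to [componentName, pathIndex,
# perMasterMetrics] entries, where perMasterMetrics is None for an
# untransformed component, or a list with one (shift, scale) pair per master.
# A hypothetical example (glyph names and numbers are made up):
#
#   {'aacute': [['a', 2, None],
#               ['acute', 4, [((0, 120), None), None]]]}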
def parseVals(valList):
valList = valList.split()
valList = map(eval, valList)
return valList
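# fixFontBBox() below rewrites the multiple-master /FontBBox entry of the
# generated mmfont.pfa. Judging from the regular expression it uses, that
# entry looks roughly like the hypothetical line below, with one brace group
# per bounding-box coordinate and one value per master (a two-master font in
# this made-up example):
#
#   /FontBBox{{-100 -120} {-250 -230} {950 980} {800 820}}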
def fixFontBBox(data, pfaPath):
bboxMatch = re.search(r"/FontBBox\s*\{\{([^}]+)\}\s*\{([^}]+)\}\s*\{([^}]+)\}\s*\{([^}]+)\}\}", data)
if not bboxMatch:
print "Failed to find MM FontBBox %s" % pfaPath
return
pfaBBox = [bboxMatch.group(1), bboxMatch.group(2), bboxMatch.group(3), bboxMatch.group(4)]
pfaBBox = map(parseVals, pfaBBox)
print "Calculating correct MM FontBBox..."
mastersRange = range(fl.font.glyphs[0].layers_number)
flBBox = [[], [], [], []]
for i in range(4):
for m in mastersRange:
flBBox[i].append([])
c = 0
for flGlyph in fl.font.glyphs:
for m in mastersRange:
bbox = flGlyph.GetBoundingRect(m)
flBBox[0][m].append(bbox.ll.x)
flBBox[1][m].append(bbox.ll.y)
flBBox[2][m].append(bbox.ur.x)
flBBox[3][m].append(bbox.ur.y)
for m in mastersRange:
flBBox[0][m] = int( round( min(flBBox[0][m])) )
flBBox[1][m] = int( round( min(flBBox[1][m])) )
flBBox[2][m] = int( round( max(flBBox[2][m])) )
flBBox[3][m] = int( round( max(flBBox[3][m])) )
if pfaBBox == flBBox:
print "mmfont.pfa and fl.font have the same MM FontBBox values."
else:
matchGroups = bboxMatch.groups()
numGroups = 4 # by definition of regex above.
prefix = data[:bboxMatch.start(1)-1]
postfix = data[bboxMatch.end(4)+1:]
newString = []
for i in range(numGroups):
newString.append("{")
for m in mastersRange:
newString.append("%s" % (flBBox[i][m]) )
newString.append("}")
newString = " ".join(newString)
data = prefix + newString + postfix
try:
fp = open(pfaPath, "wt")
fp.write(data)
fp.close()
print "Updated mmfont.pfa with correct MM FontBBox values."
except (OSError,IOError):
print "Failed to open and write %s" % pfaPath
def saveFiles():
try:
parentDir = os.path.dirname(os.path.abspath(fl.font.file_name))
except AttributeError:
print "The font has not been saved. Please save the font and try again."
return
instancesFilePath = os.path.join(parentDir, kInstancesDataFileName)
if not os.path.isfile(instancesFilePath):
print "Could not find the file named '%s' in the path below\n\t%s" % (kInstancesDataFileName, parentDir)
return
try:
print "Parsing instances file..."
instancesList = readInstanceFile(instancesFilePath)
except ParseError:
print "Error parsing file or file is empty."
return
# Set FontLab preferences
flPrefs = Options()
flPrefs.Load()
flPrefs.T1Terminal = 0 # so we don't have to close the dialog with each instance.
flPrefs.T1Encoding = 1 # always write Std Encoding.
flPrefs.T1Decompose = 1 # Do decompose SEAC chars
flPrefs.T1Autohint = 0 # Do not autohint unhinted chars
# Generate mmfont.pfa
pfaPath = os.path.join(parentDir, kDefaultMMFontFileName)
print "Saving Type 1 MM font file to:%s\t%s" % (os.linesep, pfaPath)
fl.GenerateFont(eval("ftTYPE1ASCII_MM"), pfaPath)
# Check if mmfont.pfa was indeed generated
if not os.path.exists(pfaPath):
print "Failed to find %s" % pfaPath
return
# Save the composite glyph data, but only if it's necessary
if (kExceptionSuffixes in instancesList[0] or kExtraGlyphs in instancesList[0]):
compositePath = os.path.join(parentDir, kCompositeDataName)
print "Saving composite glyphs data to:%s\t%s" % (os.linesep, compositePath)
saveCompositeInfo(fl.font, parentDir)
# Fix the FontBBox values if the font's UPM is not 1000
if fl.font.upm != 1000:
try:
fp = open(pfaPath, "rt")
data = fp.read()
fp.close()
except (OSError,IOError):
print "Failed to open and read %s" % pfaPath
return
fixFontBBox(data, pfaPath)
print "Done!"
def run():
global debug
if fl.count == 0:
print 'No font opened.'
return
if len(fl.font) == 0:
print 'The font has no glyphs.'
return
if fl.font[0].layers_number == 1:
print 'The font is not MM.'
return
else:
fl.output = ''
saveFiles()
if __name__ == "__main__":
run()
| shannpersand/cooper-type | _resources/FDK Adobe/Tools/FontLabMacros/MM Designs/SaveFilesForMakeInstances.py | Python | cc0-1.0 | 17,235 |
"""Tests for distutils.command.sdist."""
import os
import tarfile
import unittest
import warnings
import zipfile
from os.path import join
from textwrap import dedent
from test.test_support import captured_stdout, check_warnings, run_unittest
# zlib is not used here, but if it's not available
# the tests that use zipfile may fail
try:
import zlib
except ImportError:
zlib = None
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
from distutils.tests.test_config import PyPIRCCommandTestCase
from distutils.errors import DistutilsOptionError
from distutils.spawn import find_executable
from distutils.log import WARN
from distutils.filelist import FileList
from distutils.archive_util import ARCHIVE_FORMATS
SETUP_PY = """
from distutils.core import setup
import somecode
setup(name='fake')
"""
MANIFEST = """\
# file GENERATED by distutils, do NOT edit
README
buildout.cfg
inroot.txt
setup.py
data%(sep)sdata.dt
scripts%(sep)sscript.py
some%(sep)sfile.txt
some%(sep)sother_file.txt
somecode%(sep)s__init__.py
somecode%(sep)sdoc.dat
somecode%(sep)sdoc.txt
"""
class SDistTestCase(PyPIRCCommandTestCase):
def setUp(self):
# PyPIRCCommandTestCase creates a temp dir already
# and put it in self.tmp_dir
super(SDistTestCase, self).setUp()
# setting up an environment
self.old_path = os.getcwd()
os.mkdir(join(self.tmp_dir, 'somecode'))
os.mkdir(join(self.tmp_dir, 'dist'))
# a package, and a README
self.write_file((self.tmp_dir, 'README'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
os.chdir(self.tmp_dir)
def tearDown(self):
# back to normal
os.chdir(self.old_path)
super(SDistTestCase, self).tearDown()
def get_cmd(self, metadata=None):
"""Returns a cmd"""
if metadata is None:
metadata = {'name': 'fake', 'version': '1.0',
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
return dist, cmd
@unittest.skipUnless(zlib, "requires zlib")
def test_prune_file_list(self):
# this test creates a package with some vcs dirs in it
# and launch sdist to make sure they get pruned
# on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir, 'somecode', '.hg',
'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir, 'somecode', '.git',
'ok'), 'xxx')
# now building a sdist
dist, cmd = self.get_cmd()
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
self.assertEqual(len(content), 4)
@unittest.skipUnless(zlib, "requires zlib")
def test_make_distribution(self):
# check if tar and gzip are installed
if (find_executable('tar') is None or
find_executable('gzip') is None):
return
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
# making sure we have two files
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
@unittest.skipUnless(zlib, "requires zlib")
def test_unicode_metadata_tgz(self):
"""
Unicode name or version should not break building to tar.gz format.
Reference issue #11638.
"""
# create the sdist command with unicode parameters
dist, cmd = self.get_cmd({'name': u'fake', 'version': u'1.0'})
# create the sdist as gztar and run the command
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
# The command should have created the .tar.gz file
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
self.assertEqual(result, ['fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
@unittest.skipUnless(zlib, "requires zlib")
def test_add_defaults(self):
# http://bugs.python.org/issue2279
# add_default should also include
# data_files and package_data
dist, cmd = self.get_cmd()
# filling data_files by pointing files
# in package_data
dist.package_data = {'': ['*.cfg', '*.dat'],
'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
# adding some data in data_files
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
# make sure VCS directories are pruned (#14004)
hg_dir = join(self.tmp_dir, '.hg')
os.mkdir(hg_dir)
self.write_file((hg_dir, 'last-message.txt'), '#')
# a buggy regex used to prevent this from working on windows (#6884)
self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [('data', ['data/data.dt',
'buildout.cfg',
'inroot.txt',
'notexisting']),
'some/file.txt',
'some/other_file.txt']
# adding a script
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything was added
self.assertEqual(len(content), 12)
# checking the MANIFEST
f = open(join(self.tmp_dir, 'MANIFEST'))
try:
manifest = f.read()
finally:
f.close()
self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
@unittest.skipUnless(zlib, "requires zlib")
def test_metadata_check_option(self):
        # testing the `metadata-check` option
dist, cmd = self.get_cmd(metadata={})
# this should raise some warnings !
# with the `check` subcommand
cmd.ensure_finalized()
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 2)
# trying with a complete set of metadata
self.clear_logs()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = 0
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 0)
def test_check_metadata_deprecated(self):
        # makes sure check_metadata is deprecated
dist, cmd = self.get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_show_formats(self):
with captured_stdout() as stdout:
show_formats()
# the output should be a header line + one line per format
num_formats = len(ARCHIVE_FORMATS.keys())
output = [line for line in stdout.getvalue().split('\n')
if line.strip().startswith('--formats=')]
self.assertEqual(len(output), num_formats)
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
# default options set by finalize
self.assertEqual(cmd.manifest, 'MANIFEST')
self.assertEqual(cmd.template, 'MANIFEST.in')
self.assertEqual(cmd.dist_dir, 'dist')
        # formats has to be a string splittable on (' ', ',') or
# a stringlist
cmd.formats = 1
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.formats = ['zip']
cmd.finalize_options()
# formats has to be known
cmd.formats = 'supazipa'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
@unittest.skipUnless(zlib, "requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_make_distribution_owner_group(self):
# check if tar and gzip are installed
if (find_executable('tar') is None or
find_executable('gzip') is None):
return
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar and specifying the owner+group
cmd.formats = ['gztar']
cmd.owner = pwd.getpwuid(0)[0]
cmd.group = grp.getgrgid(0)[0]
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
# building a sdist again
dist, cmd = self.get_cmd()
# creating a gztar
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
        # note that we are not testing the group ownership here,
        # because it can vary depending on the platform and the
        # container rights (see #7408)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, os.getuid())
finally:
archive.close()
# the following tests make sure there is a nice error message instead
# of a traceback when parsing an invalid manifest template
def _check_template(self, content):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
cmd.ensure_finalized()
cmd.filelist = FileList()
cmd.read_template()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 1)
def test_invalid_template_unknown_command(self):
self._check_template('taunt knights *')
def test_invalid_template_wrong_arguments(self):
# this manifest command takes one argument
self._check_template('prune')
@unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
def test_invalid_template_wrong_path(self):
# on Windows, trailing slashes are not allowed
# this used to crash instead of raising a warning: #8286
self._check_template('include examples/')
@unittest.skipUnless(zlib, "requires zlib")
def test_get_file_list(self):
# make sure MANIFEST is recalculated
dist, cmd = self.get_cmd()
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(len(manifest), 5)
# adding a file
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
# make sure build_py is reinitialized, like a fresh run
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest2 = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
# do we have the new file in MANIFEST ?
self.assertEqual(len(manifest2), 6)
self.assertIn('doc2.txt', manifest2[-1])
@unittest.skipUnless(zlib, "requires zlib")
def test_manifest_marker(self):
# check that autogenerated MANIFESTs have a marker
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest[0],
'# file GENERATED by distutils, do NOT edit')
@unittest.skipUnless(zlib, 'requires zlib')
def test_manifest_comments(self):
# make sure comments don't cause exceptions or wrong includes
contents = dedent("""\
# bad.py
#bad.py
good.py
""")
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), contents)
self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
cmd.run()
self.assertEqual(cmd.filelist.files, ['good.py'])
@unittest.skipUnless(zlib, "requires zlib")
def test_manual_manifest(self):
# check that a MANIFEST without a marker is left alone
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
self.write_file((self.tmp_dir, 'README.manual'),
'This project maintains its MANIFEST file itself.')
cmd.run()
self.assertEqual(cmd.filelist.files, ['README.manual'])
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest, ['README.manual'])
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
filenames = [tarinfo.name for tarinfo in archive]
finally:
archive.close()
self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO',
'fake-1.0/README.manual'])
def test_suite():
return unittest.makeSuite(SDistTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/distutils/tests/test_sdist.py | Python | epl-1.0 | 16,944 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''CMU dict file parser
Copyright (C) 2010
Yosuke Matsusaka
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
'''
class CMUDict:
""" Utility class to parse CMU Pronunciation Dictionaly."""
def __init__(self, fname):
self._fname = fname
self._dict = {}
self.parse(self._fname)
def parse(self, fname):
f = open(fname, 'r')
f.readline()
for l in f:
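            # Each dictionary line is assumed to look roughly like
            #   ("word" nil (f o n e m z))
            # so splitting on the first two spaces isolates the head word and
            # the phoneme string; parentheses, quotes and stress digits are
            # stripped below. The exact line format is an assumption inferred
            # from this parser, not verified against the dictionary file.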
t = l.strip().split(' ', 2)
w = t[0].strip('()"')
v = t[2].replace('(', '').replace(')', '').replace(' 0', '').replace(' 1', '')
try:
self._dict[w].append(v)
except KeyError:
self._dict[w] = [v,]
def lookup(self, w):
try:
return self._dict[w]
except KeyError:
return []
if __name__ == '__main__':
doc = CMUDict('/usr/share/festival/dicts/cmu/cmudict-0.4.out')
print doc.lookup('hello')
| yosuke/OpenHRIVoice | openhrivoice/parsecmudict.py | Python | epl-1.0 | 1,210 |
# -*- coding: utf-8 -*-
#
# (C) Pywikipedia bot team, 2004-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 255cc3dc08b2410093f318dc48471c6f3ff691e5 $'
import re
import urllib
from datetime import timedelta, datetime
import config
import wikipedia as pywikibot
# Parent class for all wiki families
class Family(object):
def __init__(self):
if not hasattr(self, 'name'):
self.name = None
# For interwiki sorting order see
# http://meta.wikimedia.org/wiki/Interwiki_sorting_order
# The sorting order by language name from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename
self.alphabetic = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bm',
'bn', 'bjn', 'zh-min-nan', 'nan', 'map-bms', 'ba', 'be', 'be-x-old',
'bh', 'bcl', 'bi', 'bg', 'bar', 'bo', 'bs', 'br', 'bxr', 'ca', 'cv',
'ceb', 'cs', 'ch', 'cbk-zam', 'ny', 'sn', 'tum', 'cho', 'co', 'cy',
'da', 'dk', 'pdc', 'de', 'dv', 'nv', 'dsb', 'dz', 'mh', 'et', 'el',
'eml', 'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif',
'fo', 'fr', 'fy', 'ff', 'fur', 'ga', 'gv', 'gag', 'gd', 'gl', 'gan',
'ki', 'glk', 'gu', 'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy',
'hi', 'ho', 'hsb', 'hr', 'io', 'ig', 'ilo', 'bpy', 'id', 'ia', 'ie',
'iu', 'ik', 'os', 'xh', 'zu', 'is', 'it', 'he', 'jv', 'kl', 'kn',
'kr', 'pam', 'krc', 'ka', 'ks', 'csb', 'kk', 'kw', 'rw', 'rn', 'sw',
'kv', 'kg', 'ht', 'ku', 'kj', 'ky', 'mrj', 'lad', 'lbe', 'lez',
'lo', 'ltg', 'la', 'lv', 'lb', 'lt', 'lij', 'li', 'ln', 'jbo', 'lg',
'lmo', 'hu', 'mk', 'mg', 'ml', 'mt', 'mi', 'mr', 'xmf', 'arz',
'mzn', 'ms', 'min', 'cdo', 'mwl', 'mdf', 'mo', 'mn', 'mus', 'my',
'nah', 'na', 'fj', 'nl', 'nds-nl', 'cr', 'ne', 'new', 'ja', 'nap',
'ce', 'frr', 'pih', 'no', 'nb', 'nn', 'nrm', 'nov', 'ii', 'oc',
'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa', 'pi', 'pfl', 'pag',
'pnb', 'pap', 'ps', 'koi', 'km', 'pcd', 'pms', 'tpi', 'nds', 'pl',
'tokipona', 'tp', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh',
'ro', 'rmy', 'rm', 'qu', 'rue', 'ru', 'sah', 'se', 'sm', 'sa', 'sg',
'sc', 'sco', 'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple',
'sd', 'ss', 'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh',
'su', 'fi', 'sv', 'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te',
'tet', 'th', 'ti', 'tg', 'to', 'chr', 'chy', 've', 'tr', 'tk', 'tw',
'udm', 'bug', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vi', 'vo',
'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts',
'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw',
'zh-cn',
]
# The revised sorting order by first word from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename-firstword
self.alphabetic_revised = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bjn',
'id', 'ms', 'bm', 'bn', 'zh-min-nan', 'nan', 'map-bms', 'jv', 'su',
'ba', 'min', 'be', 'be-x-old', 'bh', 'bcl', 'bi', 'bar', 'bo', 'bs',
'br', 'bug', 'bg', 'bxr', 'ca', 'ceb', 'cv', 'cs', 'ch', 'cbk-zam',
'ny', 'sn', 'tum', 'cho', 'co', 'cy', 'da', 'dk', 'pdc', 'de', 'dv',
'nv', 'dsb', 'na', 'dz', 'mh', 'et', 'el', 'eml', 'en', 'myv', 'es',
'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr', 'fy', 'ff', 'fur',
'ga', 'gv', 'sm', 'gag', 'gd', 'gl', 'gan', 'ki', 'glk', 'gu',
'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy', 'hi', 'ho', 'hsb',
'hr', 'io', 'ig', 'ilo', 'bpy', 'ia', 'ie', 'iu', 'ik', 'os', 'xh',
'zu', 'is', 'it', 'he', 'kl', 'kn', 'kr', 'pam', 'ka', 'ks', 'csb',
'kk', 'kw', 'rw', 'ky', 'rn', 'mrj', 'sw', 'kv', 'kg', 'ht', 'ku',
'kj', 'lad', 'lbe', 'lez', 'lo', 'la', 'ltg', 'lv', 'to', 'lb',
'lt', 'lij', 'li', 'ln', 'jbo', 'lg', 'lmo', 'hu', 'mk', 'mg', 'ml',
'krc', 'mt', 'mi', 'mr', 'xmf', 'arz', 'mzn', 'cdo', 'mwl', 'koi',
'mdf', 'mo', 'mn', 'mus', 'my', 'nah', 'fj', 'nl', 'nds-nl', 'cr',
'ne', 'new', 'ja', 'nap', 'ce', 'frr', 'pih', 'no', 'nb', 'nn',
'nrm', 'nov', 'ii', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa',
'pi', 'pfl', 'pag', 'pnb', 'pap', 'ps', 'km', 'pcd', 'pms', 'nds',
'pl', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy',
'rm', 'qu', 'ru', 'rue', 'sah', 'se', 'sa', 'sg', 'sc', 'sco',
'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple', 'sd', 'ss',
'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'fi', 'sv',
'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te', 'tet', 'th', 'vi',
'ti', 'tg', 'tpi', 'tokipona', 'tp', 'chr', 'chy', 've', 'tr', 'tk',
'tw', 'udm', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vo', 'fiu-vro',
'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts', 'yi', 'yo',
'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw', 'zh-cn',
]
# Order for fy: alphabetical by code, but y counts as i
def fycomp(x, y):
x = x.replace("y", "i") + x.count("y") * "!"
y = y.replace("y", "i") + y.count("y") * "!"
return cmp(x, y)
self.fyinterwiki = self.alphabetic[:]
self.fyinterwiki.remove('nb')
self.fyinterwiki.sort(fycomp)
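        # Illustrative example of the comparison above, using codes that occur
        # in the list: under fycomp the code "fy" compares as "fi!", so it
        # sorts immediately after "fi" instead of after "fur", e.g.
        #     sorted(['fi', 'fur', 'fy'], cmp=fycomp) == ['fi', 'fy', 'fur']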
self.langs = {}
# The timedelta to GMT of the server.
        # Example for a server running CET:
# timedelta(hours=+1)
self.servergmtoffset = timedelta()
# Translation used on all wikis for the different namespaces.
# (Please sort languages alphabetically)
# You only need to enter translations that differ from _default.
self.namespaces = {
-2: {
'_default': u'Media',
'ab': [u'Амедиа', u'Медиа'],
'ace': u'Alat',
'ak': u'Medya',
'als': u'Medium',
'am': u'ፋይል',
'ar': [u'ميديا', u'وسائط'],
'arc': u'ܡܝܕܝܐ',
'arz': [u'ميديا', u'وسائط'],
'as': u'মাধ্যম',
'ast': u'Medios',
'av': u'Медиа',
'ay': u'Medio',
'az': [u'Media', u'Mediya'],
'ba': u'Медиа',
'bar': [u'Media', u'Medium'],
'bat-smg': u'Medėjė',
'bcl': u'Medio',
'be': u'Мультымедыя',
'be-x-old': u'Мэдыя',
'bg': u'Медия',
'bh': u'मीडिया',
'bm': u'Média',
'bn': u'মিডিয়া',
'bpy': u'মিডিয়া',
'bs': [u'Mediji', u'Medija'],
'bxr': u'Меди',
'cbk-zam': u'Medio',
'ce': [u'Медиа', u'Медйа'],
'ceb': u'Medya',
'ckb': u'میدیا',
'crh': [u'Media', u'Медиа'],
'cs': u'Média',
'cu': u'Срѣдьства',
'cv': u'Медиа',
'de': u'Medium',
'diq': u'Medya',
'dsb': u'Medija',
'dv': u'މީޑިއާ',
'el': [u'Μέσο', u'Μέσον'],
'eo': u'Aŭdvidaĵo',
'es': u'Medio',
'et': u'Meedia',
'fa': [u'مدیا', u'رسانه', u'رسانهای'],
'ff': u'Média',
'fiu-vro': u'Meediä',
'fo': u'Miðil',
'fr': u'Média',
'frp': u'Mèdia',
'frr': u'Medium',
'ga': u'Meán',
'gag': [u'Mediya', u'Medya'],
'gan': u'媒體',
'gd': u'Meadhan',
'glk': [u'مدیا', u'رسانه', u'رسانهای'],
'gn': u'Medio',
'gu': u'દ્રશ્ય-શ્રાવ્ય (મિડિયા)',
'gv': u'Meanyn',
'haw': u'Pāpaho',
'he': u'מדיה',
'hi': u'मीडिया',
'hif': u'saadhan',
'hr': u'Mediji',
'ht': u'Medya',
'hu': u'Média',
'hy': u'Մեդիա',
'ia': u'Multimedia',
'ig': [u'Midia', u'Nká'],
'ilo': u'Midia',
'is': u'Miðill',
'ja': u'メディア',
'ka': u'მედია',
'kaa': [u'Media', u'Таспа', u'تاسپا'],
'kbd': u'Медиа',
'kk': [u'Таспа', u'Taspa', u'تاسپا'],
'km': [u'មេឌា', u'មីឌា'],
'kn': u'ಮೀಡಿಯ',
'ko': u'미디어',
'koi': u'Медиа',
'krc': u'Медиа',
'ks': u'میڈیا',
'ksh': [u'Medie', u'Medium', u'Meedije', u'Meedijum'],
'ku': u'Medya',
'kv': u'Медиа',
'ky': u'Медиа',
'lad': [u'Medya', u'Meddia'],
'lbe': u'Медиа',
'lez': u'Медиа',
'ln': u'Média',
'lo': [u'ສື່', u'ສື່ອ'],
'lt': u'Medija',
'ltg': u'Medeja',
'mdf': u'Медиа',
'mg': [u'Rakitra', u'Média'],
'mhr': u'Медиа',
'mk': [u'Медиум', u'Медија'],
'ml': u'മീഡിയ',
'mn': u'Медиа',
'mr': u'मिडिया',
'mrj': u'Медиа',
'mt': [u'Medja', u'Midja'],
'myv': u'Медия',
'mzn': [u'مدیا', u'مهدیا'],
'nah': u'Mēdiatl',
'ne': u'मीडिया',
'new': u'माध्यम',
'nn': u'Filpeikar',
'no': u'Medium',
'oc': u'Mèdia',
'or': u'ମାଧ୍ୟମ',
'os': u'Медиа',
'pa': u'ਮੀਡੀਆ',
'pcd': u'Média',
'pdc': u'Medium',
'pfl': u'Medium',
'pi': u'मीडिया',
'pnt': u'Μέσον',
'ps': u'رسنۍ',
'pt': u'Multimédia',
'qu': u'Midya',
'rm': u'Multimedia',
'rmy': u'Mediya',
'ru': u'Медиа',
'rue': u'Медіа',
'sa': [u'माध्यमम्', u'माध्यम'],
'sah': u'Медиа',
'scn': u'Mèdia',
'sd': u'ذريعات',
'sg': u'Média',
'si': u'මාධ්යය',
'sk': u'Médiá',
'sl': u'Datoteka',
'sr': [u'Медиј', u'Medija', u'Медија'],
'su': u'Média',
'sw': u'Faili',
'ta': u'ஊடகம்',
'te': u'మీడియా',
'tg': u'Медиа',
'th': u'สื่อ',
'tl': u'Midya',
'tlh': u'Doch',
'tr': [u'Ortam', u'Medya'],
'tt': u'Медиа',
'ty': u'Média',
'udm': u'Медиа',
'ug': u'ۋاسىتە',
'uk': u'Медіа',
'ur': [u'وسیط', u'زریعہ'],
'uz': [u'Media', u'Mediya'],
'vi': u'Phương tiện',
'vo': u'Nünamakanäd',
'war': u'Medya',
'wo': u'Xibaarukaay',
'wuu': [u'媒体文件', u'媒体'],
'xal': u'Аһар',
'xmf': u'მედია',
'yi': u'מעדיע',
'yo': u'Amóhùnmáwòrán',
'za': [u'媒体文件', u'媒体'],
'zh': [u'Media', u'媒体', u'媒體'],
'zh-yue': [u'Media', u'媒體', u'媒体'],
},
-1: {
'_default': u'Special',
'ab': [u'Цастәи', u'Служебная'],
'ace': [u'Kusuih', u'Istimewa'],
'af': u'Spesiaal',
'ak': u'Spesial',
'als': u'Spezial',
'am': u'ልዩ',
'an': [u'Especial', u'Espezial'],
'ang': u'Syndrig',
'ar': u'خاص',
'arc': u'ܕܝܠܢܝܐ',
'arz': u'خاص',
'as': [u'বিশেষ', u'विशेष'],
'ast': u'Especial',
'av': u'Служебная',
'ay': u'Especial',
'az': u'Xüsusi',
'ba': [u'Махсус', u'Ярҙамсы'],
'bar': u'Spezial',
'bat-smg': [u'Specēlos', u'Specialus'],
'bcl': u'Espesyal',
'be': u'Адмысловае',
'be-x-old': u'Спэцыяльныя',
'bg': u'Специални',
'bh': u'विशेष',
'bjn': [u'Istimiwa', u'Istimewa'],
'bm': u'Spécial',
'bn': u'বিশেষ',
'bpy': u'বিশেষ',
'br': u'Dibar',
'bs': u'Posebno',
'bug': u'Istimewa',
'bxr': u'Тусхай',
'ca': u'Especial',
'cbk-zam': u'Especial',
'ce': [u'Белхан', u'Башхо'],
'ceb': u'Espesyal',
'ch': u'Espesiat',
'ckb': u'تایبەت',
'crh': [u'Mahsus', u'Махсус'],
'cs': u'Speciální',
'csb': u'Specjalnô',
'cu': u'Нарочьна',
'cv': u'Ятарлă',
'cy': u'Arbennig',
'da': u'Speciel',
'de': u'Spezial',
'diq': u'Xısusi',
'dsb': u'Specialne',
'dv': [u'ޚާއްސަ', u'ހާއްޞަ'],
'el': u'Ειδικό',
'eml': u'Speciale',
'eo': [u'Specialaĵo', u'Speciala'],
'es': u'Especial',
'et': u'Eri',
'eu': [u'Berezi', u'Aparteko'],
'fa': u'ویژه',
'ff': u'Spécial',
'fi': u'Toiminnot',
'fiu-vro': u'Tallituslehekülg',
'fo': [u'Serstakt', u'Serstakur'],
'fr': u'Spécial',
'frp': u'Spèciâl',
'frr': u'Spezial',
'fur': u'Speciâl',
'fy': u'Wiki',
'ga': u'Speisialta',
'gag': [u'Maasus', u'Özel'],
'gan': u'特別',
'gd': u'Sònraichte',
'gl': u'Especial',
'glk': u'ویژه',
'gn': u"Mba'echĩchĩ",
'gu': u'વિશેષ',
'gv': u'Er lheh',
'haw': u'Papa nui',
'he': u'מיוחד',
'hi': u'विशेष',
'hif': u'khaas',
'hr': u'Posebno',
'hsb': u'Specialnje',
'ht': u'Espesyal',
'hu': u'Speciális',
'hy': u'Սպասարկող',
'id': u'Istimewa',
'ig': u'Ihü kárírí',
'ilo': u'Espesial',
'io': u'Specala',
'is': u'Kerfissíða',
'it': u'Speciale',
'ja': u'特別',
'jv': u'Astamiwa',
'ka': u'სპეციალური',
'kaa': [u'Arnawlı', u'Арнайы', u'ارنايى'],
'kab': u'Uslig',
'kbd': u'Служебная',
'kk': [u'Арнайы', u'Arnaýı', u'ارنايى'],
'kl': [u'Immikkut', u'Speciel'],
'km': u'ពិសេស',
'kn': u'ವಿಶೇಷ',
'ko': [u'특수기능', u'특'],
'koi': u'Служебная',
'krc': u'Къуллукъ',
'ks': u'خاص',
'ksh': [u'Extra', u'Spezial', u'Shpezjal'],
'ku': u'Taybet',
'kv': [u'Отсасян', u'Служебная'],
'kw': [u'Arbednek', u'Arbennek'],
'ky': u'Атайын',
'la': u'Specialis',
'lad': u'Especial',
'lb': u'Spezial',
'lbe': u'Къуллугъирал лажин',
'lez': u'Служебная',
'li': u'Speciaal',
'lij': [u'Speçiale', u'Speciale'],
'lmo': [u'Special', u'Speciale'],
'ln': u'Spécial',
'lo': u'ພິເສດ',
'lt': u'Specialus',
'ltg': u'Seviškuo',
'map-bms': u'Astamiwa',
'mdf': [u'Башка', u'Служебная'],
'mg': u'Manokana',
'mhr': [u'Лӱмын ыштыме', u'Служебная'],
'min': u'Istimewa',
'mk': [u'Специјална', u'Специјални'],
'ml': [u'പ്രത്യേകം', u'പ്രത്യേ'],
'mn': u'Тусгай',
'mr': u'विशेष',
'mrj': u'Спецӹлӹштӓш',
'ms': [u'Khas', u'Istimewa'],
'mt': u'Speċjali',
'mwl': [u'Special', u'Especial'],
'myv': u'Башка тевень',
'mzn': [u'شا', u'ویژه'],
'nah': [u'Nōncuahquīzqui', u'Especial'],
'nap': [u'Speciàle', u'Speciale'],
'nds': u'Spezial',
'nds-nl': [u'Spesiaal', u'Speciaol', u'Speciaal'],
'ne': u'विशेष',
'new': u'विशेष',
'nl': u'Speciaal',
'nn': u'Spesial',
'no': u'Spesial',
'oc': u'Especial',
'or': u'ବିଶେଷ',
'os': u'Сæрмагонд',
'pa': [u'ਖ਼ਾਸ', u'ਖਾਸ'],
'pcd': u'Spécial',
'pdc': u'Spezial',
'pfl': [u'Schbezial', u'Spezial'],
'pi': u'विसेस',
'pl': u'Specjalna',
'pnt': u'Ειδικόν',
'ps': u'ځانګړی',
'pt': u'Especial',
'qu': u'Sapaq',
'rm': u'Spezial',
'rmy': u'Uzalutno',
'ru': u'Служебная',
'rue': u'Шпеціална',
'sa': [u'विशेषम्', u'विशेष'],
'sah': u'Аналлаах',
'sc': [u'Ispetziale', u'Speciale'],
'scn': u'Spiciali',
'sd': u'خاص',
'se': [u'Erenoamáš', u'Doaimmat'],
'sg': u'Spécial',
'sh': u'Posebno',
'si': u'විශේෂ',
'sk': u'Špeciálne',
'sl': u'Posebno',
'sq': u'Speciale',
'sr': [u'Посебно', u'Posebno'],
'srn': [u'Spesyal', u'Speciaal'],
'stq': u'Spezial',
'su': u'Husus',
'sw': u'Maalum',
'szl': [u'Szpecyjalna', u'Specjalna'],
'ta': u'சிறப்பு',
'te': u'ప్రత్యేక',
'tet': u'Espesiál',
'tg': u'Вижа',
'th': u'พิเศษ',
'tk': u'Ýörite',
'tl': u'Natatangi',
'tlh': u"le'",
'tpi': u'Sipesol',
'tr': u'Özel',
'tt': [u'Махсус', u'Служебная', u'Maxsus'],
'ty': u'Spécial',
'udm': u'Панель',
'ug': u'ئالاھىدە',
'uk': [u'Спеціальна', u'Спеціальні'],
'ur': u'خاص',
'uz': u'Maxsus',
'vec': u'Speciale',
'vep': u'Specialine',
'vi': u'Đặc biệt',
'vls': u'Specioal',
'vo': u'Patikos',
'wa': u'Sipeciås',
'war': u'Pinaurog',
'wo': u'Jagleel',
'wuu': u'特殊',
'xal': [u'Көдлхнә', u'Көдлхнə'],
'xmf': u'სპეციალური',
'yi': [u'באַזונדער', u'באזונדער'],
'yo': u'Pàtàkì',
'za': u'特殊',
'zea': u'Speciaol',
'zh': [u'Special', u'特殊'],
'zh-yue': [u'Special', u'特別', u'特殊'],
},
0: {
'_default': None,
},
1: {
'_default': u'Talk',
'ab': [u'Ахцәажәара', u'Обсуждение'],
'ace': [u'Marit', u'Pembicaraan', u'Bicara'],
'af': u'Bespreking',
'ak': u'Nkɔmbɔ',
'als': u'Diskussion',
'am': u'ውይይት',
'an': u'Descusión',
'ang': u'Gesprec',
'ar': u'نقاش',
'arc': u'ܡܡܠܠܐ',
'arz': u'نقاش',
'as': [u'বাৰ্তা', u'वार्ता', u'বার্তা'],
'ast': [u'Alderique', u'Discusión'],
'av': u'Обсуждение',
'ay': u'Discusión',
'az': u'Müzakirə',
'ba': [u'Фекерләшеү', u'Фекер алышыу'],
'bar': [u'Dischkrian', u'Diskussion'],
'bat-smg': [u'Aptarėms', u'Aptarimas'],
'bcl': u'Olay',
'be': u'Размовы',
'be-x-old': u'Абмеркаваньне',
'bg': u'Беседа',
'bh': u'वार्तालाप',
'bjn': [u'Pamandiran', u'Pembicaraan'],
'bm': [u'Discussion', u'Discuter'],
'bn': u'আলাপ',
'bpy': u'য়্যারী',
'br': u'Kaozeal',
'bs': u'Razgovor',
'bug': [u'Pembicaraan', u'Bicara'],
'bxr': [u'Хэлэлсэхэ', u'Обсуждение'],
'ca': u'Discussió',
'cbk-zam': u'Discusión',
'ce': [u'Дийцаре', u'Дийца'],
'ceb': u'Hisgot',
'ch': u'Kombetsasion',
'ckb': [u'وتووێژ', u'لێدوان'],
'crh': [u'Muzakere', u'Музакере'],
'cs': u'Diskuse',
'csb': u'Diskùsëjô',
'cu': [u'Бєсѣда', u'Бесѣда'],
'cv': u'Сӳтсе явасси',
'cy': u'Sgwrs',
'da': u'Diskussion',
'de': u'Diskussion',
'diq': [u'Mesac', u'Werênayış'],
'dsb': u'Diskusija',
'dv': u'ޚިޔާލު',
'el': u'Συζήτηση',
'eml': u'Discussione',
'eo': u'Diskuto',
'es': u'Discusión',
'et': u'Arutelu',
'eu': u'Eztabaida',
'fa': u'بحث',
'ff': [u'Discussion', u'Discuter'],
'fi': u'Keskustelu',
'fiu-vro': u'Arotus',
'fo': u'Kjak',
'fr': [u'Discussion', u'Discuter'],
'frp': [u'Discussion', u'Discutar'],
'frr': u'Diskussion',
'fur': u'Discussion',
'fy': u'Oerlis',
'ga': u'Plé',
'gag': [u'Dartışma', u'Tartışma'],
'gan': u'談詑',
'gd': u'Deasbaireachd',
'gl': u'Conversa',
'glk': u'بحث',
'gn': u'Myangekõi',
'gu': u'ચર્ચા',
'gv': u'Resooney',
'haw': u'Kūkākūkā',
'he': u'שיחה',
'hi': u'वार्ता',
'hif': u'baat',
'hr': u'Razgovor',
'hsb': u'Diskusija',
'ht': u'Diskite',
'hu': u'Vita',
'hy': u'Քննարկում',
'ia': u'Discussion',
'id': [u'Pembicaraan', u'Bicara'],
'ie': u'Discussion',
'ig': u'Okwu',
'ilo': u'Tungtungan',
'io': u'Debato',
'is': u'Spjall',
'it': u'Discussione',
'ja': [u'トーク', u'ノート'],
'jv': u'Dhiskusi',
'ka': u'განხილვა',
'kaa': [u'Sa\'wbet', u'Талқылау', u'تالقىلاۋ'],
'kab': u'Mmeslay',
'kbd': [u'Тепсэлъэхьыгъуэ', u'Обсуждение'],
'kg': u'Disolo',
'kk': [u'Талқылау', u'Talqılaw', u'تالقىلاۋ'],
'kl': [u'Oqallinneq', u'Diskussion'],
'km': [u'ការពិភាក្សា', u'ពិភាក្សា'],
'kn': u'ಚರ್ಚೆಪುಟ',
'ko': u'토론',
'koi': u'Обсуждение',
'krc': u'Сюзюу',
'ks': u'بَحَژ',
'ksh': u'Klaaf',
'ku': [u'Gotûbêj', u'Nîqaş'],
'kv': [u'Сёрнитанiн', u'Обсуждение'],
'kw': [u'Keskows', u'Cows', u'Kescows'],
'ky': u'Баарлашуу',
'la': u'Disputatio',
'lad': [u'Diskusyón', u'Diskussión', u'Discusión'],
'lb': u'Diskussioun',
'lbe': u'Ихтилат',
'lez': [u'веревирд авун', u'Обсуждение'],
'li': u'Euverlèk',
'lij': [u'Discûscion', u'Discussione'],
'lmo': [u'Ciciarada', u'Discussione'],
'ln': [u'Discussion', u'Discuter'],
'lo': u'ສົນທະນາ',
'lt': u'Aptarimas',
'ltg': u'Sprīža',
'lv': u'Diskusija',
'map-bms': u'Dhiskusi',
'mdf': [u'Корхнема', u'Обсуждение'],
'mg': [u'Dinika', u'Discuter'],
'mhr': [u'Каҥашымаш', u'Обсуждение'],
'min': [u'Maota', u'Pembicaraan'],
'mk': u'Разговор',
'ml': [u'സംവാദം', u'സം'],
'mn': u'Хэлэлцүүлэг',
'mr': u'चर्चा',
'mrj': u'Кӓнгӓшӹмӓш',
'ms': [u'Perbincangan', u'Perbualan'],
'mt': [u'Diskussjoni', u'Diskuti'],
'mwl': [u'Cumbersa', u'Discussão'],
'myv': u'Кортамо',
'mzn': [u'گپ', u'بحث'],
'nah': [u'Tēixnāmiquiliztli', u'Discusión'],
'nap': [u'Chiàcchiera', u'Discussione'],
'nds': [u'Diskuschoon', u'Diskussion'],
'nds-nl': u'Overleg',
'ne': u'वार्तालाप',
'new': u'खँलाबँला',
'nl': u'Overleg',
'nn': u'Diskusjon',
'no': u'Diskusjon',
'nso': u'Bolediša',
'nv': u'Naaltsoos baa yáshtiʼ',
'oc': u'Discutir',
'or': u'ଆଲୋଚନା',
'os': [u'Тæрхон', u'Дискусси'],
'pa': [u'ਗੱਲ-ਬਾਤ', u'ਚਰਚਾ'],
'pcd': [u'Discussion', u'Discuter'],
'pdc': [u'Dischbedutt', u'Diskussion'],
'pfl': [u'Babble', u'Diskussion'],
'pi': u'सम्भासित',
'pl': u'Dyskusja',
'pms': u'Discussion',
'pnt': u'Καλάτσεμαν',
'ps': u'خبرې اترې',
'pt': u'Discussão',
'qu': u'Rimanakuy',
'rm': u'Discussiun',
'rmy': [u'Vakyarimata', u'Discuţie'],
'ro': [u'Discuție', u'Discuţie'],
'ru': u'Обсуждение',
'rue': [u'Діскузія', u'Діскузіа'],
'sa': [u'सम्भाषणम्', u'संभाषणं'],
'sah': u'Ырытыы',
'sc': [u'Cuntierra', u'Contièndha'],
'scn': u'Discussioni',
'sd': u'بحث',
'se': u'Ságastallan',
'sg': [u'Discussion', u'Discuter'],
'sh': u'Razgovor',
'si': u'සාකච්ඡාව',
'sk': [u'Diskusia', u'Komentár'],
'sl': u'Pogovor',
'sq': u'Diskutim',
'sr': [u'Разговор', u'Razgovor'],
'srn': [u'Taki', u'Overleg'],
'stq': u'Diskussion',
'su': u'Obrolan',
'sv': u'Diskussion',
'sw': u'Majadiliano',
'szl': [u'Dyskusyjo', u'Dyskusja'],
'ta': u'பேச்சு',
'te': u'చర్చ',
'tet': u'Diskusaun',
'tg': u'Баҳс',
'th': u'พูดคุย',
'tk': u'Çekişme',
'tl': u'Usapan',
'tlh': u"ja'chuq",
'tpi': u'Toktok',
'tr': u'Tartışma',
'tt': [u'Бәхәс', u'Обсуждение', u'Фикер алышу', u'Bäxäs'],
'ty': [u'Discussion', u'Discuter'],
'udm': u'Вераськон',
'ug': u'مۇنازىرە',
'uk': u'Обговорення',
'ur': u'تبادلۂ خیال',
'uz': u'Munozara',
'vec': u'Discussion',
'vep': u'Lodu',
'vi': u'Thảo luận',
'vls': u'Discuusje',
'vo': u'Bespik',
'wa': [u'Copene', u'Discuter'],
'war': u'Hiruhimangraw',
'wo': [u'Waxtaan', u'Discuter'],
'wuu': [u'讨论', u'对话'],
'xal': [u'Меткән', u'Ухалвр'],
'xmf': u'განხილვა',
'yi': u'רעדן',
'yo': u'Ọ̀rọ̀',
'za': [u'讨论', u'对话'],
'zea': u'Overleg',
'zh': [u'Talk', u'讨论', u'討論', u'对话', u'對話'],
'zh-yue': [u'Talk', u'傾偈', u'對話', u'对话', u'討論', u'讨论'],
},
2: {
'_default': u'User',
'ab': [u'Алахәыла', u'Иалахә', u'Участник'],
'ace': [u'Ureuëng Nguy', u'Pengguna'],
'af': u'Gebruiker',
'als': [u'Benutzer', u'Benutzerin'],
'am': u'አባል',
'an': u'Usuario',
'ar': [u'مستخدم', u'مستخدمة'],
'arc': [u'ܡܦܠܚܢܐ', u'ܡܬܚܫܚܢܐ'],
'arz': [u'مستخدم', u'مستخدمة'],
'as': [u'সদস্য', u'सदस्य'],
'ast': u'Usuariu',
'av': [u'Участник', u'Участница'],
'ay': [u'Usuario', u'Usuaria'],
'az': u'İstifadəçi',
'ba': u'Ҡатнашыусы',
'bar': [u'Nutza', u'Benutzer', u'Benutzerin'],
'bat-smg': [u'Nauduotuos', u'Naudotojas'],
'bcl': u'Paragamit',
'be': u'Удзельнік',
'be-x-old': [u'Удзельнік', u'Удзельніца'],
'bg': u'Потребител',
'bh': u'प्रयोगकर्ता',
'bjn': [u'Pamakai', u'Pengguna'],
'bm': u'Utilisateur',
'bn': u'ব্যবহারকারী',
'bpy': u'আতাকুরা',
'br': u'Implijer',
'bs': u'Korisnik',
'bug': u'Pengguna',
'bxr': [u'Хэрэглэгшэ', u'Участник'],
'ca': [u'Usuari', u'Usuària'],
'cbk-zam': [u'Usuario', u'Usuaria'],
'ce': [u'Декъашхо', u'Юзер'],
'ceb': u'Gumagamit',
'ch': u'Muna\'sesetbi',
'ckb': u'بەکارھێنەر',
'crh': [u'Qullanıcı', u'Къулланыджы'],
'cs': [u'Wikipedista', u'Uživatel', u'Wikipedistka'],
'csb': [u'Brëkòwnik', u'Użytkownik', u'Użytkowniczka'],
'cu': [u'Польꙃєватєл҄ь', u'Участник', u'Польѕевател҄ь'],
'cv': u'Хутшăнакан',
'cy': u'Defnyddiwr',
'da': u'Bruger',
'de': [u'Benutzer', u'Benutzerin'],
'diq': u'Karber',
'dsb': [u'Wužywaŕ', u'Wužywarka'],
'dv': u'މެމްބަރު',
'el': u'Χρήστης',
'eml': u'Utente',
'eo': [u'Uzanto', u'Vikipediisto', u'Uzulo', u'Uzantino'],
'es': [u'Usuario', u'Usuaria'],
'et': u'Kasutaja',
'eu': u'Lankide',
'fa': [u'کاربر', u'كاربر'],
'ff': u'Utilisateur',
'fi': u'Käyttäjä',
'fiu-vro': u'Pruukja',
'fo': u'Brúkari',
'fr': [u'Utilisateur', u'Utilisatrice'],
'frp': [u'Utilisator', u'Usanciér'],
'frr': [u'Benutzer', u'Benutzerin'],
'fur': u'Utent',
'fy': [u'Meidogger', u'Brûker'],
'ga': u'Úsáideoir',
'gag': u'Kullanıcı',
'gan': u'用戶',
'gd': u'Cleachdaiche',
'gl': [u'Usuario', u'Usuaria'],
'glk': u'کاربر',
'gn': u'Puruhára',
'got': u'𐌽𐌹𐌿𐍄𐌰𐌽𐌳𐍃',
'gu': u'સભ્ય',
'gv': u'Ymmydeyr',
'haw': u'Mea hoʻohana',
'he': [u'משתמש', u'משתמשת'],
'hi': u'सदस्य',
'hif': u'sadasya',
'hr': [u'Suradnik', u'Suradnica'],
'hsb': [u'Wužiwar', u'Wužiwarka'],
'ht': [u'Itilizatè', u'Imaj'],
'hu': u'Szerkesztő',
'hy': u'Մասնակից',
'ia': u'Usator',
'id': u'Pengguna',
'ie': u'Usator',
'ig': [u'Ọbanife', u'Ọ\'bànifé'],
'ilo': u'Agar-aramat',
'io': u'Uzanto',
'is': u'Notandi',
'it': u'Utente',
'ja': u'利用者',
'jv': u'Panganggo',
'ka': [u'მომხმარებელი', u'მონაწილე'],
'kaa': [u'Paydalanıwshı', u'Қатысушы', u'قاتىسۋشى'],
'kab': u'Amseqdac',
'kbd': [u'ЦӀыхухэт', u'Участник'],
'kg': u'Kisadi',
'kk': [u'Қатысушы', u'Qatıswşı', u'قاتىسۋشى'],
'kl': [u'Atuisoq', u'Bruger'],
'km': u'អ្នកប្រើប្រាស់',
'kn': u'ಸದಸ್ಯ',
'ko': u'사용자',
'koi': [u'Участник', u'Участница'],
'krc': u'Къошулуучу',
'ks': u'رُکُن',
'ksh': [u'Metmaacher', u'Medmaacher', u'Metmaacherin', u'Medmaacherin', u'Metmaacheren', u'Medmaacheren', u'Benutzer', u'Benutzerin'],
'ku': u'Bikarhêner',
'kv': [u'Пырысь', u'Участник', u'Участница'],
'kw': u'Devnydhyer',
'ky': u'Колдонуучу',
'la': u'Usor',
'lad': [u'Usador', u'Usuario', u'Empleador'],
'lb': [u'Benotzer', u'Benutzer', u'Benutzerin'],
'lbe': u'Гьуртту хьума',
'lez': [u'Уртах', u'Участник'],
'li': u'Gebroeker',
'lij': u'Utente',
'lmo': [u'Druvadur', u'Utente', u'Druvat', u'Dovrat'],
'ln': u'Utilisateur',
'lo': u'ຜູ້ໃຊ້',
'lt': [u'Naudotojas', u'Naudotoja'],
'ltg': u'Lītuotuojs',
'lv': u'Lietotājs',
'map-bms': u'Panganggo',
'mdf': [u'Тиись', u'Участник'],
'mg': [u'Mpikambana', u'Utilisateur'],
'mhr': [u'Пайдаланыше', u'Участник'],
'min': [u'Pangguno', u'Pengguna'],
'mk': u'Корисник',
'ml': [u'ഉപയോക്താവ്', u'അംഗം', u'ഉ'],
'mn': u'Хэрэглэгч',
'mr': u'सदस्य',
'mrj': u'Сирӹшӹ',
'ms': u'Pengguna',
'mt': u'Utent',
'mwl': [u'Outelizador', u'Usuário', u'Utilizador', u'Utilizadora'],
'myv': u'Теиця',
'mzn': [u'کارور', u'کاربر'],
'nah': [u'Tlatequitiltilīlli', u'Usuario'],
'nap': u'Utente',
'nds': [u'Bruker', u'Benutzer', u'Benutzerin'],
'nds-nl': u'Gebruker',
'ne': u'प्रयोगकर्ता',
'new': u'छ्येलेमि',
'nl': u'Gebruiker',
'nn': u'Brukar',
'no': u'Bruker',
'nso': u'Mošomi',
'nv': u'Choyoołʼįįhí',
'oc': [u'Utilizaire', u'Utilisator'],
'or': [u'ବ୍ୟବହାରକାରୀ', u'ବ୍ୟବହାରକାରି', u'ବ୍ୟବାହାରକାରୀ'],
'os': [u'Архайæг', u'Участник', u'Участница'],
'pa': [u'ਵਰਤੌਂਕਾਰ', u'ਮੈਂਬਰ'],
'pcd': u'Utilisateur',
'pdc': [u'Yuuser', u'Benutzer', u'Benutzerin'],
'pfl': [u'Middawaida', u'Benutzer', u'Benudzer', u'Benutzerin'],
'pi': u'अवयव',
'pl': [u'Wikipedysta', u'Wikipedystka'],
'pms': u'Utent',
'pnt': u'Χρήστες',
'ps': [u'کارن', u'کارونکی'],
'pt': [u'Usuário(a)', u'Utilizador', u'Usuário', u'Usuária'],
'qu': u'Ruraq',
'rm': u'Utilisader',
'rmy': u'Jeno',
'ro': u'Utilizator',
'ru': [u'Участник', u'Участница'],
'rue': u'Хоснователь',
'sa': u'योजकः',
'sah': u'Кыттааччы',
'sc': [u'Usuàriu', u'Utente'],
'scn': u'Utenti',
'sd': u'يوزر',
'se': u'Geavaheaddji',
'sg': u'Utilisateur',
'sh': u'Korisnik',
'si': u'පරිශීලක',
'sk': u'Redaktor',
'sl': u'Uporabnik',
'sq': [u'Përdoruesi', u'Perdoruesi', u'Përdoruesja'],
'sr': [u'Корисник', u'Korisnik'],
'srn': [u'Masyin', u'Gebruiker'],
'stq': [u'Benutser', u'Benutserske'],
'su': u'Pamaké',
'sv': u'Användare',
'sw': u'Mtumiaji',
'szl': [u'Używacz', u'Użytkownik'],
'ta': u'பயனர்',
'te': [u'వాడుకరి', u'సభ్యులు', u'సభ్యుడు'],
'tet': u'Uza-na\'in',
'tg': u'Корбар',
'th': u'ผู้ใช้',
'tk': u'Ulanyjy',
'tl': u'Tagagamit',
'tpi': u'Yusa',
'tr': u'Kullanıcı',
'tt': [u'Кулланучы', u'Участница', u'Участник', u'Äğzä'],
'ty': u'Utilisateur',
'udm': u'Викиавтор',
'ug': u'ئىشلەتكۈچى',
'uk': u'Користувач',
'ur': u'صارف',
'uz': u'Foydalanuvchi',
'vec': u'Utente',
'vep': u'Kävutai',
'vi': u'Thành viên',
'vls': u'Gebruker',
'vo': u'Geban',
'wa': u'Uzeu',
'war': u'Gumaramit',
'wo': [u'Jëfandikukat', u'Utilisateur'],
'wuu': u'用户',
'xal': [u'Демнч', u'Орлцач'],
'xmf': [u'მომხმარებელი', u'მონაწილე'],
'yi': [u'באַניצער', u'באנוצער', u'משתמש', u'משתמשת', u'באניצער', u'באַניצערין'],
'yo': u'Oníṣe',
'za': u'用户',
'zea': u'Gebruker',
'zh': [u'User', u'用户', u'用戶'],
'zh-yue': [u'User', u'用戶', u'用户'],
},
3: {
'_default': u'User talk',
'ab': [u'Алахәыла ахцәажәара', u'Обсуждение участника'],
'ace': [u'Marit Ureuëng Nguy', u'Pembicaraan Pengguna', u'Bicara Pengguna'],
'af': u'Gebruikerbespreking',
'ak': u'User nkɔmbɔ',
'als': [u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'am': u'አባል ውይይት',
'an': u'Descusión usuario',
'ar': [u'نقاش المستخدم', u'نقاش المستخدمة'],
'arc': [u'ܡܡܠܠܐ ܕܡܦܠܚܢܐ', u'ܡܡܠܠܐ ܕܡܬܚܫܚܢܐ'],
'arz': [u'نقاش المستخدم', u'نقاش المستخدمة'],
'as': [u'সদস্য বাৰ্তা', u'सदस्य वार्ता', u'সদস্য বার্তা'],
'ast': [u'Usuariu alderique', u'Usuariu discusión'],
'av': [u'Обсуждение участника', u'Обсуждение участницы'],
'ay': [u'Usuario discusión', u'Usuario Discusión', u'Usuaria Discusión'],
'az': u'İstifadəçi müzakirəsi',
'ba': [u'Ҡатнашыусы менән һөйләшеү', u'Ҡатнашыусы м-н фекер алышыу'],
'bar': [u'Nutza Dischkrian', u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'bat-smg': [u'Nauduotuojė aptarėms', u'Naudotojo aptarimas'],
'bcl': u'Olay kan paragamit',
'be': u'Размовы з удзельнікам',
'be-x-old': [u'Гутаркі ўдзельніка', u'Гутаркі ўдзельніцы'],
'bg': u'Потребител беседа',
'bh': u'प्रयोगकर्ता वार्ता',
'bjn': [u'Pamandiran Pamakai', u'Pembicaraan Pengguna'],
'bm': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'bn': u'ব্যবহারকারী আলাপ',
'bpy': u'আতাকুরার য়্যারী',
'br': u'Kaozeadenn Implijer',
'bs': u'Razgovor sa korisnikom',
'bug': [u'Pembicaraan Pengguna', u'Bicara Pengguna'],
'bxr': [u'Хэрэглэгшые хэлэлсэхэ', u'Обсуждение участника'],
'ca': [u'Usuari Discussió', u'Usuària discussió'],
'cbk-zam': [u'Usuario discusión', u'Usuario Discusión', u'Usuaria Discusión'],
'ce': [u'Декъашхон дийцаре', u'Юзери дийца', u'Дийцаре декъашхо'],
'ceb': u'Hisgot sa Gumagamit',
'ch': u'Kombetsasion ni muna\'sesetbi',
'ckb': [u'لێدوانی بەکارھێنەر', u'قسەی بەکارھێنەر'],
'crh': [u'Qullanıcı muzakeresi', u'Къулланыджы музакереси'],
'cs': [u'Diskuse s wikipedistou', u'Diskuse s uživatelem', u'Diskuse s wikipedistkou', u'Wikipedista diskuse', u'Wikipedistka diskuse', u'Uživatel diskuse', u'Uživatelka diskuse'],
'csb': [u'Diskùsëjô brëkòwnika', u'Dyskusja użytkownika', u'Dyskusja użytkowniczki'],
                'cu': [u'Польꙃєватєлꙗ бєсѣда', u'Польѕевател҄ь бесѣда'],
'cv': u'Хутшăнаканăн канашлу страници',
'cy': u'Sgwrs Defnyddiwr',
'da': u'Brugerdiskussion',
'de': [u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'diq': [u'Karber werênayış', u'Karber mesac'],
'dsb': [u'Diskusija wužywarja', u'Diskusija wužywarki'],
'dv': u'މެމްބަރުގެ ވާހަކަ',
'el': u'Συζήτηση χρήστη',
'eml': u'Discussioni utente',
'eo': [u'Uzanto-Diskuto', u'Vikipediista diskuto', u'Uzula diskuto', u'Uzanta diskuto', u'Uzantino-Diskuto'],
'es': [u'Usuario discusión', u'Usuario Discusión', u'Usuaria Discusión'],
'et': u'Kasutaja arutelu',
'eu': u'Lankide eztabaida',
'fa': [u'بحث کاربر', u'بحث كاربر'],
'ff': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'fi': u'Keskustelu käyttäjästä',
'fiu-vro': u'Pruukja arotus',
'fo': [u'Brúkarakjak', u'Brúkari kjak'],
'fr': [u'Discussion utilisateur', u'Discussion Utilisatrice', u'Discussion Utilisateur'],
'frp': [u'Discussion utilisator', u'Discussion usanciér'],
'frr': [u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'fur': u'Discussion utent',
'fy': [u'Meidogger oerlis', u'Brûker oerlis'],
'ga': u'Plé úsáideora',
'gag': [u'Kullanıcı dartışma', u'Kullanıcı mesaj'],
'gan': u'用戶・談詑',
'gd': u'Deasbaireachd a\' chleachdaiche',
'gl': [u'Conversa usuario', u'Conversa Usuario', u'Conversa usuaria'],
'glk': u'بحث کاربر',
'gn': u'Puruhára myangekõi',
'got': u'𐌽𐌹𐌿𐍄𐌰𐌽𐌳𐌹𐍃 𐌲𐌰𐍅𐌰𐌿𐍂𐌳𐌾𐌰',
'gu': u'સભ્યની ચર્ચા',
'gv': u'Resooney ymmydeyr',
'haw': u'Kūkākūkā o mea hoʻohana',
'he': [u'שיחת משתמש', u'שיחת משתמשת'],
'hi': u'सदस्य वार्ता',
'hif': u'sadasya ke baat',
'hr': [u'Razgovor sa suradnikom', u'Razgovor sa suradnicom'],
'hsb': [u'Diskusija z wužiwarjom', u'Diskusija z wužiwarku'],
'ht': [u'Diskisyon Itilizatè', u'Diskisyon Imaj'],
'hu': [u'Szerkesztővita', u'User vita'],
'hy': u'Մասնակցի քննարկում',
'ia': u'Discussion Usator',
'id': [u'Pembicaraan Pengguna', u'Bicara Pengguna'],
'ie': u'Usator Discussion',
'ig': [u'Okwu ọbanife', u'Okwu ọ\'bànifé'],
'ilo': u'Agar-aramat tungtungan',
'io': u'Uzanto Debato',
'is': u'Notandaspjall',
'it': u'Discussioni utente',
'ja': [u'利用者・トーク', u'利用者‐会話'],
'jv': u'Dhiskusi Panganggo',
'ka': [u'მომხმარებლის განხილვა', u'მონაწილის განხილვა', u'მომხმარებელი განხილვა'],
'kaa': [u'Paydalanıwshı sa\'wbeti', u'Қатысушы талқылауы', u'قاتىسۋشى تالقىلاۋى'],
'kab': u'Amyannan umsqedac',
'kbd': [u'ЦӀыхухэт тепсэлъэхьыгъуэ', u'Обсуждение участника'],
'kg': u'Disolo kisadi',
'kk': [u'Қатысушы талқылауы', u'Qatıswşı talqılawı', u'قاتىسۋشى تالقىلاۋى'],
'kl': [u'Atuisup oqalliffia', u'Brugerdiskussion'],
'km': [u'ការពិភាក្សារបស់អ្នកប្រើប្រាស់', u'អ្នកប្រើប្រាស់-ពិភាក្សា'],
'kn': u'ಸದಸ್ಯರ ಚರ್ಚೆಪುಟ',
'ko': u'사용자토론',
'koi': [u'Обсуждение участника', u'Обсуждение участницы'],
'krc': u'Къошулуучуну сюзюу',
'ks': u'رُکُن بَحَژ',
'ksh': [u'Metmaacher Klaaf', u'Medmaacher Klaaf', u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'ku': [u'Gotûbêja bikarhêner', u'Bikarhêner nîqaş'],
'kv': [u'Пырыськӧд сёрнитанiн', u'Обсуждение участника', u'Обсуждение участницы'],
'kw': [u'Keskows Devnydhyer', u'Cows Devnydhyer', u'Kescows Devnydhyer'],
'ky': u'Колдонуучунун баарлашуулары',
'la': u'Disputatio Usoris',
'lad': [u'Messaje de Usador', u'Usuario Discusión', u'Message de Empleador'],
'lb': [u'Benotzer Diskussioun', u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'lbe': u'Гьуртту хьуминнал ихтилат',
'lez': [u'Уртахдин веревирд авун', u'Обсуждение участника'],
'li': u'Euverlèk gebroeker',
'lij': [u'Discûscioîn ûtente', u'Discussioni utente'],
'lmo': [u'Ciciarada Druvadur', u'Discussioni utente', u'Ciciarada Druvat', u'Ciciarada Dovrat'],
'ln': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'lo': u'ສົນທະນາຂອງຜູ້ໃຊ້',
'lt': [u'Naudotojo aptarimas', u'Naudotojos aptarimas'],
'ltg': u'Sprīža ap lītuotuoju',
'lv': u'Lietotāja diskusija',
'map-bms': u'Dhiskusi Panganggo',
'mdf': [u'Тиись корхнема', u'Обсуждение участника'],
'mg': [u'Dinika amin\'ny mpikambana', u'Discussion Utilisateur'],
'mhr': [u'Пайдаланышын каҥашымашыже', u'Обсуждение участника', u'Пайдаланышын каҥашымаш'],
'min': [u'Rundiang Pangguno', u'Pembicaraan Pengguna'],
'mk': u'Разговор со корисник',
'ml': [u'ഉപയോക്താവിന്റെ സംവാദം', u'അംഗങ്ങളുടെ സംവാദം', u'ഉസം'],
'mn': u'Хэрэглэгчийн яриа',
'mr': u'सदस्य चर्चा',
'mrj': [u'Сирӹшӹм кӓнгӓшӹмӓш', u'Сирӹшӹн кӓнгӓшӹмӓшӹжӹ'],
'ms': [u'Perbincangan pengguna', u'Perbualan Pengguna'],
'mt': [u'Diskussjoni utent', u'Diskuti utent'],
'mwl': [u'Cumbersa outelizador', u'Usuário Discussão', u'Utilizador Discussão', u'Utilizadora Discussão'],
'myv': u'Теицянь кортамось',
'mzn': [u'کارور گپ', u'بحث کاربر'],
'nah': [u'Tlatequitiltilīlli tēixnāmiquiliztli', u'Usuario Discusión'],
'nap': [u'Utente chiàcchiera', u'Discussioni utente'],
'nds': [u'Bruker Diskuschoon', u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'nds-nl': u'Overleg gebruker',
'ne': u'प्रयोगकर्ता वार्ता',
'new': u'छ्येलेमि खँलाबँला',
'nl': u'Overleg gebruiker',
'nn': u'Brukardiskusjon',
'no': u'Brukerdiskusjon',
'nso': u'Boledišana le Mošomi',
'nv': u'Choyoołʼįįhí bichʼįʼ yáshtiʼ',
'oc': [u'Discussion Utilizaire', u'Discussion Utilisator', u'Discutida Utilisator'],
'or': [u'ବ୍ୟବହାରକାରୀଙ୍କ ଆଲୋଚନା', u'ବ୍ୟବହାରକାରିଁକ ଆଲୋଚନା', u'ବ୍ୟବାହାରକାରୀଙ୍କ ଆଲୋଚନା'],
'os': [u'Архайæджы ныхас', u'Архайæджы дискусси', u'Обсуждение участника', u'Обсуждение участницы'],
'pa': [u'ਵਰਤੌਂਕਾਰ ਗੱਲ-ਬਾਤ', u'ਮੈਂਬਰ ਚਰਚਾ'],
'pcd': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'pdc': [u'Yuuser Dischbedutt', u'Benutzer Diskussion', u'Benutzerin Diskussion'],
'pfl': [u'Middawaida Dischbediere', u'Benutzer Diskussion', u'Benudzer Dischbediere', u'Benutzerin Diskussion'],
'pi': u'अवयव सम्भासित',
'pl': [u'Dyskusja wikipedysty', u'Dyskusja wikipedystki'],
'pms': u'Ciaciarade',
'pnt': u'Καλάτσεμαν χρήστε',
'ps': [u'د کارن خبرې اترې', u'د کارونکي خبرې اترې'],
'pt': [u'Usuário(a) Discussão', u'Utilizador Discussão', u'Usuário Discussão', u'Usuária Discussão'],
'qu': u'Ruraq rimanakuy',
'rm': u'Utilisader discussiun',
'rmy': [u'Jeno vakyarimata', u'Discuţie Utilizator'],
'ro': [u'Discuție Utilizator', u'Discuţie Utilizator'],
'ru': [u'Обсуждение участника', u'Обсуждение участницы', u'Обсуждение участницы'],
'rue': [u'Діскузія з хоснователём', u'Діскузіа з хоснователём'],
'sa': [u'योजकसम्भाषणम्', u'योजकसंभाषणं'],
'sah': u'Кыттааччы ырытыыта',
'sc': [u'Cuntierra usuàriu', u'Utente discussioni'],
'scn': [u'Discussioni utenti', u'Discussioni Utenti'],
'sd': u'يوزر بحث',
'se': u'Geavaheaddjeságastallan',
'sg': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'sh': u'Razgovor sa korisnikom',
'si': u'පරිශීලක සාකච්ඡාව',
'sk': [u'Diskusia s redaktorom', u'Komentár k redaktorovi'],
'sl': u'Uporabniški pogovor',
'sq': [u'Përdoruesi diskutim', u'Perdoruesi diskutim', u'Përdoruesja diskutim'],
'sr': [u'Разговор са корисником', u'Razgovor sa korisnikom'],
'srn': [u'Taki fu masyin', u'Overleg gebruiker'],
'stq': [u'Benutser Diskussion', u'Benutserske Diskussion'],
'su': u'Obrolan pamaké',
'sv': u'Användardiskussion',
'sw': u'Majadiliano ya mtumiaji',
'szl': [u'Dyskusyjo używacza', u'Dyskusja użytkownika'],
'ta': u'பயனர் பேச்சு',
'te': [u'వాడుకరి చర్చ', u'సభ్యులపై చర్చ', u'సభ్యునిపై చర్చ'],
'tet': u'Diskusaun Uza-na\'in',
'tg': u'Баҳси корбар',
'th': u'คุยกับผู้ใช้',
'tk': u'Ulanyjy çekişme',
'tl': u'Usapang tagagamit',
'tpi': u'Toktok bilong yusa',
'tr': u'Kullanıcı mesaj',
'tt': [u'Кулланучы бәхәсе', u'Обсуждение участницы', u'Обсуждение участника', u'Äğzä bäxäse'],
'ty': [u'Discussion utilisateur', u'Discussion Utilisateur'],
'udm': u'Викиавтор сярысь вераськон',
'ug': u'ئىشلەتكۈچى مۇنازىرىسى',
'uk': u'Обговорення користувача',
'ur': u'تبادلۂ خیال صارف',
'uz': u'Foydalanuvchi munozarasi',
'vec': u'Discussion utente',
'vep': u'Lodu kävutajas',
'vi': u'Thảo luận Thành viên',
'vls': u'Discuusje gebruker',
'vo': u'Gebanibespik',
'wa': [u'Uzeu copene', u'Discussion Utilisateur'],
'war': u'Hiruhimangaw hiton gumaramit',
'wo': [u'Waxtaani jëfandikukat', u'Discussion Utilisateur'],
'wuu': [u'用户讨论', u'用户对话'],
'xal': [u'Демнчна туск меткән', u'Орлцачна тускар ухалвр'],
'xmf': [u'მომხმარებლის განხილვა', u'მონაწილის განხილვა', u'მომხმარებელი განხილვა'],
'yi': [u'באַניצער רעדן', u'באנוצער רעדן', u'שיחת משתמש', u'שיחת משתמשת', u'באניצער רעדן', u'באַניצערין רעדן'],
'yo': u'Ọ̀rọ̀ oníṣe',
'za': [u'用户讨论', u'用户对话'],
'zea': u'Overleg gebruker',
'zh': [u'User talk', u'用户对话', u'用戶對話', u'用户讨论', u'用戶討論'],
'zh-yue': [u'User talk', u'用戶傾偈', u'用戶 對話', u'用户 对话', u'用戶 討論', u'用户 讨论'],
},
4: {
'_default': u'Project',
},
5: {
'_default': u'Project talk',
},
6: {
'_default': [u'File', u'Image'],
'ab': [u'Афаил', u'Файл'],
'ace': [u'Beureukaih', u'Berkas', u'Gambar'],
'af': [u'Lêer', u'Beeld'],
'ak': u'Fayl',
'als': [u'Datei', u'Bild'],
'am': u'ስዕል',
'an': u'Imachen',
'ang': u'Biliþ',
'ar': [u'ملف', u'صورة'],
'arc': u'ܠܦܦܐ',
'arz': [u'ملف', u'صورة'],
'as': [u'চিত্ৰ', u'चित्र', u'চিত্র'],
'ast': [u'Ficheru', u'Imaxe', u'Imaxen', u'Archivu'],
'av': [u'Файл', u'Изображение'],
'ay': [u'Archivo', u'Imagen'],
'az': u'Şəkil',
'ba': [u'Файл', u'Рәсем'],
'bar': u'Datei',
'bat-smg': [u'Abruozdielis', u'Vaizdas'],
'bcl': u'Ladawan',
'be': [u'Файл', u'Выява'],
'be-x-old': [u'Файл', u'Выява'],
'bg': [u'Файл', u'Картинка'],
'bh': u'चित्र',
'bjn': [u'Barakas', u'Berkas'],
'bm': u'Fichier',
'bn': u'চিত্র',
'bpy': u'ছবি',
'br': [u'Restr', u'Skeudenn'],
'bs': [u'Datoteka', u'Slika'],
'bug': [u'Berkas', u'Gambar'],
'bxr': u'Файл',
'ca': [u'Fitxer', u'Imatge'],
'cbk-zam': [u'Archivo', u'Imagen'],
'cbs': u'Òbrôzk',
'ce': [u'Файл', u'Сурт', u'Хlум'],
'ceb': [u'Payl', u'Imahen'],
'ch': u'Litratu',
'ckb': u'پەڕگە',
'crh': [u'Fayl', u'Resim', u'Ресим'],
'cs': u'Soubor',
'csb': [u'Òbrôzk', u'Grafika'],
'cu': [u'Дѣло', u'Ви́дъ', u'Видъ'],
'cv': [u'Ӳкерчĕк', u'Изображение'],
'cy': u'Delwedd',
'da': [u'Fil', u'Billede'],
'de': [u'Datei', u'Bild'],
'diq': u'Dosya',
'dsb': [u'Dataja', u'Wobraz'],
'dv': [u'ފައިލު', u'ފައިލް'],
'el': [u'Αρχείο', u'Εικόνα'],
'eml': [u'File', u'Immagine'],
'eo': u'Dosiero',
'es': [u'Archivo', u'Imagen'],
'et': u'Pilt',
'eu': [u'Fitxategi', u'Irudi'],
'fa': [u'پرونده', u'تصویر'],
'ff': u'Fichier',
'fi': [u'Tiedosto', u'Kuva'],
'fiu-vro': u'Pilt',
'fo': u'Mynd',
'fr': u'Fichier',
'frp': [u'Fichiér', u'Émâge'],
'frr': [u'Datei', u'Bild'],
'fur': [u'Figure', u'Immagine'],
'fy': u'Ofbyld',
'ga': u'Íomhá',
'gag': [u'Dosye', u'Dosya'],
'gan': u'文檔',
'gd': u'Faidhle',
'gl': [u'Ficheiro', u'Imaxe'],
'glk': [u'پرونده', u'تصویر'],
'gn': [u'Ta\'ãnga', u'Imagen'],
'got': u'𐍆𐌴𐌹𐌻𐌰',
'gu': u'ચિત્ર',
'gv': u'Coadan',
'haw': [u'Waihona', u'Kiʻi'],
'he': [u'קובץ', u'תמונה'],
'hi': u'चित्र',
'hif': u'file',
'hr': [u'Datoteka', u'Slika'],
'hsb': [u'Dataja', u'Wobraz'],
'ht': u'Fichye',
'hu': [u'Fájl', u'Kép'],
'hy': u'Պատկեր',
'ia': [u'File', u'Imagine'],
'id': [u'Berkas', u'Gambar'],
'ig': [u'Usòrò', u'Ákwúkwó orünotu'],
'ilo': u'Papeles',
'io': [u'Arkivo', u'Imajo'],
'is': u'Mynd',
'it': [u'File', u'Immagine'],
'ja': [u'ファイル', u'Image', u'画像'],
'jv': u'Gambar',
'ka': [u'ფაილი', u'სურათი'],
'kaa': [u'Su\'wret', u'Сурет', u'سۋرەت'],
'kab': u'Tugna',
'kbd': u'Файл',
'kg': u'Fisye',
'kk': [u'Сурет', u'Swret', u'سۋرەت'],
'kl': [u'Fiileq', u'Fil', u'Billede'],
'km': [u'ឯកសារ', u'រូបភាព'],
'kn': u'ಚಿತ್ರ',
'ko': [u'파일', u'그림'],
'koi': [u'Файл', u'Изображение'],
'krc': [u'Файл', u'Изображение'],
'ks': u'فَیِل',
'ksh': [u'Datei', u'Beld', u'Belld'],
'ku': u'Wêne',
'kv': u'Файл',
'kw': u'Restren',
'ky': u'Файл',
'la': [u'Fasciculus', u'Imago'],
'lad': [u'Dosya', u'Dossia', u'Archivo'],
'lb': [u'Fichier', u'Bild'],
'lbe': [u'Сурат', u'Изображение'],
'lez': u'Файл',
'li': [u'Plaetje', u'Aafbeilding'],
'lij': [u'Immaggine', u'Immagine'],
'lmo': [u'Archivi', u'Immagine'],
'ln': u'Fichier',
'lo': u'ຮູບ',
'lt': u'Vaizdas',
'ltg': u'Fails',
'lv': u'Attēls',
'map-bms': u'Gambar',
'mdf': [u'Няйф', u'Изображение'],
'mg': u'Sary',
'mhr': u'Файл',
'min': u'Berkas',
'mk': [u'Податотека', u'Слика'],
'ml': [u'പ്രമാണം', u'ചി', u'ചിത്രം', u'പ്ര'],
'mn': [u'Файл', u'Зураг'],
'mr': u'चित्र',
'mrj': u'Файл',
'ms': [u'Fail', u'Imej'],
'mt': u'Stampa',
'mwl': [u'Fexeiro', u'Ficheiro', u'Imagem'],
'myv': [u'Артовкс', u'Изображение'],
'mzn': [u'پرونده', u'تصویر'],
'nah': [u'Īxiptli', u'Imagen'],
'nap': [u'Fiùra', u'Immagine'],
'nds': [u'Bild', u'Datei'],
'nds-nl': [u'Bestaand', u'Ofbeelding'],
'ne': u'चित्र',
'new': u'किपा',
'nl': [u'Bestand', u'Afbeelding'],
'nn': u'Fil',
'no': [u'Fil', u'Bilde'],
'nso': u'Seswantšho',
'nv': u'Eʼelyaaígíí',
'oc': [u'Fichièr', u'Imatge'],
'or': u'ଫାଇଲ',
'os': [u'Файл', u'Ныв'],
'pa': u'ਤਸਵੀਰ',
'pcd': u'Fichier',
'pdc': [u'Feil', u'Datei'],
'pfl': [u'Dadai', u'Datei'],
'pi': u'पटिमा',
'pl': [u'Plik', u'Grafika'],
'pms': [u'Figura', u'Immagine'],
'pnt': [u'Αρχείον', u'Εικόναν'],
'ps': [u'دوتنه', u'انځور'],
'pt': [u'Ficheiro', u'Imagem', u'Arquivo'],
'qu': [u'Rikcha', u'Imagen'],
'rm': u'Datoteca',
'rmy': [u'Chitro', u'Imagine', u'Fişier'],
'ro': [u'Fișier', u'Fişier', u'Imagine'],
'ru': [u'Файл', u'Изображение'],
'rue': u'Файл',
'sa': [u'चित्रम्', u'चित्रं'],
'sah': [u'Билэ', u'Ойуу'],
'sc': [u'File', u'Immàgini'],
'scn': [u'File', u'Mmàggini'],
'sd': u'عڪس',
'se': u'Fiila',
'sg': u'Fichier',
'sh': u'Datoteka',
'si': [u'ගොනුව', u'රූපය'],
'sk': [u'Súbor', u'Obrázok'],
'sl': u'Slika',
'sq': [u'Skeda', u'Figura'],
'sr': [u'Датотека', u'Slika', u'Слика'],
'srn': [u'Gefre', u'Afbeelding'],
'stq': [u'Bielde', u'Bild'],
'su': u'Gambar',
'sv': [u'Fil', u'Bild'],
'sw': u'Picha',
'szl': u'Plik',
'ta': u'படிமம்',
'te': [u'దస్త్రం', u'ఫైలు', u'బొమ్మ'],
'tet': u'Imajen',
'tg': u'Акс',
'th': [u'ไฟล์', u'ภาพ'],
'tk': u'Faýl',
'tl': u'Talaksan',
'tlh': u'nagh beQ',
'tpi': u'Fail',
'tr': [u'Dosya', u'Resim'],
'tt': [u'Файл', u'Изображение', u'Рәсем', u'Räsem'],
'ty': u'Fichier',
'udm': [u'Файл', u'Суред'],
'ug': u'ھۆججەت',
'uk': [u'Файл', u'Зображення'],
'ur': [u'ملف', u'تصویر'],
'uz': [u'Fayl', u'Tasvir'],
'vec': [u'File', u'Immagine', u'Imagine'],
'vep': [u'Fail', u'Image'],
'vi': [u'Tập tin', u'Hình'],
'vls': [u'Ofbeeldienge', u'Afbeelding'],
'vo': [u'Ragiv', u'Magod'],
'wa': u'Imådje',
'war': [u'Paypay', u'Fayl'],
'wo': [u'Dencukaay'],
'wuu': [u'文件', u'图像', u'档案'],
'xal': [u'Боомг', u'Зург'],
'xmf': [u'ფაილი', u'სურათი'],
'yi': [u'טעקע', u'בילד'],
'yo': [u'Fáìlì', u'Àwòrán'],
'za': [u'文件', u'图像', u'档案'],
'zea': [u'Plaetje', u'Afbeelding'],
'zh': [u'File', u'图像', u'圖像', u'档案', u'檔案', u'文件'],
'zh-yue': [u'File', u'文件', u'檔', u'檔案', u'档', u'档案', u'圖', u'圖像', u'图', u'图像'],
},
7: {
'_default': [u'File talk', u'Image talk'],
'ab': [u'Афаил ахцәажәара', u'Обсуждение файла'],
'ace': [u'Marit Beureukaih', u'Pembicaraan Berkas', u'Gambar Pembicaraan', u'Pembicaraan Gambar'],
'af': [u'Lêerbespreking', u'Beeldbespreking'],
'ak': u'Fayl nkɔmbɔ',
'als': [u'Datei Diskussion', u'Bild Diskussion'],
'am': u'ስዕል ውይይት',
'an': u'Descusión imachen',
'ang': u'Biliþgesprec',
'ar': [u'نقاش الملف', u'نقاش الصورة'],
'arc': u'ܡܡܠܠܐ ܕܠܦܦܐ',
'arz': [u'نقاش الملف', u'نقاش الصورة'],
'as': [u'চিত্ৰ বাৰ্তা', u'चित्र वार्ता', u'চিত্র বার্তা'],
'ast': [u'Ficheru alderique', u'Imaxe alderique', u'Imaxen discusión', u'Archivu alderique'],
'av': [u'Обсуждение файла', u'Обсуждение изображения'],
'ay': [u'Archivo discusión', u'Imagen Discusión'],
'az': u'Şəkil müzakirəsi',
'ba': [u'Файл буйынса фекерләшеү', u'Рәсем буйынса фекерләшеү', u'Рәсем б-са фекер алышыу'],
'bar': [u'Datei Dischkrian', u'Datei Diskussion'],
'bat-smg': [u'Abruozdielė aptarėms', u'Vaizdo aptarimas'],
'bcl': u'Olay sa ladawan',
'be': [u'Размовы пра файл', u'Размовы пра выяву'],
'be-x-old': [u'Абмеркаваньне файла', u'Абмеркаваньне выявы'],
'bg': [u'Файл беседа', u'Картинка беседа'],
'bh': u'चित्र वार्ता',
'bjn': [u'Pamandiran Barakas', u'Pembicaraan Berkas'],
'bm': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'bn': [u'চিত্র আলোচনা', u'চিত্র আলাপ', u'MediaWiki আলাপ'],
'bpy': [u'ছবি য়্যারী', u'চিত্র আলাপ', u'MediaWiki আলাপ'],
'br': [u'Kaozeadenn Restr', u'Kaozeadenn Skeudenn'],
'bs': u'Razgovor o datoteci',
'bug': [u'Pembicaraan Berkas', u'Gambar Pembicaraan', u'Pembicaraan Gambar'],
'bxr': [u'Файл хэлэлсэхэ', u'Обсуждение файла'],
'ca': [u'Fitxer Discussió', u'Imatge Discussió'],
'cbk-zam': [u'Archivo discusión', u'Imagen Discusión'],
'ce': [u'Файлан дийцаре', u'Сурти дийца', u'Хlуман дийцаре', u'Дийцаре хlуман'],
'ceb': [u'Hisgot sa Payl', u'Hisgot sa Imahen'],
'ch': u'Kombetsasion ni litratu',
'ckb': [u'وتووێژی پەڕگە', u'لێدوانی پەڕگە'],
'crh': [u'Fayl muzakeresi', u'Resim muzakeresi', u'Ресим музакереси'],
'cs': [u'Diskuse k souboru', u'Soubor diskuse'],
'csb': [u'Diskùsëjô òbrôzków', u'Dyskusja grafiki'],
'cu': [u'Дѣла бєсѣда', u'Ви́да бєсѣ́да', u'Вида бесѣда'],
'cv': [u'Ӳкерчĕке сӳтсе явмалли', u'Обсуждение изображения'],
'cy': u'Sgwrs Delwedd',
'da': [u'Fildiskussion', u'Billeddiskussion'],
'de': [u'Datei Diskussion', u'Bild Diskussion'],
'diq': [u'Dosya mesac', u'Dosya werênayış'],
'dsb': [u'Diskusija wó dataji', u'Diskusija wó wobrazu'],
'dv': [u'ފައިލުގެ ޚިޔާލު', u'ފައިލް ޚިޔާލު'],
'el': [u'Συζήτηση αρχείου', u'Συζήτηση εικόνας'],
'eml': [u'Discussioni file', u'Discussioni immagine'],
'eo': [u'Dosiero-Diskuto', u'Dosiera diskuto'],
'es': [u'Archivo discusión', u'Imagen Discusión'],
'et': u'Pildi arutelu',
'eu': [u'Fitxategi eztabaida', u'Irudi eztabaida'],
'fa': [u'بحث پرونده', u'بحث تصویر'],
'ff': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'fi': [u'Keskustelu tiedostosta', u'Keskustelu kuvasta'],
'fiu-vro': u'Pildi arotus',
'fo': [u'Myndakjak', u'Mynd kjak'],
'fr': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'frp': [u'Discussion fichiér', u'Discussion Émâge'],
'frr': [u'Datei Diskussion', u'Bild Diskussion'],
'fur': [u'Discussion figure', u'Discussioni immagine'],
'fy': u'Ofbyld oerlis',
    'ga': u'Plé íomhá',
'gag': [u'Dosye dartışma', u'Dosya tartışma'],
'gan': u'文檔・談詑',
'gd': u'Deasbaireachd an fhaidhle',
'gl': [u'Conversa ficheiro', u'Conversa Imaxe'],
'glk': [u'بحث پرونده', u'بحث تصویر'],
'gn': [u'Ta\'ãnga myangekõi', u'Imagen Discusión'],
'got': u'𐍆𐌴𐌹𐌻𐌹𐌽𐍃 𐌲𐌰𐍅𐌰𐌿𐍂𐌳𐌾𐌰',
'gu': u'ચિત્રની ચર્ચા',
'gv': u'Resooney coadan',
'haw': [u'Kūkākūkā o waihona', u'Kūkākūkā o kiʻi'],
'he': [u'שיחת קובץ', u'שיחת תמונה'],
'hi': u'चित्र वार्ता',
'hif': u'file ke baat',
'hr': [u'Razgovor o datoteci', u'Razgovor o slici'],
'hsb': [u'Diskusija k dataji', u'Diskusija k wobrazej'],
'ht': u'Diskisyon Fichye',
'hu': [u'Fájlvita', u'Képvita', u'Kép vita'],
'hy': u'Պատկերի քննարկում',
'ia': [u'Discussion File', u'Discussion Imagine'],
'id': [u'Pembicaraan Berkas', u'Gambar Pembicaraan', u'Pembicaraan Gambar'],
'ie': u'File Discussion',
'ig': [u'Okwu usòrò', u'Okwu ákwúkwó orünotu'],
'ilo': u'Papeles tungtungan',
'io': [u'Arkivo Debato', u'Imajo Debato'],
'is': u'Myndaspjall',
'it': [u'Discussioni file', u'Discussioni immagine'],
'ja': [u'ファイル・トーク', u'Image talk', u'画像‐ノート', u'ファイル‐ノート'],
'jv': [u'Dhiskusi Gambar', u'Gambar Dhiskusi'],
'ka': [u'ფაილის განხილვა', u'სურათი განხილვა'],
'kaa': [u'Su\'wret sa\'wbeti', u'Сурет талқылауы', u'سۋرەت تالقىلاۋى'],
'kab': u'Amyannan n tugna',
'kbd': [u'Файл тепсэлъэхьыгъуэ', u'Обсуждение файла'],
'kg': u'Disolo fisye',
'kk': [u'Сурет талқылауы', u'Swret talqılawı', u'سۋرەت تالقىلاۋى'],
'kl': [u'Fiilip oqalliffia', u'Fildiskussion', u'Billeddiskussion'],
'km': [u'ការពិភាក្សាអំពីឯកសារ', u'ការពិភាក្សាអំពីរូបភាព', u'រូបភាព-ពិភាក្សា'],
'kn': u'ಚಿತ್ರ ಚರ್ಚೆಪುಟ',
'ko': u'파일토론',
'koi': [u'Обсуждение файла', u'Обсуждение изображения'],
'krc': [u'Файлны сюзюу', u'Обсуждение изображения'],
'ks': u'فَیِل بَحَژ',
'ksh': [u'Dateie Klaaf', u'Belder Klaaf', u'Bellder Klaaf'],
'ku': [u'Gotûbêja wêneyî', u'Wêne nîqaş'],
'kv': [u'Файл донъялӧм', u'Обсуждение файла'],
'kw': [u'Keskows Restren', u'Cows Restren', u'Kescows Restren'],
'la': [u'Disputatio Fasciculi', u'Disputatio Imaginis'],
'lad': [u'Diskusyón de Dosya', u'Diskussión de Dossia', u'Archivo Discusión'],
'lb': [u'Fichier Diskussioun', u'Bild Diskussioun'],
'lbe': [u'Суратраясса ихтилат', u'Обсуждение изображения'],
'lez': [u'Файл веревирд авун', u'Обсуждение файла'],
'li': [u'Euverlèk plaetje', u'Euverlèk afbeelding'],
'lij': [u'Discûscioîn immaggine', u'Discussioni immagine'],
'lmo': [u'Ciciarada Archivi', u'Discussioni file', u'Discussioni immagine'],
'ln': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'lo': u'ສົນທະນາກ່ຽວກັບຮູບ',
'lt': u'Vaizdo aptarimas',
'ltg': u'Sprīža ap failu',
'lv': u'Attēla diskusija',
'map-bms': [u'Dhiskusi Gambar', u'Gambar Dhiskusi'],
'mdf': [u'Няйф корхнема', u'Обсуждение изображения'],
'mg': [u'Dinika amin\'ny sary', u'Discussion Image'],
'mhr': [u'Файл шотышто каҥашымаш', u'Обсуждение файла', u'Файлын каҥашымаш'],
'min': [u'Rundiang Berkas', u'Pembicaraan Berkas'],
'mk': [u'Разговор за податотека', u'Разговор за слика'],
'ml': [u'പ്രമാണത്തിന്റെ സംവാദം', u'ചിസം', u'ചിത്രത്തിന്റെ സംവാദം', u'പ്രസം'],
'mn': [u'Файлын хэлэлцүүлэг', u'Зургийн хэлэлцүүлэг'],
'mr': u'चित्र चर्चा',
'mrj': [u'Файлым кӓнгӓшӹмӓш', u'Файл кӓнгӓшӹмӓш'],
'ms': [u'Perbincangan fail', u'Perbincangan Imej', u'Imej Perbualan'],
'mt': [u'Diskussjoni stampa', u'Diskuti stampa'],
'mwl': [u'Cumbersa fexeiro', u'Ficheiro Discussão', u'Imagem Discussão'],
'myv': [u'Артовксто кортамось', u'Обсуждение изображения'],
'mzn': [u'پرونده گپ', u'بحث تصویر', u'بحث پرونده'],
'nah': [u'Īxiptli tēixnāmiquiliztli', u'Imagen Discusión'],
'nap': [u'Fiùra chiàcchiera', u'Discussioni immagine'],
'nds': [u'Bild Diskuschoon', u'Bild Diskussion', u'Datei Diskuschoon'],
'nds-nl': [u'Overleg bestaand', u'Overleg ofbeelding'],
'ne': u'चित्र वार्ता',
'new': u'किपा खँलाबँला',
'nl': [u'Overleg bestand', u'Overleg afbeelding'],
'nn': u'Fildiskusjon',
'no': [u'Fildiskusjon', u'Bildediskusjon'],
'nso': u'Poledišano ya Seswantšho',
'nv': u'Eʼelyaaígíí baa yáshtiʼ',
'oc': [u'Discussion Fichièr', u'Discutida Imatge', u'Discussion Imatge'],
'or': u'ଫାଇଲ ଆଲୋଚନା',
'os': [u'Файлы тæрхон', u'Нывы тæрхон', u'Нывы тыххæй дискусси'],
'pa': [u'ਤਸਵੀਰ ਗੱਲ-ਬਾਤ', u'ਤਸਵੀਰ ਚਰਚਾ'],
'pcd': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'pdc': [u'Feil Dischbedutt', u'Datei Diskussion'],
'pfl': [u'Dadai Dischbediere', u'Datei Diskussion'],
'pi': u'पटिमा सम्भासित',
'pl': [u'Dyskusja pliku', u'Dyskusja grafiki'],
'pms': [u'Discussion dla figura', u'Discussioni immagine'],
'pnt': [u'Καλάτσεμαν αρχείονος', u'Καλάτσεμαν εικόνας'],
'ps': [u'د دوتنې خبرې اترې', u'د انځور خبرې اترې'],
'pt': [u'Ficheiro Discussão', u'Imagem Discussão', u'Arquivo Discussão'],
'qu': [u'Rikcha rimanakuy', u'Imagen Discusión'],
'rm': u'Datoteca discussiun',
'rmy': [u'Chitro vakyarimata', u'Discuţie Imagine', u'Discuţie Fişier'],
'ro': [u'Discuție Fișier', u'Discuţie Imagine', u'Discuţie Fişier'],
'ru': [u'Обсуждение файла', u'Обсуждение изображения'],
'rue': u'Діскузія ку файлу',
'sa': [u'चित्रसम्भाषणम्', u'चित्रसंभाषणं'],
'sah': [u'Билэ ырытыыта', u'Ойуу ырытыыта'],
'sc': [u'Cuntierra file', u'Immàgini contièndha'],
'scn': [u'Discussioni file', u'Discussioni mmàggini'],
'sd': u'عڪس بحث',
'se': u'Fiilaságastallan',
'sg': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'sh': u'Razgovor o datoteci',
'si': [u'ගොනුව සාකච්ඡාව', u'රූපය සාකච්ඡාව'],
'sk': [u'Diskusia k súboru', u'Diskusia k obrázku', u'Komentár k obrázku'],
'sl': u'Pogovor o sliki',
'sq': [u'Skeda diskutim', u'Figura diskutim'],
'sr': [u'Разговор о датотеци', u'Razgovor o slici', u'Разговор о слици'],
'srn': [u'Taki fu gefre', u'Overleg afbeelding'],
'stq': [u'Bielde Diskussion', u'Bild Diskussion'],
'su': u'Obrolan gambar',
'sv': [u'Fildiskussion', u'Bilddiskussion'],
'sw': [u'Majadiliano ya faili', u'Majadiliano faili'],
'szl': [u'Dyskusyjo plika', u'Dyskusja pliku'],
'ta': [u'படிமப் பேச்சு', u'உருவப் பேச்சு'],
'te': [u'దస్త్రంపై చర్చ', u'ఫైలుపై చర్చ', u'బొమ్మపై చర్చ'],
'tet': u'Diskusaun Imajen',
'tg': u'Баҳси акс',
'th': [u'คุยเรื่องไฟล์', u'คุยเรื่องภาพ'],
'tk': u'Faýl çekişme',
'tl': u'Usapang talaksan',
'tlh': u"nagh beQ ja'chuq",
'tpi': u'Toktok bilong fail',
'tr': [u'Dosya tartışma', u'Resim tartışma'],
'tt': [u'Файл бәхәсе', u'Обсуждение изображения', u'Обсуждение файла', u'Рәсем бәхәсе', u'Räsem bäxäse'],
'ty': [u'Discussion fichier', u'Discussion Fichier', u'Discussion Image'],
'udm': [u'Файл сярысь вераськон', u'Суред сярысь вераськон'],
'ug': u'ھۆججەت مۇنازىرىسى',
'uk': [u'Обговорення файлу', u'Обговорення зображення'],
'ur': [u'تبادلۂ خیال ملف', u'تبادلۂ خیال تصویر'],
'uz': [u'Fayl munozarasi', u'Tasvir munozarasi'],
'vec': [u'Discussion file', u'Discussion imagine'],
'vep': [u'Lodu failas', u'Image talk'],
'vi': [u'Thảo luận Tập tin', u'Thảo luận Hình'],
'vls': [u'Discuusje ofbeeldienge', u'Overleg afbeelding'],
'vo': [u'Ragivibespik', u'Magodibespik'],
'wa': [u'Imådje copene', u'Discussion Fichier', u'Discussion Image'],
'war': [u'Hiruhimangraw hiton paypay', u'Hiruhimangraw hiton fayl'],
'wo': [u'Waxtaani dencukaay', u'Discussion Image'],
'wuu': [u'文件讨论', u'图像对话', u'图像讨论', u'档案对话', u'档案讨论', u'文件对话'],
'xal': [u'Боомгин туск меткән', u'Зургин тускар ухалвр'],
'xmf': [u'ფაილის განხილვა', u'სურათი განხილვა'],
'yi': [u'טעקע רעדן', u'בילד רעדן'],
'yo': [u'Ọ̀rọ̀ fáìlì', u'Ọ̀rọ̀ àwòrán'],
'za': [u'文件讨论', u'图像对话', u'图像讨论', u'档案对话', u'档案讨论', u'文件对话'],
'zea': [u'Overleg plaetje', u'Overleg afbeelding'],
'zh': [u'File talk', u'图像对话', u'圖像對話', u'图像讨论', u'圖像討論', u'档案对话', u'檔案對話', u'档案讨论', u'檔案討論', u'文件对话', u'文件對話', u'文件讨论', u'文件討論'],
'zh-yue': [u'File talk', u'文件傾偈', u'MediaWiki 傾偈', u'檔 討論', u'档 讨论', u'檔案 討論', u'档案 讨论', u'圖 討論', u'图 讨论', u'圖像 討論', u'图像 讨论'],
},
8: {
'_default': u'MediaWiki',
'ab': u'Амедиавики',
'ace': [u'MediaWiki', u'AlatWiki'],
'am': u'መልዕክት',
'ar': u'ميدياويكي',
'arc': u'ܡܝܕܝܐܘܝܩܝ',
'arz': u'ميدياويكى',
'as': [u'মিডিয়াৱিকি', u'মেডিয়াৱিকি'],
'az': [u'MediaWiki', u'MediyaViki'],
'bg': u'МедияУики',
'bh': u'मीडियाविकि',
'bn': u'মিডিয়াউইকি',
'bpy': u'মিডিয়াউইকি',
'bs': [u'MediaWiki', u'MedijaViki'],
'ce': [u'MediaWiki', u'МедйаВики'],
'ckb': u'میدیاویکی',
'crh': [u'MediaViki', u'МедиаВики'],
'cy': u'MediaWici',
'dv': u'މީޑިއާވިކީ',
'eo': [u'MediaWiki', u'MediaVikio'],
'fa': u'مدیاویکی',
'fi': u'Järjestelmäviesti',
'fo': [u'MediaWiki', u'MidiaWiki'],
'glk': u'مدیاویکی',
'gu': u'મીડિયાવિકિ',
'he': u'מדיה ויקי',
'hi': [u'मीडियाविकि', u'मी'],
'ht': u'MedyaWiki',
'ig': [u'MidiaWiki', u'NkáWiki'],
'is': u'Melding',
'ka': u'მედიავიკი',
'kaa': [u'MediaWiki', u'МедиаУики', u'مەدياۋيكي'],
'kk': [u'МедиаУики', u'MedïaWïkï', u'مەدياۋيكي'],
'km': [u'មេឌាវិគី', u'មីឌាវិគី'],
'kn': u'ಮೀಡಿಯವಿಕಿ',
'ko': u'미디어위키',
'ks': u'میڈیاوکی',
'ksh': [u'MediaWiki', u'MedijaWikki'],
'kv': u'МедиаВики',
'lad': u'MedyaViki',
'lo': u'ມີເດຍວິກິ',
'mdf': u'МедиаВики',
'mk': u'МедијаВики',
'ml': [u'മീഡിയവിക്കി', u'മീ'],
'mn': u'МедиаВики',
'mr': u'मिडियाविकी',
'mt': [u'MediaWiki', u'MedjaWiki'],
'mwl': u'Biqui',
'mzn': [u'مدیاویکی', u'مهدیا ویکی', u'مهدیاویکی'],
'nah': u'Huiquimedia',
'ne': u'मीडियाविकि',
'new': u'मिडियाविकि',
'oc': [u'MediaWiki', u'Mediaòiqui'],
'or': u'ମିଡ଼ିଆଉଇକି',
'pa': [u'ਮੀਡੀਆਵਿਕੀ', u'ਮੀਡੀਆਵਿਕਿ'],
'pi': u'मीडियाविकि',
'ps': u'ميډياويکي',
'rmy': u'MediyaViki',
'sa': u'मिडीयाविकी',
'sd': u'ذريعات وڪي',
'si': [u'මාධ්යවිකි', u'විකිමාධ්ය'],
'sr': [u'Медијавики', u'MedijaViki', u'МедијаВики'],
'su': [u'MédiaWiki'],
'ta': u'மீடியாவிக்கி',
'te': u'మీడియావికీ',
'tg': u'Медиавики',
'th': u'มีเดียวิกิ',
'tr': [u'MediaWiki', u'MedyaViki'],
'tt': [u'МедиаВики', u'Медиа Вики'],
'ur': [u'میڈیاویکی', u'میڈیاوکی'],
'uz': [u'MediaWiki', u'MediyaViki'],
'vo': u'Sitanuns',
'war': u'MedyaWiki',
'xmf': u'მედიავიკი',
'yi': [u'מעדיעװיקי', u'מעדיעוויקי'],
},
9: {
'_default': u'MediaWiki talk',
'ab': [u'Амедиавики ахцәажәара', u'Обсуждение MediaWiki'],
'ace': [u'Marit MediaWiki', u'Marit AlatWiki', u'Pembicaraan MediaWiki', u'MediaWiki Pembicaraan'],
'af': u'MediaWikibespreking',
'ak': u'MediaWiki nkɔmbɔ',
'als': u'MediaWiki Diskussion',
'am': u'መልዕክት ውይይት',
'an': u'Descusión MediaWiki',
'ar': u'نقاش ميدياويكي',
'arc': u'ܡܡܠܠܐ ܕܡܝܕܝܐܘܝܩܝ',
'arz': u'نقاش ميدياويكى',
'as': [u'মিডিয়াৱিকি আলোচনা', u'মেডিয়াৱিকি বাৰ্তা', u'MediaWiki বার্তা'],
'ast': [u'MediaWiki alderique', u'MediaWiki discusión'],
'av': u'Обсуждение MediaWiki',
'ay': u'MediaWiki discusión',
'az': [u'MediaWiki müzakirəsi', u'MediyaViki müzakirəsi'],
'ba': [u'MediaWiki буйынса фекерләшеү', u'MediaWiki б-са фекер алышыу'],
'bar': [u'MediaWiki Dischkrian', u'MediaWiki Diskussion'],
'bat-smg': [u'MediaWiki aptarėms', u'MediaWiki aptarimas'],
'bcl': u'Olay sa MediaWiki',
'be': u'Размовы пра MediaWiki',
'be-x-old': u'Абмеркаваньне MediaWiki',
'bg': u'МедияУики беседа',
'bh': u'मीडियाविकि वार्ता',
'bjn': [u'Pamandiran MediaWiki', u'Pembicaraan MediaWiki'],
'bm': u'Discussion MediaWiki',
'bn': u'মিডিয়াউইকি আলোচনা',
'bpy': u'মিডিয়াউইকির য়্যারী',
'br': u'Kaozeadenn MediaWiki',
'bs': [u'MediaWiki razgovor', u'Razgovor o MedijaVikiju'],
'bug': [u'Pembicaraan MediaWiki', u'MediaWiki Pembicaraan'],
'bxr': [u'MediaWiki хэлэлсэхэ', u'Обсуждение MediaWiki'],
'ca': u'MediaWiki Discussió',
'cbk-zam': u'MediaWiki discusión',
'ce': [u'MediaWiki дийцаре', u'МедйаВики дийца', u'MediaWiki Дийцаре'],
'ceb': u'Hisgot sa MediaWiki',
'ckb': [u'وتووێژی میدیاویکی', u'لێدوانی میدیاویکی'],
'crh': [u'MediaViki muzakeresi', u'МедиаВики музакереси'],
'cs': [u'Diskuse k MediaWiki', u'MediaWiki diskuse'],
'csb': u'Diskùsëjô MediaWiki',
'cu': [u'MediaWiki бєсѣда', u'MediaWiki бесѣда'],
'cv': u'MediaWiki сӳтсе явмалли',
'cy': u'Sgwrs MediaWici',
'da': u'MediaWiki-diskussion',
'de': u'MediaWiki Diskussion',
'diq': [u'MediaWiki mesac', u'MediaWiki werênayış'],
'dsb': u'MediaWiki diskusija',
'dv': [u'މިޑިއާވިކީ ޚިޔާލު', u'މީޑިޔާވިކި ޚިޔާލު'],
'el': u'Συζήτηση MediaWiki',
'eml': u'Discussioni MediaWiki',
'eo': [u'MediaWiki-Diskuto', u'MediaWiki diskuto', u'MediaVikia diskuto'],
'es': u'MediaWiki discusión',
'et': u'MediaWiki arutelu',
'eu': u'MediaWiki eztabaida',
'fa': u'بحث مدیاویکی',
'ff': u'Discussion MediaWiki',
'fi': u'Keskustelu järjestelmäviestistä',
'fiu-vro': u'MediaWiki arotus',
'fo': [u'MediaWiki-kjak', u'MidiaWiki kjak'],
'fr': u'Discussion MediaWiki',
'frp': u'Discussion MediaWiki',
'frr': u'MediaWiki Diskussion',
'fur': u'Discussion MediaWiki',
'fy': u'MediaWiki oerlis',
'ga': u'Plé MediaWiki',
'gag': [u'MediaWiki dartışma', u'MediaWiki tartışma'],
'gan': u'MediaWiki・談詑',
'gd': u'Deasbaireachd MediaWiki',
'gl': u'Conversa MediaWiki',
'glk': u'بحث مدیاویکی',
'gn': u'MediaWiki myangekõi',
'gu': u'મીડિયાવિકિ ચર્ચા',
    'gv': u'Resooney MediaWiki',
'haw': u'Kūkākūkā o MediaWiki',
'he': u'שיחת מדיה ויקי',
'hi': [u'मीडियाविकि वार्ता', u'मीवा'],
'hif': u'Mediawiki ke baat',
'hr': u'MediaWiki razgovor',
'hsb': u'MediaWiki diskusija',
'ht': u'Diskisyon MedyaWiki',
'hu': [u'MediaWiki-vita', u'MediaWiki vita'],
'hy': u'MediaWiki քննարկում',
'ia': u'Discussion MediaWiki',
'id': [u'Pembicaraan MediaWiki', u'MediaWiki Pembicaraan'],
'ie': u'MediaWiki Discussion',
'ig': [u'Okwu MidiaWiki', u'Okwu NkáWiki'],
'ilo': u'MediaWiki tungtungan',
'io': u'MediaWiki Debato',
'is': u'Meldingarspjall',
'it': u'Discussioni MediaWiki',
'ja': [u'MediaWiki・トーク', u'MediaWiki‐ノート'],
'jv': [u'Dhiskusi MediaWiki', u'MediaWiki Dhiskusi'],
'ka': [u'მედიავიკის განხილვა', u'მედიავიკი განხილვა'],
'kaa': [u'MediaWiki sa\'wbeti', u'МедиаУики талқылауы', u'مەدياۋيكي تالقىلاۋى'],
'kab': u'Amyannan n MediaWiki',
'kbd': [u'MediaWiki тепсэлъэхьыгъуэ', u'Обсуждение MediaWiki'],
'kk': [u'МедиаУики талқылауы', u'MedïaWïkï talqılawı', u'مەدياۋيكي تالقىلاۋى'],
'kl': [u'Mediawikip oqalliffia', u'MediaWiki-diskussion'],
'km': [u'ការពិភាក្សាអំពីមេឌាវិគី', u'មីឌាវិគី-ពិភាក្សា'],
'kn': u'ಮೀಡೀಯವಿಕಿ ಚರ್ಚೆ',
'ko': [u'미디어위키토론', u'MediaWiki토론'],
'koi': u'Обсуждение MediaWiki',
'krc': u'MediaWiki-ни сюзюу',
'ks': u'میڈیاوکی بَحَژ',
'ksh': [u'MediaWiki Klaaf', u'MedijaWikki Klaaf'],
'ku': [u'Gotûbêja MediaWiki', u'MediaWiki nîqaş'],
'kv': [u'МедиаВики донъялӧм', u'Обсуждение MediaWiki'],
'kw': [u'Keskows MediaWiki', u'Cows MediaWiki', u'Kescows MediaWiki'],
'la': u'Disputatio MediaWiki',
'lad': [u'Diskusyón de MedyaViki', u'MediaWiki Discusión', u'Diskussión de Xabblón'],
'lb': u'MediaWiki Diskussioun',
'lbe': u'MediaWikiлиясса ихтилат',
'lez': [u'MediaWiki веревирд авун', u'Обсуждение MediaWiki'],
'li': u'Euverlèk MediaWiki',
'lij': [u'Discûscioîn MediaWiki', u'Discussioni MediaWiki'],
'lmo': [u'Ciciarada MediaWiki', u'Discussioni MediaWiki'],
'ln': u'Discussion MediaWiki',
'lo': u'ສົນທະນາກ່ຽວກັບມີເດຍວິກິ',
'lt': u'MediaWiki aptarimas',
'ltg': u'Sprīža ap MediaWiki',
'lv': u'MediaWiki diskusija',
'map-bms': [u'Dhiskusi MediaWiki', u'MediaWiki Dhiskusi'],
'mdf': [u'МедиаВики корхнема', u'Обсуждение MediaWiki'],
'mg': [u'Dinika amin\'ny MediaWiki', u'Discussion MediaWiki'],
'mhr': u'Обсуждение MediaWiki',
'min': [u'Rundiang MediaWiki', u'Pembicaraan MediaWiki'],
'mk': u'Разговор за МедијаВики',
'ml': [u'മീഡിയവിക്കി സംവാദം', u'മീസം'],
'mn': u'МедиаВикигийн хэлэлцүүлэг',
'mr': u'मिडियाविकी चर्चा',
'mrj': [u'MediaWiki-м кӓнгӓшӹмӓш', u'MediaWiki кӓнгӓшӹмӓш'],
'ms': [u'Perbincangan MediaWiki', u'MediaWiki Perbualan'],
'mt': [u'Diskussjoni MediaWiki', u'Diskuti MedjaWiki'],
'mwl': [u'Cumbersa Biqui', u'MediaWiki Discussão'],
'myv': u'MediaWiki-нь кортамось',
'mzn': [u'مدیاویکی گپ', u'مهدیاویکی گپ', u'بحث مدیاویکی', u'مهدیا ویکی گپ'],
'nah': [u'Huiquimedia tēixnāmiquiliztli', u'MediaWiki Discusión'],
'nap': [u'MediaWiki chiàcchiera', u'Discussioni MediaWiki'],
'nds': [u'MediaWiki Diskuschoon', u'MediaWiki Diskussion'],
'nds-nl': u'Overleg MediaWiki',
'ne': u'मीडियाविकि वार्ता',
'new': u'मिडियाविकि खँलाबँला',
'nl': u'Overleg MediaWiki',
'nn': u'MediaWiki-diskusjon',
'no': u'MediaWiki-diskusjon',
'nso': u'Poledišano ya MediaWiki',
'nv': u'MediaWiki baa yáshtiʼ',
'oc': [u'Discussion MediaWiki', u'Discussion Mediaòiqui', u'Discutida Mediaòiqui'],
'or': u'ମିଡ଼ିଆଉଇକି ଆଲୋଚନା',
'os': [u'MediaWiki-йы тæрхон', u'Дискусси MediaWiki', u'Тæрхон MediaWiki'],
'pa': [u'ਮੀਡੀਆਵਿਕੀ ਗੱਲ-ਬਾਤ', u'ਮੀਡੀਆਵਿਕਿ ਚਰਚਾ'],
'pcd': u'Discussion MediaWiki',
'pdc': [u'MediaWiki Dischbedutt', u'MediaWiki Diskussion'],
'pfl': [u'MediaWiki Dischbediere', u'MediaWiki Diskussion'],
'pi': u'मीडियाविकि सम्भासित',
'pl': u'Dyskusja MediaWiki',
'pms': u'Discussion dla MediaWiki',
'ps': u'د ميډياويکي خبرې اترې',
'pt': u'MediaWiki Discussão',
'qu': u'MediaWiki rimanakuy',
'rm': u'MediaWiki discussiun',
'rmy': [u'MediyaViki vakyarimata', u'Discuţie MediaWiki'],
'ro': [u'Discuție MediaWiki', u'Discuţie MediWiki', u'Discuţie MediaWiki'],
'ru': u'Обсуждение MediaWiki',
'rue': [u'Діскузія ку MediaWiki', u'Дізкузія ку MediaWiki'],
'sa': [u'मिडियाविकीसम्भाषणम्', u'मिडियाविकीसंभाषणं'],
'sah': u'Обсуждение MediaWiki',
'sc': u'Cuntierra MediaWiki',
'scn': u'Discussioni MediaWiki',
'sd': u'ذريعات وڪي بحث',
'se': u'MediaWiki-ságastallan',
'sg': u'Discussion MediaWiki',
'sh': u'Mediawiki razgovor',
'si': [u'මාධ්යවිකි සාකච්ඡාව', u'විකිමාධ්ය සාකච්ඡාව'],
'sk': [u'Diskusia k MediaWiki', u'Komentár k MediaWiki'],
'sl': u'Pogovor o MediaWiki',
'sq': u'MediaWiki diskutim',
'sr': [u'Разговор о Медијавикију', u'Razgovor o MedijaVikiju', u'Разговор о МедијаВикију'],
'srn': [u'Taki fu MediaWiki', u'Overleg MediaWiki'],
'stq': u'MediaWiki Diskussion',
'su': [u'Obrolan MédiaWiki', u'Obrolan MediaWiki'],
'sv': [u'MediaWiki-diskussion', u'MediaWiki diskussion'],
'sw': [u'Majadiliano ya MediaWiki', u'MediaWiki majadiliano'],
'szl': [u'Dyskusyjo MediaWiki', u'Dyskusja MediaWiki'],
'ta': u'மீடியாவிக்கி பேச்சு',
'te': u'మీడియావికీ చర్చ',
'tet': u'Diskusaun MediaWiki',
'tg': u'Баҳси медиавики',
'th': u'คุยเรื่องมีเดียวิกิ',
'tk': u'MediaWiki çekişme',
'tl': u'Usapang MediaWiki',
'tlh': u"MediaWiki ja'chuq",
'tpi': u'Toktok bilong mediawiki',
'tr': [u'MediaWiki tartışma', u'MedyaViki tartışma'],
'tt': [u'МедиаВики бәхәсе', u'Обсуждение MediaWiki', u'Медиа Вики бәхәсе', u'MediaWiki bäxäse'],
'ty': u'Discussion MediaWiki',
'udm': u'MediaWiki сярысь вераськон',
'ug': u'MediaWiki مۇنازىرىسى',
'uk': u'Обговорення MediaWiki',
'ur': [u'تبادلۂ خیال میڈیاویکی', u'تبادلۂ خیال میڈیاوکی'],
'uz': [u'MediaWiki munozarasi', u'MediyaViki munozarasi'],
'vec': u'Discussion MediaWiki',
'vep': u'Lodu MediaWikiš',
'vi': u'Thảo luận MediaWiki',
'vls': u'Discuusje MediaWiki',
'vo': u'Bespik dö sitanuns',
'wa': u'MediaWiki copene',
'war': [u'Hiruhimangraw hiton MedyaWiki', u'Hiruhimangraw hiton MediaWiki'],
'wo': [u'Waxtaani MediaWiki', u'Discussion MediaWiki'],
'wuu': u'MediaWiki讨论',
'xal': [u'MediaWiki туск меткән', u'MediaWiki тускар ухалвр'],
'xmf': [u'მედიავიკის განხილვა', u'მედიავიკი განხილვა'],
'yi': [u'מעדיעװיקי רעדן', u'מעדיעוויקי רעדן'],
'yo': u'Ọ̀rọ̀ mediaWiki',
'za': u'MediaWiki讨论',
'zea': u'Overleg MediaWiki',
'zh': u'MediaWiki talk',
'zh-yue': u'MediaWiki talk',
},
10: {
'_default': u'Template',
'ab': [u'Ашаблон', u'Шаблон'],
'ace': [u'Pola', u'Templat'],
'af': u'Sjabloon',
'ak': u'Şablon',
'als': u'Vorlage',
'am': [u'መለጠፊያ', u'መልጠፊያ'],
'an': u'Plantilla',
'ang': u'Bysen',
'ar': u'قالب',
'arc': u'ܩܠܒܐ',
'arz': u'قالب',
'as': [u'সাঁচ', u'साँचा'],
'ast': [u'Plantía', u'Plantilla'],
'av': u'Шаблон',
'ay': u'Plantilla',
'az': u'Şablon',
'ba': u'Ҡалып',
'bar': [u'Vorlog', u'Vorlage'],
'bat-smg': [u'Šabluons', u'Šablonas'],
'bcl': u'Plantilya',
'be': u'Шаблон',
'be-x-old': u'Шаблён',
'bg': u'Шаблон',
'bh': u'टेम्पलेट',
'bjn': [u'Citakan', u'Templat'],
'bm': u'Modèle',
'bn': u'টেমপ্লেট',
'bpy': u'মডেল',
'br': u'Patrom',
'bs': u'Šablon',
'bug': u'Templat',
'bxr': u'Загбар',
'ca': u'Plantilla',
'cbk-zam': u'Plantilla',
'cbs': u'Szablóna',
'ce': [u'Кеп', u'Дакъан', u'Куцкеп'],
'ceb': u'Plantilya',
'ckb': [u'داڕێژە', u'قاڵب'],
'crh': [u'Şablon', u'Шаблон'],
'cs': u'Šablona',
'csb': u'Szablóna',
'cu': [u'Обраꙁьць', u'Шаблон', u'Образьць'],
'cv': u'Шаблон',
'cy': u'Nodyn',
'da': u'Skabelon',
'de': u'Vorlage',
'diq': u'Şablon',
'dsb': u'Pśedłoga',
'dv': u'ފަންވަތް',
'el': u'Πρότυπο',
'eo': u'Ŝablono',
'es': u'Plantilla',
'et': u'Mall',
'eu': u'Txantiloi',
'ext': u'Prantilla',
'fa': u'الگو',
'ff': u'Modèle',
'fi': u'Malline',
'fiu-vro': u'Näüdüs',
'fo': u'Fyrimynd',
'fr': u'Modèle',
'frp': u'Modèlo',
'frr': u'Vorlage',
'fur': u'Model',
'fy': u'Berjocht',
'ga': [u'Teimpléad', u'Múnla'],
'gag': u'Şablon',
'gan': u'模板',
'gd': u'Teamplaid',
'gl': u'Modelo',
'glk': u'الگو',
'gn': u'Tembiecharã',
'got': u'𐍆𐌰𐌿𐍂𐌰𐌼𐌴𐌻𐌴𐌹𐌽𐍃',
'gu': u'ઢાંચો',
'gv': u'Clowan',
'haw': u'Anakuhi',
'he': u'תבנית',
'hi': u'साँचा',
'hr': u'Predložak',
'hsb': u'Předłoha',
'ht': u'Modèl',
'hu': u'Sablon',
'hy': u'Կաղապար',
'ia': u'Patrono',
'id': u'Templat',
'ie': u'Avise',
'ig': u'Àtụ',
'ilo': u'Plantilia',
'io': [u'Shablono', u'Modelo'],
'is': u'Snið',
'ja': u'テンプレート',
'jv': u'Cithakan',
'ka': u'თარგი',
'kaa': [u'Shablon', u'Үлгі', u'ٷلگٸ', u'ٴۇلگٴى'],
'kab': [u'Talɣa', u'Talγa'],
'kbd': u'Шаблон',
'kk': [u'Үлгі', u'Ülgi', u'ٷلگٸ', u'ٴۇلگٴى'],
'kl': [u'Ilisserut', u'Skabelon'],
'km': u'ទំព័រគំរូ',
'kn': u'ಟೆಂಪ್ಲೇಟು',
'ko': u'틀',
'koi': u'Шаблон',
'krc': u'Шаблон',
'ks': u'فرما',
'ksh': u'Schablon',
'ku': u'Şablon',
'kv': u'Шаблон',
'kw': [u'Skantlyn', u'Scantlyn'],
'ky': u'Калып',
'la': u'Formula',
'lad': [u'Xablón', u'Plantilla', u'Xabblón'],
'lb': u'Schabloun',
'lbe': u'Шаблон',
'lez': u'Шаблон',
'li': u'Sjabloon',
'lmo': [u'Mudel', u'Model'],
'ln': u'Modèle',
'lo': u'ແມ່ແບບ',
'lt': u'Šablonas',
'ltg': u'Taiss',
'lv': u'Veidne',
'map-bms': u'Cithakan',
'mdf': u'Шаблон',
'mg': [u'Endrika', u'Modèle'],
'mhr': [u'Кышкар', u'Шаблон', u'Ямдылык'],
'min': u'Templat',
'mk': u'Шаблон',
'ml': [u'ഫലകം', u'ഫ'],
'mn': u'Загвар',
'mr': u'साचा',
'mrj': u'Шаблон',
'ms': u'Templat',
'mt': u'Mudell',
'mwl': [u'Modelo', u'Predefinição'],
'myv': u'ЛопаПарцун',
'mzn': [u'شابلون', u'الگو'],
'nah': [u'Nemachiyōtīlli', u'Plantilla'],
'nap': u'Modello',
'nds': [u'Vörlaag', u'Vorlage'],
'nds-nl': [u'Mal', u'Sjabloon'],
'ne': u'ढाँचा',
'nl': u'Sjabloon',
'nn': u'Mal',
'no': u'Mal',
'nv': u'Bee álnééhí',
'oc': u'Modèl',
'or': [u'ଛାଞ୍ଚ', u'ଟେଁପଲେଟ', u'ଟେମ୍ପଲେଟ'],
'os': [u'Хуызæг', u'Шаблон'],
'pa': [u'ਫਰਮਾ', u'ਨਮੂਨਾ'],
'pcd': u'Modèle',
'pdc': [u'Moddel', u'Vorlage'],
'pfl': [u'Vorlach', u'Vorlage'],
'pi': u'पटिरूप',
'pl': u'Szablon',
'pms': u'Stamp',
'pnt': u'Πρότυπον',
'ps': u'کينډۍ',
'pt': u'Predefinição',
'qu': u'Plantilla',
'rm': u'Model',
'rmy': u'Sikavno',
'ro': u'Format',
'ru': u'Шаблон',
'rue': u'Шаблона',
'sa': [u'फलकम्', u'बिंबधर'],
'sah': u'Халыып',
'sd': u'سانچو',
'se': u'Málle',
'sg': u'Modèle',
'sh': u'Šablon',
'si': u'සැකිල්ල',
'sk': u'Šablóna',
'sl': u'Predloga',
'sq': u'Stampa',
'sr': [u'Шаблон', u'Šablon'],
'srn': [u'Ankra', u'Sjabloon'],
'stq': u'Foarloage',
'su': u'Citakan',
'sv': u'Mall',
'sw': u'Kigezo',
'szl': [u'Muster', u'Szablon'],
'ta': u'வார்ப்புரு',
'te': u'మూస',
'tg': u'Шаблон',
'th': u'แม่แบบ',
'tk': u'Şablon',
'tl': [u'Padron', u'Suleras'],
'tlh': u"chen'ay'",
'tpi': u'Templet',
'tr': u'Şablon',
'tt': [u'Калып', u'Үрнәк', u'Шаблон', u'Ürnäk'],
'ty': u'Modèle',
'udm': u'Шаблон',
'ug': u'قېلىپ',
'uk': u'Шаблон',
'ur': u'سانچہ',
'uz': [u'Andoza', u'Shablon'],
'vec': u'Modèl',
'vep': u'Šablon',
'vi': [u'Bản mẫu', u'Tiêu bản'],
'vls': u'Patrôon',
'vo': u'Samafomot',
'wa': u'Modele',
'war': u'Batakan',
'wo': [u'Royuwaay', u'Modèle'],
'wuu': u'模板',
'xal': [u'Кевләр', u'Зура'],
'xmf': u'თარგი',
'yi': u'מוסטער',
'yo': u'Àdàkọ',
'za': u'模板',
'zea': u'Sjabloon',
'zh': [u'Template', u'T', u'模板', u'样板', u'樣板'],
'zh-yue': [u'Template', u'T', u'模'],
},
11: {
'_default': u'Template talk',
'ab': [u'Ашаблон ахцәажәара', u'Обсуждение шаблона'],
'ace': [u'Marit Pola', u'Pembicaraan Templat', u'Templat Pembicaraan'],
'af': u'Sjabloonbespreking',
'ak': u'Şablon nkɔmbɔ',
'als': u'Vorlage Diskussion',
'am': [u'መለጠፊያ ውይይት', u'መልጠፊያ ውይይት'],
'an': u'Descusión plantilla',
'ang': u'Bysengesprec',
'ar': u'نقاش القالب',
'arc': u'ܡܡܠܠܐ ܕܩܠܒܐ',
'arz': u'نقاش القالب',
'as': [u'সাঁচ বাৰ্তা', u'साँचा वार्ता', u'সাঁচ বার্তা'],
'ast': [u'Plantía alderique', u'Plantilla discusión'],
'av': u'Обсуждение шаблона',
'ay': u'Plantilla discusión',
'az': u'Şablon müzakirəsi',
'ba': [u'Ҡалып буйынса фекерләшеү', u'Ҡалып б-са фекер алышыу'],
'bar': [u'Vorlog Dischkrian', u'Vorlage Diskussion'],
'bat-smg': [u'Šabluona aptarėms', u'Šablono aptarimas'],
'bcl': u'Olay sa plantilya',
'be': u'Размовы пра шаблон',
'be-x-old': u'Абмеркаваньне шаблёну',
'bg': u'Шаблон беседа',
'bh': u'टेम्पलेट वार्ता',
'bjn': [u'Pamandiran Citakan', u'Pembicaraan Templat'],
'bm': [u'Discussion modèle', u'Discussion Modèle'],
'bn': u'টেমপ্লেট আলোচনা',
'bpy': u'মডেলর য়্যারী',
'br': u'Kaozeadenn Patrom',
'bs': u'Razgovor o šablonu',
'bug': [u'Pembicaraan Templat', u'Templat Pembicaraan'],
'bxr': [u'Загбар хэлэлсэхэ', u'Обсуждение шаблона'],
'ca': u'Plantilla Discussió',
'cbk-zam': u'Plantilla discusión',
'ce': [u'Кепан дийцаре', u'Дакъан дийца', u'Куцкеп дийцаре'],
'ceb': u'Hisgot sa Plantilya',
'ckb': [u'وتووێژی داڕێژە', u'لێدوانی قاڵب', u'لێدوانی داڕێژە'],
'crh': [u'Şablon muzakeresi', u'Шаблон музакереси'],
'cs': [u'Diskuse k šabloně', u'Šablona diskuse'],
'csb': u'Diskùsëjô Szablónë',
'cu': [u'Обраꙁьца бєсѣда', u'Образьца бесѣда'],
'cv': u'Шаблона сӳтсе явмалли',
'cy': u'Sgwrs Nodyn',
'da': u'Skabelondiskussion',
'de': u'Vorlage Diskussion',
'diq': [u'Şablon mesac', u'Şablon werênayış'],
'dsb': u'Diskusija wó pśedłoze',
'dv': [u'ފަންވަތުގެ ޚިޔާލު', u'ފަންވަތް ޚިޔާލު'],
'el': u'Συζήτηση προτύπου',
'eml': u'Discussioni template',
'eo': [u'Ŝablono-Diskuto', u'Ŝablona diskuto'],
'es': u'Plantilla discusión',
'et': u'Malli arutelu',
'eu': u'Txantiloi eztabaida',
'fa': u'بحث الگو',
'ff': [u'Discussion modèle', u'Discussion Modèle'],
'fi': u'Keskustelu mallineesta',
'fiu-vro': u'Näüdüse arotus',
'fo': [u'Fyrimyndakjak', u'Fyrimynd kjak'],
'fr': [u'Discussion modèle', u'Discussion Modèle'],
'frp': [u'Discussion modèlo', u'Discussion Modèlo'],
'frr': u'Vorlage Diskussion',
'fur': u'Discussion model',
'fy': u'Berjocht oerlis',
'ga': [u'Plé teimpléid', u'Plé múnla'],
'gag': [u'Şablon dartışma', u'Şablon tartışma'],
'gan': u'模板・談詑',
'gd': u'Deasbaireachd na teamplaid',
'gl': [u'Conversa modelo', u'Conversa Modelo'],
'glk': u'بحث الگو',
'gn': u'Tembiecharã myangekõi',
'got': u'𐍆𐌰𐌿𐍂𐌰𐌼𐌴𐌻𐌴𐌹𐌽𐌰𐌹𐍃 𐌲𐌰𐍅𐌰𐌿𐍂𐌳𐌾𐌰',
'gu': u'ઢાંચાની ચર્ચા',
'gv': u'Resooney clowan',
'haw': u'Kūkākūkā o anakuhi',
'he': u'שיחת תבנית',
'hi': u'साँचा वार्ता',
'hif': u'Template ke baat',
'hr': u'Razgovor o predlošku',
'hsb': u'Diskusija k předłoze',
'ht': u'Diskisyon Modèl',
'hu': [u'Sablonvita', u'Sablon vita'],
'hy': u'Կաղապարի քննարկում',
'ia': u'Discussion Patrono',
'id': [u'Pembicaraan Templat', u'Templat Pembicaraan'],
'ie': u'Avise Discussion',
'ig': u'Okwu àtụ',
'ilo': u'Plantilia tungtungan',
'io': [u'Shablono Debato', u'Modelo Debato'],
'is': u'Sniðaspjall',
'it': u'Discussioni template',
'ja': [u'テンプレート・トーク', u'Template‐ノート'],
'jv': [u'Dhiskusi Cithakan', u'Cithakan Dhiskusi'],
'ka': [u'თარგის განხილვა', u'თარგი განხილვა'],
'kaa': [u'Shablon sa\'wbeti', u'Үлгі талқылауы', u'ٷلگٸ تالقىلاۋى', u'ٴۇلگٴى تالقىلاۋى'],
'kab': [u'Amyannan n talɣa', u'Amyannan n talγa'],
'kbd': [u'Шаблон тепсэлъэхьыгъуэ', u'Обсуждение шаблона'],
'kk': [u'Үлгі талқылауы', u'Ülgi talqılawı', u'ٷلگٸ تالقىلاۋى', u'ٴۇلگٴى تالقىلاۋى'],
'kl': [u'Ilisserummi oqallinneq', u'Skabelondiskussion'],
'km': [u'ការពិភាក្សាអំពីទំព័រគំរូ', u'ទំព័រគំរូ-ពិភាក្សា'],
'kn': u'ಟೆಂಪ್ಲೇಟು ಚರ್ಚೆ',
'ko': u'틀토론',
'koi': u'Обсуждение шаблона',
'krc': u'Шаблонну сюзюу',
'ks': u'فرما بَحَژ',
'ksh': u'Schablone Klaaf',
'ku': [u'Gotûbêja şablonê', u'Şablon nîqaş'],
'kv': [u'Шаблон донъялӧм', u'Обсуждение шаблона'],
'kw': [u'Keskows Skantlyn', u'Cows Scantlyn', u'Kescows Skantlyn'],
'la': u'Disputatio Formulae',
'lad': [u'Diskusyón de Xablón', u'Plantilla Discusión', u'Diskusyón de Xabblón'],
'lb': u'Schabloun Diskussioun',
'lbe': u'Шаблондалиясса ихтилат',
'lez': [u'Шаблон веревирд авун', u'Обсуждение шаблона'],
'li': u'Euverlèk sjabloon',
'lij': [u'Discûscioîn template', u'Discussioni template'],
'lmo': [u'Ciciarada Mudel', u'Discussioni template', u'Ciciarada Model'],
'ln': [u'Discussion modèle', u'Discussion Modèle'],
'lo': u'ສົນທະນາກ່ຽວກັບແມ່ແບບ',
'lt': u'Šablono aptarimas',
'ltg': u'Sprīža ap taisu',
'lv': u'Veidnes diskusija',
'map-bms': [u'Dhiskusi Cithakan', u'Cithakan Dhiskusi'],
'mdf': [u'Шаблон корхнема', u'Обсуждение шаблона'],
'mg': [u'Dinika amin\'ny endrika', u'Discussion Modèle'],
'mhr': [u'Кышкар шотышто каҥашымаш', u'Обсуждение шаблона', u'Ямдылык шотышто каҥашымаш', u'Ямдылыкын каҥашымаш'],
'min': [u'Rundiang Templat', u'Pembicaraan Templat'],
'mk': u'Разговор за шаблон',
'ml': [u'ഫലകത്തിന്റെ സംവാദം', u'ഫസം'],
'mn': u'Загварын хэлэлцүүлэг',
'mr': u'साचा चर्चा',
'mrj': [u'Шаблоным кӓнгӓшӹмӓш', u'Шаблон кӓнгӓшӹмӓш'],
'ms': [u'Perbincangan templat', u'Perbualan Templat'],
'mt': [u'Diskussjoni mudell', u'Diskuti template'],
'mwl': [u'Cumbersa Modelo', u'Predefinição Discussão'],
'myv': u'ЛопаПарцундо кортамось',
'mzn': [u'شابلون گپ', u'بحث الگو'],
'nah': [u'Nemachiyōtīlli tēixnāmiquiliztli', u'Plantilla Discusión'],
'nap': [u'Modello chiàcchiera', u'Discussioni template'],
'nds': [u'Vörlaag Diskuschoon', u'Vorlage Diskussion'],
'nds-nl': [u'Overleg mal', u'Overleg sjabloon'],
'ne': u'ढाँचा वार्ता',
'nl': u'Overleg sjabloon',
'nn': u'Maldiskusjon',
'no': u'Maldiskusjon',
'nso': u'Poledišano ya Template',
'nv': u'Bee álnééhí baa yáshtiʼ',
'oc': [u'Discussion Modèl', u'Discutida Modèl'],
'or': [u'ଛାଞ୍ଚ ଆଲୋଚନା', u'ଟେଁପଲେଟ ଆଲୋଚନା', u'ଟେମ୍ପଲେଟ ଆଲୋଚନା'],
'os': [u'Хуызæджы тæрхон', u'Шаблоны тæрхон', u'Шаблоны тыххæй дискусси'],
'pa': [u'ਫਰਮਾ ਗੱਲ-ਬਾਤ', u'ਨਮੂਨਾ ਚਰਚਾ'],
'pcd': [u'Discussion modèle', u'Discussion Modèle'],
'pdc': [u'Moddel Dischbedutt', u'Vorlage Diskussion'],
'pfl': [u'Vorlach Dischbediere', u'Vorlage Diskussion'],
'pi': u'पटिरूप सम्भासित',
'pl': u'Dyskusja szablonu',
'pms': u'Discussion dlë stamp',
'pnt': u'Καλάτσεμαν πρότυπι',
'ps': u'د کينډۍ خبرې اترې',
'pt': u'Predefinição Discussão',
'qu': u'Plantilla rimanakuy',
'rm': u'Model discussiun',
'rmy': [u'Sikavno vakyarimata', u'Discuţie Format'],
'ro': [u'Discuție Format', u'Discuţie Format'],
'ru': u'Обсуждение шаблона',
'rue': u'Діскузія ку шаблонї',
'sa': [u'फलकस्य सम्भाषणम्', u'बिंबधर संभाषणं'],
'sah': u'Халыып ырытыыта',
'sc': u'Cuntierra template',
'scn': [u'Discussioni template', u'Discussioni Template'],
'sd': u'سنچو بحث',
'se': u'Málleságastallan',
'sg': [u'Discussion modèle', u'Discussion Modèle'],
'sh': u'Razgovor o šablonu',
'si': u'සැකිලි සාකච්ඡාව',
'sk': u'Diskusia k šablóne',
'sl': u'Pogovor o predlogi',
'sq': u'Stampa diskutim',
'sr': [u'Разговор о шаблону', u'Razgovor o šablonu'],
'srn': [u'Taki fu ankra', u'Overleg sjabloon'],
'stq': u'Foarloage Diskussion',
'su': u'Obrolan citakan',
'sv': u'Malldiskussion',
'sw': [u'Majadiliano ya kigezo', u'Kigezo majadiliano'],
'szl': [u'Dyskusyjo mustra', u'Dyskusja szablonu'],
'ta': u'வார்ப்புரு பேச்சு',
'te': u'మూస చర్చ',
'tet': u'Diskusaun Template',
'tg': u'Баҳси шаблон',
'th': u'คุยเรื่องแม่แบบ',
'tk': u'Şablon çekişme',
'tl': [u'Usapang padron', u'Usapang suleras'],
'tlh': u"chen'ay' ja'chuq",
'tpi': u'Toktok bilong templet',
'tr': u'Şablon tartışma',
'tt': [u'Калып бәхәсе', u'Үрнәк бәхәсе', u'Обсуждение шаблона', u'Шаблон бәхәсе', u'Ürnäk bäxäse'],
'ty': [u'Discussion modèle', u'Discussion Modèle'],
'udm': u'Шаблон сярысь вераськон',
'ug': u'قېلىپ مۇنازىرىسى',
'uk': [u'Обговорення шаблону', u'Обговорення шаблона'],
'ur': u'تبادلۂ خیال سانچہ',
'uz': [u'Andoza munozarasi', u'Shablon munozarasi'],
'vec': [u'Discussion modèl', u'Discussion template'],
'vep': u'Lodu šablonas',
'vi': [u'Thảo luận Bản mẫu', u'Thảo luận Tiêu bản'],
'vls': u'Discuusje patrôon',
'vo': u'Samafomotibespik',
'wa': [u'Modele copene', u'Discussion Modèle'],
'war': u'Hiruhimangraw hiton batakan',
'wo': [u'Waxtaani royuwaay', u'Discussion Modèle'],
'wuu': [u'模板讨论', u'模板对话'],
'xal': [u'Зуран туск меткән', u'Зуран тускар ухалвр'],
'xmf': [u'თარგის განხილვა', u'თარგი განხილვა'],
'yi': u'מוסטער רעדן',
'yo': u'Ọ̀rọ̀ àdàkọ',
'za': [u'模板讨论', u'模板对话'],
'zea': u'Overleg sjabloon',
'zh': [u'Template talk', u'模板对话', u'模板對話', u'模板讨论', u'模板討論', u'样板对话', u'樣板對話', u'样板讨论', u'樣板討論'],
'zh-yue': [u'Template talk', u'模傾偈', u'模 討論', u'模 讨论'],
},
12: {
'_default': u'Help',
'ab': [u'Ацхыраара', u'Справка'],
'ace': [u'Beunantu', u'Bantuan'],
'af': u'Hulp',
'als': u'Hilfe',
'am': u'እርዳታ',
'an': u'Aduya',
'ar': u'مساعدة',
'arc': u'ܥܘܕܪܢܐ',
'arz': u'مساعدة',
'as': u'সহায়',
'ast': [u'Aida', u'Ayuda'],
'av': u'Справка',
'ay': u'Ayuda',
'az': u'Kömək',
'ba': u'Белешмә',
'bar': [u'Huif', u'Hilfe'],
'bat-smg': [u'Pagelba', u'Pagalba'],
'bcl': u'Tabang',
'be': u'Даведка',
'be-x-old': u'Дапамога',
'bg': u'Помощ',
'bh': u'मदद',
'bjn': [u'Patulung', u'Bantuan'],
'bm': u'Aide',
'bn': u'সাহায্য',
'bpy': u'পাংলাক',
'br': u'Skoazell',
'bs': u'Pomoć',
'bug': u'Bantuan',
'bxr': [u'Туһаламжа', u'Справка'],
'ca': u'Ajuda',
'cbk-zam': u'Ayuda',
'cbs': u'Pòmòc',
'ce': [u'ГӀо', u'ГІо', u'Гlо'],
'ceb': u'Tabang',
'ch': u'Ayudo',
'ckb': u'یارمەتی',
'crh': [u'Yardım', u'Ярдым'],
'cs': u'Nápověda',
'csb': u'Pòmòc',
'cu': u'Помощь',
'cv': u'Пулăшу',
'cy': u'Cymorth',
'da': u'Hjælp',
'de': u'Hilfe',
'diq': [u'Peşti', u'Desteg'],
'dsb': u'Pomoc',
'dv': u'އެހީ',
'el': u'Βοήθεια',
'eml': u'Aiuto',
'eo': u'Helpo',
'es': u'Ayuda',
'et': u'Juhend',
'eu': u'Laguntza',
'fa': u'راهنما',
'ff': u'Aide',
'fi': u'Ohje',
'fiu-vro': u'Oppus',
'fo': u'Hjálp',
'fr': u'Aide',
'frp': u'Éde',
'frr': u'Hilfe',
'fur': u'Jutori',
'fy': u'Hulp',
'ga': u'Cabhair',
'gag': u'Yardım',
'gan': u'幫助',
'gd': u'Cobhair',
'gl': u'Axuda',
'glk': u'راهنما',
'gn': u'Pytyvõ',
'got': u'𐌷𐌹𐌻𐍀𐌰',
'gu': u'મદદ',
'gv': u'Cooney',
'haw': u'Kōkua',
'he': u'עזרה',
'hi': u'सहायता',
'hif': u'madat',
'hr': u'Pomoć',
'hsb': u'Pomoc',
'ht': u'Èd',
'hu': u'Segítség',
'hy': u'Օգնություն',
'ia': u'Adjuta',
'id': u'Bantuan',
'ie': u'Auxilie',
'ig': [u'Nkwadọ', u'Nkwádọ'],
'ilo': u'Tulong',
'io': u'Helpo',
'is': u'Hjálp',
'it': u'Aiuto',
'ja': u'ヘルプ',
'jv': u'Pitulung',
'ka': u'დახმარება',
'kaa': [u'Anıqlama', u'Анықтама', u'انىقتاما'],
'kab': u'Tallat',
'kbd': [u'ДэӀэпыкъуэгъуэ', u'Справка'],
'kg': u'Lusadisu',
'kk': [u'Анықтама', u'Anıqtama', u'انىقتاما'],
'kl': [u'Ikiuutit', u'Hjælp'],
'km': u'ជំនួយ',
'kn': u'ಸಹಾಯ',
'ko': u'도움말',
'koi': u'Справка',
'krc': u'Болушлукъ',
'ks': u'پَلزُن',
'ksh': [u'Hölp', u'Hülp'],
'ku': u'Alîkarî',
'kv': u'Справка',
'kw': u'Gweres',
'ky': u'Жардам',
'la': u'Auxilium',
'lad': [u'Ayudo', u'Ayuda'],
'lb': u'Hëllef',
'lbe': u'Кумаг',
'lez': u'Справка',
'lij': [u'Agiûtto', u'Aiuto'],
'lmo': [u'Jüt', u'Aiuto', u'Aida'],
'ln': u'Aide',
'lo': u'ຊ່ວຍເຫຼືອ',
'lt': u'Pagalba',
'ltg': u'Paleigs',
'lv': u'Palīdzība',
'map-bms': u'Pitulung',
'mdf': [u'Лезкс', u'Справка'],
'mg': [u'Fanoroana', u'Aide', u'Fanampiana'],
'mhr': [u'Полшык', u'Справка'],
'min': u'Bantuan',
'mk': u'Помош',
'ml': [u'സഹായം', u'സ'],
'mn': u'Тусламж',
'mr': [u'सहाय्य', u'साहाय्य'],
'mrj': u'Палшык',
'ms': u'Bantuan',
'mt': u'Għajnuna',
'mwl': u'Ajuda',
'myv': u'Лезкс',
'mzn': [u'رانما', u'راهنما', u'رانهما'],
'nah': [u'Tēpalēhuiliztli', u'Ayuda'],
'nap': [u'Ajùto', u'Aiuto'],
'nds': [u'Hülp', u'Hilfe'],
'nds-nl': u'Hulpe',
'ne': u'मद्दत',
'new': u'ग्वाहालि',
'nl': u'Help',
'nn': u'Hjelp',
'no': u'Hjelp',
'nso': u'Thušo',
'nv': u'Anáʼálwoʼ',
'oc': u'Ajuda',
'or': [u'ସହଯୋଗ', u'ସାହାଯ୍ୟ'],
'os': u'Æххуыс',
'pa': u'ਮਦਦ',
'pcd': u'Aide',
'pdc': [u'Hilf', u'Hilfe'],
'pfl': [u'Hilf', u'Hilfe'],
'pi': u'अवस्सय',
'pl': u'Pomoc',
'pms': u'Agiut',
'pnt': u'Βοήθειαν',
'ps': u'لارښود',
'pt': u'Ajuda',
'qu': u'Yanapa',
'rm': u'Agid',
'rmy': u'Zhutipen',
'ro': u'Ajutor',
'ru': u'Справка',
'rue': u'Поміч',
'sa': [u'सहाय्यम्', u'सहाय्य', u'उपकारः'],
'sah': u'Көмө',
'sc': u'Agiudu',
'scn': u'Aiutu',
'sd': u'مدد',
'se': u'Veahkki',
'sg': u'Aide',
'sh': u'Pomoć',
'si': u'උදවු',
'sk': u'Pomoc',
'sl': u'Pomoč',
'sq': u'Ndihmë',
'sr': [u'Помоћ', u'Pomoć'],
'srn': u'Yepi',
'stq': u'Hälpe',
'su': u'Pitulung',
'sv': u'Hjälp',
'sw': u'Msaada',
'szl': [u'Půmoc', u'Pomoc'],
'ta': u'உதவி',
'te': [u'సహాయం', u'సహాయము'],
'tet': u'Ajuda',
'tg': u'Роҳнамо',
'th': u'วิธีใช้',
'tk': u'Ýardam',
'tl': u'Tulong',
'tlh': u'QaH',
'tpi': u'Halivim',
'tr': u'Yardım',
'tt': [u'Ярдәм', u'Справка', u'Yärdäm'],
'ty': u'Aide',
'udm': u'Валэктон',
'ug': u'ياردەم',
'uk': u'Довідка',
'ur': u'معاونت',
'uz': u'Yordam',
'vec': [u'Ajuto', u'Aiuto'],
'vep': u'Abu',
'vi': u'Trợ giúp',
'vls': u'Ulpe',
'vo': u'Yuf',
'wa': u'Aidance',
'war': u'Bulig',
'wo': [u'Ndimbal', u'Aide'],
'wuu': u'帮助',
'xal': [u'Цәәлһлһн', u'Цəəлһлһн'],
'xmf': u'დახმარება',
'yi': u'הילף',
'yo': u'Ìrànlọ́wọ́',
'za': u'帮助',
'zea': u'Ulpe',
'zh': [u'Help', u'帮助', u'幫助'],
'zh-yue': [u'Help', u'幫手', u'幫助', u'說明', u'帮手', u'帮助', u'说明'],
},
13: {
'_default': u'Help talk',
'ab': [u'Ацхыраара ахцәажәара', u'Обсуждение справки'],
'ace': [u'Marit Beunantu', u'Pembicaraan Bantuan', u'Bantuan Pembicaraan'],
'af': u'Hulpbespreking',
'ak': u'Help nkɔmbɔ',
'als': u'Hilfe Diskussion',
'am': u'እርዳታ ውይይት',
'an': u'Descusión aduya',
'ang': u'Helpgesprec',
'ar': u'نقاش المساعدة',
'arc': u'ܡܡܠܠܐ ܕܥܘܕܪܢܐ',
'arz': u'نقاش المساعدة',
'as': [u'সহায় বাৰ্তা', u'সহায় বার্তা'],
'ast': [u'Ayuda alderique', u'Ayuda discusión', u'Aida alderique'],
'av': u'Обсуждение справки',
'ay': u'Ayuda discusión',
'az': u'Kömək müzakirəsi',
'ba': [u'Белешмә буйынса фекерләшеү', u'Белешмә б-са фекер алышыу'],
'bar': [u'Huif Dischkrian', u'Hilfe Diskussion'],
'bat-smg': [u'Pagelbas aptarėms', u'Pagalbos aptarimas'],
'bcl': u'Olay sa tabang',
'be': u'Размовы пра даведку',
'be-x-old': u'Абмеркаваньне дапамогі',
'bg': u'Помощ беседа',
'bh': u'मदद वार्ता',
'bjn': [u'Pamandiran Patulung', u'Pembicaraan Bantuan'],
'bm': [u'Discussion aide', u'Discussion Aide'],
'bn': u'সাহায্য আলোচনা',
'bpy': u'পাংলাকর য়্যারী',
'br': u'Kaozeadenn Skoazell',
'bs': u'Razgovor o pomoći',
'bug': [u'Pembicaraan Bantuan', u'Bantuan Pembicaraan'],
'bxr': [u'Туһаламжа хэлэлсэл', u'Обсуждение справки'],
'ca': u'Ajuda Discussió',
'cbk-zam': u'Ayuda discusión',
'ce': [u'ГӀо дийцаре', u'ГІодан дийца', u'Гlон дийцаре'],
'ceb': u'Hisgot sa Tabang',
'ch': u'Kombetsasion ni ayudo',
'ckb': [u'وتووێژی یارمەتی', u'لێدوانی یارمەتی'],
'crh': [u'Yardım muzakeresi', u'Разговор о помоћи'],
'cs': [u'Diskuse k nápovědě', u'Nápověda diskuse'],
'csb': u'Diskùsëjô Pòmòcë',
'cu': [u'Помощи бєсѣда', u'Помощи бесѣда'],
'cv': u'Пулăшăва сӳтсе явмалли',
'cy': u'Sgwrs Cymorth',
'da': u'Hjælp-diskussion',
'de': u'Hilfe Diskussion',
'diq': [u'Peşti mesac', u'Desteg werênayış', u'Peşti werênayış'],
'dsb': u'Diskusija wó pomocy',
'dv': [u'އެހީގެ ޚިޔާލު', u'އެހީ ޚިޔާލު'],
'el': u'Συζήτηση βοήθειας',
'eml': u'Discussioni aiuto',
'eo': [u'Helpo-Diskuto', u'Helpa diskuto'],
'es': u'Ayuda discusión',
'et': u'Juhendi arutelu',
'eu': u'Laguntza eztabaida',
'fa': u'بحث راهنما',
'ff': [u'Discussion aide', u'Discussion Aide'],
'fi': u'Keskustelu ohjeesta',
'fiu-vro': u'Oppusõ arotus',
'fo': [u'Hjálparkjak', u'Hjálp kjak'],
'fr': [u'Discussion aide', u'Discussion Aide'],
'frp': [u'Discussion éde', u'Discussion Éde'],
'frr': u'Hilfe Diskussion',
'fur': u'Discussion jutori',
'fy': u'Hulp oerlis',
'ga': u'Plé cabhrach',
'gag': [u'Yardım dartışma', u'Yardım tartışma'],
'gan': u'幫助・談詑',
'gd': u'Deasbaireachd na cobharach',
'gl': [u'Conversa axuda', u'Conversa Axuda'],
'glk': u'بحث راهنما',
'gn': u'Pytyvõ myangekõi',
'got': u'𐌷𐌹𐌻𐍀𐍉𐍃 𐌲𐌰𐍅𐌰𐌿𐍂𐌳𐌾𐌰',
'gu': u'મદદની ચર્ચા',
'gv': u'Resooney cooney',
'haw': u'Kūkākūkā o kōkua',
'he': u'שיחת עזרה',
'hi': u'सहायता वार्ता',
'hif': u'madat ke baat',
'hr': u'Razgovor o pomoći',
'hsb': u'Pomoc diskusija',
'ht': u'Diskisyon Èd',
'hu': [u'Segítségvita', u'Segítség vita'],
'hy': u'Օգնության քննարկում',
'ia': u'Discussion Adjuta',
'id': [u'Pembicaraan Bantuan', u'Bantuan Pembicaraan'],
'ie': u'Auxilie Discussion',
'ig': [u'Okwu nkwadọ', u'Okwu nkwádọ'],
'ilo': u'Tulong tungtungan',
'io': u'Helpo Debato',
'is': u'Hjálparspjall',
'it': u'Discussioni aiuto',
'ja': [u'ヘルプ・トーク', u'Help‐ノート'],
'jv': [u'Dhiskusi Pitulung', u'Pitulung Dhiskusi'],
'ka': [u'დახმარების განხილვა', u'დახმარება განხილვა'],
'kaa': [u'Anıqlama sa\'wbeti', u'Анықтама талқылауы', u'انىقتاما تالقىلاۋى'],
'kab': u'Amyannan n tallat',
'kbd': [u'ДэӀэпыкъуэгъуэ тепсэлъэхьыгъуэ', u'Обсуждение справки'],
'kg': u'Disolo lusadisu',
'kk': [u'Анықтама талқылауы', u'Anıqtama talqılawı', u'انىقتاما تالقىلاۋى'],
'kl': [u'Ikiuutini oqallinneq', u'Hjælp-diskussion'],
'km': [u'ការពិភាក្សាអំពីជំនួយ', u'ជំនួយ-ពិភាក្សា'],
'kn': u'ಸಹಾಯ ಚರ್ಚೆ',
'ko': u'도움말토론',
'koi': u'Обсуждение справки',
'krc': u'Болушлукъну сюзюу',
'ks': u'پَلزُن بَحَژ',
'ksh': [u'Hölp Klaaf', u'Hülp Klaaf'],
'ku': [u'Gotûbêja alîkariyê', u'Alîkarî nîqaş'],
'kv': u'Обсуждение справки',
'kw': [u'Keskows Gweres', u'Cows Gweres', u'Kescows Gweres'],
'la': u'Disputatio Auxilii',
'lad': [u'Diskusyón de Ayudo', u'Diskussión de Ayudo', u'Ayuda Discusión'],
'lb': u'Hëllef Diskussioun',
'lbe': u'Кумаграясса ихтилат',
'lez': u'Обсуждение справки',
'li': u'Euverlèk help',
'lij': [u'Discûscioîn agiûtto', u'Discussioni aiuto'],
'lmo': [u'Ciciarada Jüt', u'Discussioni aiuto', u'Ciciarada Aida'],
'ln': [u'Discussion aide', u'Discussion Aide'],
'lo': u'ສົນທະນາກ່ຽວກັບຊ່ວຍເຫຼືອ',
'lt': u'Pagalbos aptarimas',
'ltg': u'Sprīža ap paleigu',
'lv': u'Palīdzības diskusija',
'map-bms': [u'Dhiskusi Pitulung', u'Pitulung Dhiskusi'],
'mdf': [u'Лезкс корхнема', u'Обсуждение справки'],
'mg': [u'Dinika amin\'ny fanoroana', u'Discussion Aide', u'Dinika amin\'ny fanampiana'],
'mhr': [u'Полшык шотышто каҥашымаш', u'Обсуждение справки', u'Полшыкын каҥашымаш'],
'min': [u'Rundiang Bantuan', u'Pembicaraan Bantuan'],
'mk': u'Разговор за помош',
'ml': [u'സഹായത്തിന്റെ സംവാദം', u'സസം'],
'mn': u'Тусламжийн хэлэлцүүлэг',
'mr': [u'सहाय्य चर्चा', u'साहाय्य चर्चा'],
'mrj': [u'Палшыкым кӓнгӓшӹмӓш', u'Палшыкын кӓнгӓшӹмӓш'],
'ms': [u'Perbincangan bantuan', u'Perbualan Bantuan'],
'mt': [u'Diskussjoni għajnuna', u'Diskuti għajnuna'],
'mwl': [u'Cumbersa ajuda', u'Ajuda Discussão'],
'myv': u'Лезкстэ кортамось',
'mzn': [u'رانما گپ', u'رانهمائه گپ', u'بحث راهنما', u'رانهمای گپ'],
'nah': [u'Tēpalēhuiliztli tēixnāmiquiliztli', u'Ayuda Discusión'],
'nap': [u'Ajùto chiàcchiera', u'Discussioni aiuto'],
'nds': [u'Hülp Diskuschoon', u'Hilfe Diskussion'],
'nds-nl': [u'Overleg hulpe', u'Overleg kattegerie'],
'ne': u'मद्दत वार्ता',
'new': u'ग्वाहालि खँलाबँला',
'nl': u'Overleg help',
'nn': u'Hjelpdiskusjon',
'no': u'Hjelpdiskusjon',
'nso': u'Poledišano ya Thušo',
'nv': u'Anáʼálwoʼ baa yáshtiʼ',
'oc': [u'Discussion Ajuda', u'Discutida Ajuda'],
'or': [u'ସହଯୋଗ ଆଲୋଚନା', u'ସାହାଯ୍ୟ ଆଲୋଚନା'],
'os': [u'Æххуысы тæрхон', u'Æххуысы тыххæй дискусси'],
'pa': [u'ਮਦਦ ਗੱਲ-ਬਾਤ', u'ਮਦਦ ਚਰਚਾ'],
'pcd': [u'Discussion aide', u'Discussion Aide'],
'pdc': [u'Hilf Dischbedutt', u'Hilfe Diskussion'],
'pfl': [u'Hilf Dischbediere', u'Hilfe Diskussion'],
'pi': u'अवस्सय सम्भासित',
'pl': u'Dyskusja pomocy',
'pms': u"Discussion ant sl'agiut",
'pnt': u'Καλάτσεμαν βοήθειας',
'ps': u'د لارښود خبرې اترې',
'pt': u'Ajuda Discussão',
'qu': u'Yanapa rimanakuy',
'rm': u'Agid discussiun',
'rmy': [u'Zhutipen vakyarimata', u'Discuţie Ajutor'],
'ro': [u'Discuție Ajutor', u'Discuţie Ajutor'],
'ru': u'Обсуждение справки',
'rue': u'Діскузія ку помочі',
'sa': [u'सहाय्यस्य सम्भाषणम्', u'सहाय्यसंभाषणं', u'उपकारसंभाषणं'],
'sah': u'Көмө ырытыыта',
'sc': u'Cuntierra agiudu',
'scn': [u'Discussioni aiutu', u'Discussioni Aiutu'],
'sd': u'مدد بحث',
'se': u'Veahkkeságastallan',
'sg': [u'Discussion aide', u'Discussion Aide'],
'sh': u'Razgovor o pomoći',
'si': [u'උදවු සාකච්ඡාව', u'උදව සාකච්ඡාව'],
'sk': u'Diskusia k pomoci',
'sl': u'Pogovor o pomoči',
'sq': u'Ndihmë diskutim',
'sr': [u'Разговор о помоћи', u'Razgovor o pomoći'],
'srn': [u'Taki fu yepi', u'Overleg help'],
'stq': u'Hälpe Diskussion',
'su': u'Obrolan pitulung',
'sv': [u'Hjälpdiskussion', u'Hjälp diskussion'],
'sw': [u'Majadiliano ya msaada', u'Msaada majadiliano'],
'szl': [u'Dyskusyjo půmocy', u'Dyskusja pomocy'],
'ta': u'உதவி பேச்சு',
'te': [u'సహాయం చర్చ', u'సహాయము చర్చ'],
'tet': u'Diskusaun Ajuda',
'tg': u'Баҳси роҳнамо',
'th': u'คุยเรื่องวิธีใช้',
'tk': u'Ýardam çekişme',
'tl': u'Usapang tulong',
'tlh': u"QaH ja'chuq",
'tpi': u'Toktok bilong halivim',
'tr': u'Yardım tartışma',
'tt': [u'Ярдәм бәхәсе', u'Обсуждение справки', u'Yärdäm bäxäse'],
'ty': [u'Discussion aide', u'Discussion Aide'],
'udm': u'Валэктон сярысь вераськон',
'ug': u'ياردەم مۇنازىرىسى',
'uk': u'Обговорення довідки',
'ur': u'تبادلۂ خیال معاونت',
'uz': u'Yordam munozarasi',
'vec': [u'Discussion ajuto', u'Discussion aiuto'],
'vep': u'Lodu abus',
'vi': u'Thảo luận Trợ giúp',
'vls': u'Discuusje ulpe',
'vo': u'Yufibespik',
'wa': [u'Aidance copene', u'Discussion Aide'],
'war': u'Hiruhimangaw hiton bulig',
'wo': [u'Waxtaani ndimbal', u'Discussion Aide'],
'wuu': [u'帮助讨论', u'帮助对话'],
'xal': [u'Цәәлһлһин туск меткән', u'Цəəлһлһин тускар ухалвр'],
'xmf': [u'დახმარების განხილვა', u'დახმარება განხილვა'],
'yi': u'הילף רעדן',
'yo': u'Ọ̀rọ̀ ìrànlọ́wọ́',
'za': [u'帮助讨论', u'帮助对话'],
'zea': u'Overleg ulpe',
'zh': [u'Help talk', u'帮助讨论', u'幫助討論', u'幫助對話', u'帮助对话'],
'zh-yue': [u'Help talk', u'幫手傾偈', u'幫手 討論', u'幫助 討論', u'說明 討論', u'帮手 讨论', u'帮助 讨论', u'说明 讨论'],
},
14: {
'_default': u'Category',
'ab': [u'Акатегориа', u'Категория'],
'ace': [u'Kawan', u'Kategori'],
'af': u'Kategorie',
'ak': u'Kategori',
'als': u'Kategorie',
'am': u'መደብ',
'an': u'Categoría',
'ang': u'Flocc',
'ar': u'تصنيف',
'arc': u'ܣܕܪܐ',
'arz': u'تصنيف',
'as': [u'শ্ৰেণী', u'CAT', u'श्रेणी', u'শ্রেণী'],
'ast': u'Categoría',
'av': u'Категория',
'ay': u'Categoría',
'az': u'Kateqoriya',
'ba': [u'Категория', u'Төркөм'],
'bar': u'Kategorie',
'bat-smg': [u'Kateguorėjė', u'Kategorija'],
'bcl': u'Kategorya',
'be': u'Катэгорыя',
'be-x-old': u'Катэгорыя',
'bg': u'Категория',
'bh': u'श्रेणी',
'bjn': [u'Tumbung', u'Kategori'],
'bm': u'Catégorie',
'bn': [u'বিষয়শ্রেণী', u'വിഭാഗം'],
'bpy': u'থাক',
'br': u'Rummad',
'bs': u'Kategorija',
'bug': u'Kategori',
'bxr': [u'Категори', u'Категория'],
'ca': u'Categoria',
'cbk-zam': u'Categoría',
'ce': [u'Категори', u'Тоба', u'Кадегар'],
'ceb': u'Kategoriya',
'ch': u'Katigoria',
'ckb': u'پۆل',
'crh': [u'Kategoriya', u'Категория'],
'cs': u'Kategorie',
'csb': u'Kategòrëjô',
'cu': [u'Катигорїꙗ', u'Категория', u'Катигорї�'],
'cv': u'Категори',
'cy': u'Categori',
'da': u'Kategori',
'de': u'Kategorie',
'diq': [u'Kategoriye', u'Kategori'],
'dsb': u'Kategorija',
'dv': u'ޤިސްމު',
'el': u'Κατηγορία',
'eml': u'Categoria',
'eo': u'Kategorio',
'es': u'Categoría',
'et': u'Kategooria',
'eu': u'Kategoria',
'fa': u'رده',
'ff': u'Catégorie',
'fi': u'Luokka',
'fiu-vro': u'Katõgooria',
'fo': u'Bólkur',
'fr': u'Catégorie',
'frp': u'Catègorie',
'frr': u'Kategorie',
'fur': u'Categorie',
'fy': u'Kategory',
'ga': [u'Catagóir', u'Rang'],
'gag': [u'Kategoriya', u'Kategori'],
'gan': u'分類',
'gd': u'Roinn-seòrsa',
'gl': u'Categoría',
'glk': u'رده',
'gn': u'Ñemohenda',
'got': u'𐌷𐌰𐌽𐍃𐌰',
'gu': u'શ્રેણી',
'gv': u'Ronney',
'haw': u'Māhele',
'he': u'קטגוריה',
'hi': u'श्रेणी',
'hif': u'vibhag',
'hr': u'Kategorija',
'hsb': u'Kategorija',
'ht': u'Kategori',
'hu': u'Kategória',
'hy': u'Կատեգորիա',
'ia': u'Categoria',
'id': u'Kategori',
'ie': u'Categorie',
'ig': [u'Òtù', u'Ébéonọr'],
'ilo': u'Kategoria',
'io': u'Kategorio',
'is': u'Flokkur',
'it': u'Categoria',
'ja': u'カテゴリ',
'jv': u'Kategori',
'ka': u'კატეგორია',
'kaa': [u'Kategoriya', u'Санат', u'سانات'],
'kab': u'Taggayt',
'kbd': [u'Категориэ', u'Категория'],
'kg': u'Kalasi',
'kk': [u'Санат', u'Sanat', u'سانات'],
'kl': [u'Sumut atassuseq', u'Kategori'],
'km': [u'ចំណាត់ថ្នាក់ក្រុម', u'ចំណាត់ក្រុម', u'ចំនាត់ថ្នាក់ក្រុម'],
'kn': u'ವರ್ಗ',
'ko': u'분류',
'koi': u'Категория',
'krc': u'Категория',
'ks': u'زٲژ',
'ksh': [u'Saachjrupp', u'Sachjrop', u'Saachjrop', u'Saachjropp', u'Kattejori', u'Kategorie', u'Katejori'],
'ku': u'Kategorî',
'kv': u'Категория',
'kw': [u'Klass', u'Class'],
'ky': u'Категория',
'la': u'Categoria',
'lad': [u'Katēggoría', u'Kateggoría', u'Categoría'],
'lb': u'Kategorie',
'lbe': u'Категория',
'lez': u'Категория',
'li': [u'Categorie', u'Kategorie'],
'lij': [u'Categorîa', u'Categoria'],
'lmo': [u'Categuria', u'Categoria'],
'ln': u'Catégorie',
'lo': u'ໝວດ',
'lt': u'Kategorija',
'ltg': u'Kategoreja',
'lv': u'Kategorija',
'map-bms': u'Kategori',
'mdf': [u'Категорие', u'Категория'],
'mg': [u'Sokajy', u'Catégorie'],
'mhr': [u'Категорий', u'Категория'],
'min': u'Kategori',
'mk': u'Категорија',
'ml': [u'വർഗ്ഗം', u'വി', u'വ', u'വിഭാഗം'],
'mn': u'Ангилал',
'mr': u'वर्ग',
'mrj': u'Категори',
'ms': u'Kategori',
'mt': u'Kategorija',
'mwl': [u'Catadorie', u'Categoria'],
'myv': u'Категория',
'mzn': [u'رج', u'رده'],
'nah': [u'Neneuhcāyōtl', u'Categoría'],
'nap': [u'Categurìa', u'Categoria'],
'nds': u'Kategorie',
'nds-nl': [u'Kategorie', u'Categorie', u'Kattegerie'],
'ne': u'श्रेणी',
'new': u'पुचः',
'nl': u'Categorie',
'nn': u'Kategori',
'no': u'Kategori',
'nso': u'Setensele',
'nv': u'Tʼááłáhági átʼéego',
'oc': u'Categoria',
'or': [u'ଶ୍ରେଣୀ', u'ବିଭାଗ'],
'os': u'Категори',
'pa': u'ਸ਼੍ਰੇਣੀ',
'pcd': u'Catégorie',
'pdc': [u'Abdeeling', u'Kategorie'],
'pfl': [u'Sachgrubb', u'Kategorie', u'Kadegorie'],
'pi': u'विभाग',
'pl': u'Kategoria',
'pms': u'Categorìa',
'pnt': u'Κατηγορίαν',
'ps': u'وېشنيزه',
'pt': u'Categoria',
'qu': u'Katiguriya',
'rm': u'Categoria',
'rmy': u'Shopni',
'ro': u'Categorie',
'ru': u'Категория',
'rue': u'Катеґорія',
'sa': u'वर्गः',
'sah': u'Категория',
'sc': u'Categoria',
'scn': u'Catigurìa',
'sd': u'زمرو',
'se': u'Kategoriija',
'sg': u'Catégorie',
'sh': u'Kategorija',
'si': u'ප්රවර්ගය',
'sk': u'Kategória',
'sl': u'Kategorija',
'sq': [u'Kategoria', u'Kategori'],
'sr': [u'Категорија', u'Kategorija'],
'srn': [u'Guru', u'Categorie'],
'stq': u'Kategorie',
'su': u'Kategori',
'sv': u'Kategori',
'sw': u'Jamii',
'szl': [u'Kategoryjo', u'Kategoria'],
'ta': u'பகுப்பு',
'te': u'వర్గం',
'tet': [u'Kategoria', u'Kategoría'],
'tg': u'Гурӯҳ',
'th': u'หมวดหมู่',
'tk': u'Kategoriýa',
'tl': [u'Kategorya', u'Kaurian'],
'tlh': u'Segh',
'tpi': u'Grup',
'tr': u'Kategori',
'tt': [u'Төркем', u'Категория', u'Törkem'],
'ty': u'Catégorie',
'udm': u'Категория',
'ug': u'تۈر',
'uk': u'Категорія',
'ur': u'زمرہ',
'uz': [u'Turkum', u'Kategoriya'],
'vec': u'Categoria',
'vep': u'Kategorii',
'vi': u'Thể loại',
'vls': u'Categorie',
'vo': u'Klad',
'wa': u'Categoreye',
'war': u'Kaarangay',
'wo': [u'Wàll', u'Catégorie'],
'wuu': u'分类',
'xal': [u'Әәшл', u'Янз'],
'xmf': u'კატეგორია',
'yi': [u'קאַטעגאָריע', u'קאטעגאריע'],
'yo': u'Ẹ̀ka',
'za': u'分类',
'zea': u'Categorie',
'zh': [u'Category', u'CAT', u'分类', u'分類'],
'zh-yue': [u'Category', u'分類', u'類', u'类', u'分类'],
},
15: {
'_default': u'Category talk',
'ab': [u'Акатегориа ахцәажәара', u'Обсуждение категории'],
'ace': [u'Marit Kawan', u'Pembicaraan Kategori', u'Kategori Pembicaraan'],
'af': u'Kategoriebespreking',
'ak': u'Kategori nkɔmbɔ',
'als': u'Kategorie Diskussion',
'am': u'መደብ ውይይት',
'an': u'Descusión categoría',
'ang': u'Floccgesprec',
'ar': u'نقاش التصنيف',
'arc': u'ܡܡܠܠܐ ܕܣܕܪܐ',
'arz': u'نقاش التصنيف',
'as': [u'শ্ৰেণী বাৰ্তা', u'श्रेणी वार्ता', u'শ্রেণী বার্তা'],
'ast': [u'Categoría alderique', u'Categoría discusión'],
'av': u'Обсуждение категории',
'ay': u'Categoría discusión',
'az': u'Kateqoriya müzakirəsi',
'ba': [u'Категория буйынса фекерләшеү', u'Төркөм буйынса фекерләшеү', u'Категория б-са фекер алышыу'],
'bar': [u'Kategorie Dischkrian', u'Kategorie Diskussion'],
'bat-smg': [u'Kateguorėjės aptarėms', u'Kategorijos aptarimas'],
'bcl': u'Olay sa kategorya',
'be': u'Размовы пра катэгорыю',
'be-x-old': u'Абмеркаваньне катэгорыі',
'bg': u'Категория беседа',
'bh': u'श्रेणी वार्ता',
'bjn': [u'Pamandiran Tumbung', u'Pembicaraan Kategori'],
'bm': [u'Discussion catégorie', u'Discussion Catégorie'],
'bn': [u'বিষয়শ্রেণী আলোচনা', u'വിഭാഗത്തിന്റെ സംവാദം'],
'bpy': u'থাকর য়্যারী',
'br': u'Kaozeadenn Rummad',
'bs': u'Razgovor o kategoriji',
'bug': [u'Pembicaraan Kategori', u'Kategori Pembicaraan'],
'bxr': [u'Категори хэлэлсэхэ', u'Обсуждение категории'],
'ca': u'Categoria Discussió',
'cbk-zam': u'Categoría discusión',
'ce': [u'Категорин дийцаре', u'Тобан дийца', u'Кадегар дийцаре'],
'ceb': u'Hisgot sa Kategoriya',
'ch': u'Kombetsasion ni katigoria',
'ckb': [u'وتووێژی پۆل', u'لێدوانی پۆل'],
'crh': [u'Kategoriya muzakeresi', u'Категория музакереси'],
'cs': [u'Diskuse ke kategorii', u'Kategorie diskuse'],
'csb': u'Diskùsëjô Kategòrëji',
'cu': [u'Катигорїѩ бєсѣда', u'Катигорїѩ бесѣда'],
'cv': u'Категорине сӳтсе явмалли',
'cy': u'Sgwrs Categori',
'da': u'Kategoridiskussion',
'de': u'Kategorie Diskussion',
'diq': [u'Kategoriye mesac', u'Kategori werênayış', u'Kategoriye werênayış'],
'dsb': u'Diskusija wó kategoriji',
'dv': [u'ޤިސްމުގެ ޚިޔާލު', u'ޤިސްމު ޚިޔާލު'],
'el': u'Συζήτηση κατηγορίας',
'eml': u'Discussioni categoria',
'eo': [u'Kategorio-Diskuto', u'Kategoria diskuto'],
'es': u'Categoría discusión',
'et': u'Kategooria arutelu',
'eu': u'Kategoria eztabaida',
'fa': u'بحث رده',
'ff': [u'Discussion catégorie', u'Discussion Catégorie'],
'fi': u'Keskustelu luokasta',
'fiu-vro': u'Katõgooria arotus',
'fo': [u'Bólkakjak', u'Bólkur kjak'],
'fr': [u'Discussion catégorie', u'Discussion Catégorie'],
'frp': [u'Discussion catègorie', u'Discussion Catègorie'],
'frr': u'Kategorie Diskussion',
'fur': u'Discussion categorie',
'fy': u'Kategory oerlis',
'ga': u'Plé catagóire',
'gag': [u'Kategoriya dartışma', u'Kategori tartışma'],
'gan': u'分類・談詑',
'gd': u'Deasbaireachd na roinn-seòrsa',
'gl': [u'Conversa categoría', u'Conversa Categoría'],
'glk': u'بحث رده',
'gn': u'Ñemohenda myangekõi',
'got': u'𐌷𐌰𐌽𐍃𐍉𐍃 𐌲𐌰𐍅𐌰𐌿𐍂𐌳𐌾𐌰',
'gu': u'શ્રેણીની ચર્ચા',
'gv': u'Resooney ronney',
'haw': u'Kūkākūkā o māhele',
'he': u'שיחת קטגוריה',
'hi': u'श्रेणी वार्ता',
'hif': u'voibhag ke baat',
'hr': u'Razgovor o kategoriji',
'hsb': u'Diskusija ke kategoriji',
'ht': u'Diskisyon Kategori',
'hu': [u'Kategóriavita', u'Kategória vita'],
'hy': u'Կատեգորիայի քննարկում',
'ia': u'Discussion Categoria',
'id': [u'Pembicaraan Kategori', u'Kategori Pembicaraan'],
'ie': u'Categorie Discussion',
'ig': [u'Okwu òtù', u'Okwu ébéonọr'],
'ilo': u'Kategoria tungtungan',
'io': u'Kategorio Debato',
'is': u'Flokkaspjall',
'it': u'Discussioni categoria',
'ja': [u'カテゴリ・トーク', u'Category‐ノート'],
'jv': [u'Dhiskusi Kategori', u'Kategori Dhiskusi'],
'ka': [u'კატეგორიის განხილვა', u'კატეგორია განხილვა'],
'kaa': [u'Kategoriya sa\'wbeti', u'Санат талқылауы', u'سانات تالقىلاۋى'],
'kab': u'Amyannan n taggayt',
'kbd': [u'Категориэ тепсэлъэхьыгъуэ', u'Обсуждение категории'],
'kg': u'Disolo kalasi',
'kk': [u'Санат талқылауы', u'Sanat talqılawı', u'سانات تالقىلاۋى'],
'kl': [u'Sumut atassusermi oqallinneq', u'Kategoridiskussion'],
'km': [u'ការពិភាក្សាអំពីចំណាត់ថ្នាក់ក្រុម', u'ចំណាត់ក្រុម-ពិភាក្សា', u'ការពិភាក្សាអំពីចំនាត់ថ្នាក់ក្រុម'],
'kn': u'ವರ್ಗ ಚರ್ಚೆ',
'ko': u'분류토론',
'koi': u'Обсуждение категории',
'krc': u'Категорияны сюзюу',
'ks': u'زٲژ بَحَژ',
'ksh': [u'Saachjruppe Klaaf', u'Sachjrop Klaaf', u'Saachjroppe Klaaf', u'Saachjrupp Klaaf', u'Kattejori Klaaf', u'Kattejorije Klaaf', u'Kategorie Klaaf', u'Katejorije Klaaf'],
'ku': [u'Gotûbêja kategoriyê', u'Kategorî nîqaş'],
'kv': u'Обсуждение категории',
'kw': [u'Keskows Klass', u'Cows Class', u'Kescows Class'],
'la': u'Disputatio Categoriae',
'lad': [u'Diskusyón de Kateggoría', u'Categoría Discusión', u'Diskusyón de Katēggoría'],
'lb': u'Kategorie Diskussioun',
'lbe': u'Категориялиясса ихтилат',
'lez': [u'Категория веревирд авун', u'Обсуждение категории'],
'li': [u'Euverlèk categorie', u'Euverlèk kategorie'],
'lij': [u'Discûscioîn categorîa', u'Discussioni categoria'],
'lmo': [u'Ciciarada Categuria', u'Discussioni categoria', u'Ciciarada Categoria'],
'ln': [u'Discussion catégorie', u'Discussion Catégorie'],
'lo': u'ສົນທະນາກ່ຽວກັບໝວດ',
'lt': u'Kategorijos aptarimas',
'ltg': u'Sprīža ap kategoreju',
'lv': u'Kategorijas diskusija',
'map-bms': [u'Dhiskusi Kategori', u'Kategori Dhiskusi'],
'mdf': [u'Категорие корхнема', u'Обсуждение категории'],
'mg': [u'Dinika amin\'ny sokajy', u'Discussion Catégorie'],
'mhr': [u'Категорий шотышто каҥашымаш', u'Обсуждение категории', u'Категорийын каҥашымаш'],
'min': [u'Rundiang Kategori', u'Pembicaraan Kategori'],
'mk': u'Разговор за категорија',
'ml': [u'വർഗ്ഗത്തിന്റെ സംവാദം', u'വിസം', u'വസം', u'വിഭാഗത്തിന്റെ സംവാദം'],
'mn': u'Ангиллын хэлэлцүүлэг',
'mr': u'वर्ग चर्चा',
'mrj': [u'Категорим кӓнгӓшӹмӓш', u'Категори кӓнгӓшӹмӓш'],
'ms': [u'Perbincangan kategori', u'Perbualan Kategori'],
'mt': [u'Diskussjoni kategorija', u'Diskuti kategorija'],
'mwl': [u'Cumbersa catadorie', u'Categoria Discussão'],
'myv': u'Категориядо кортамось',
'mzn': [u'رج گپ', u'بحث رده'],
'nah': [u'Neneuhcāyōtl tēixnāmiquiliztli', u'Categoría Discusión'],
'nap': [u'Categurìa chiàcchiera', u'Discussioni categoria'],
'nds': [u'Kategorie Diskuschoon', u'Kategorie Diskussion'],
'nds-nl': [u'Overleg kategorie', u'Overleg categorie'],
'ne': u'श्रेणी वार्ता',
'new': u'पुचः खँलाबँला',
'nl': u'Overleg categorie',
'nn': u'Kategoridiskusjon',
'no': u'Kategoridiskusjon',
'nso': u'Poledišano ya Setensele',
'nv': u'Tʼááłáhági átʼéego baa yáshtiʼ',
'oc': [u'Discussion Categoria', u'Discutida Categoria'],
'or': [u'ଶ୍ରେଣୀ ଆଲୋଚନା', u'ବିଭାଗିୟ ଆଲୋଚନା'],
'os': [u'Категорийы тæрхон', u'Категорийы тыххæй дискусси'],
'pa': [u'ਸ਼੍ਰੇਣੀ ਗੱਲ-ਬਾਤ', u'ਸ਼੍ਰੇਣੀ ਚਰਚਾ'],
'pcd': [u'Discussion catégorie', u'Discussion Catégorie'],
'pdc': [u'Abdeeling Dischbedutt', u'Kategorie Diskussion'],
'pfl': [u'Sachgrubb Dischbediere', u'Kategorie Diskussion', u'Kadegorie Dischbediere'],
'pi': u'विभाग सम्भासित',
'pl': u'Dyskusja kategorii',
'pms': u'Discussion ant sla categorìa',
'pnt': u'Καλάτσεμαν κατηγορίας',
'ps': u'د وېشنيزې خبرې اترې',
'pt': u'Categoria Discussão',
'qu': u'Katiguriya rimanakuy',
'rm': u'Categoria discussiun',
'rmy': [u'Shopni vakyarimata', u'Discuţie Categorie'],
'ro': [u'Discuție Categorie', u'Discuţie Categorie'],
'ru': u'Обсуждение категории',
'rue': u'Діскузія ку катеґорії',
'sa': [u'वर्गसम्भाषणम्', u'वर्गसंभाषणं'],
'sah': u'Категория ырытыыта',
'sc': u'Cuntierra categoria',
'scn': [u'Discussioni catigurìa', u'Discussioni Catigurìa'],
'sd': u'زمرو بحث',
'se': u'Kategoriijaságastallan',
'sg': [u'Discussion catégorie', u'Discussion Catégorie'],
'sh': u'Razgovor o kategoriji',
'si': u'ප්රවර්ග සාකච්ඡාව',
'sk': u'Diskusia ku kategórii',
'sl': u'Pogovor o kategoriji',
'sq': [u'Kategoria diskutim', u'Kategori Diskutim'],
'sr': [u'Разговор о категорији', u'Razgovor o kategoriji'],
'srn': [u'Taki fu guru', u'Overleg categorie'],
'stq': u'Kategorie Diskussion',
'su': u'Obrolan kategori',
'sv': u'Kategoridiskussion',
'sw': [u'Majadiliano ya jamii', u'Jamii majadiliano'],
'szl': [u'Dyskusyjo kategoryji', u'Dyskusja kategorii'],
'ta': u'பகுப்பு பேச்சு',
'te': u'వర్గం చర్చ',
'tet': [u'Diskusaun Kategoria', u'Diskusaun Kategoría'],
'tg': u'Баҳси гурӯҳ',
'th': u'คุยเรื่องหมวดหมู่',
'tk': u'Kategoriýa çekişme',
'tl': [u'Usapang kategorya', u'Usapang kaurian'],
'tlh': u"Segh ja'chuq",
'tpi': u'Toktok bilong grup',
'tr': u'Kategori tartışma',
'tt': [u'Төркем бәхәсе', u'Обсуждение категории', u'Törkem bäxäse'],
'ty': [u'Discussion catégorie', u'Discussion Catégorie'],
'udm': u'Категория сярысь вераськон',
'ug': u'تۈر مۇنازىرىسى',
'uk': u'Обговорення категорії',
'ur': u'تبادلۂ خیال زمرہ',
'uz': [u'Turkum munozarasi', u'Kategoriya munozarasi'],
'vec': u'Discussion categoria',
'vep': u'Lodu kategorijas',
'vi': u'Thảo luận Thể loại',
'vls': u'Discuusje categorie',
'vo': u'Kladibespik',
'wa': [u'Categoreye copene', u'Discussion Catégorie'],
'war': u'Hiruhimangraw hiton kaarangay',
'wo': [u'Waxtaani wàll', u'Discussion Catégorie'],
'wuu': [u'分类讨论', u'分类对话'],
'xal': [u'Әәшлин туск меткән', u'Янзин тускар ухалвр'],
'xmf': [u'კატეგორიის განხილვა', u'კატეგორია განხილვა'],
'yi': [u'קאַטעגאָריע רעדן', u'קאטעגאריע רעדן'],
'yo': u'Ọ̀rọ̀ ẹ̀ka',
'za': [u'分类讨论', u'分类对话'],
'zea': u'Overleg categorie',
'zh': [u'Category talk', u'分类讨论', u'分類討論', u'分类对话', u'分類對話'],
'zh-yue': [u'Category talk', u'分類傾偈', u'類 討論', u'分類 討論', u'类 讨论', u'分类 讨论'],
},
}
self.namespacesWithSubpage = [2] + range(1, 16, 2)
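# i.e. the User namespace (2) plus every odd-numbered (talk) namespace from 1 through 15.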
# letters that can follow a wikilink and are regarded as part of
# this link
# This depends on the linktrail setting in LanguageXx.php and on
# [[MediaWiki:Linktrail]].
# Note: this is a regular expression.
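# e.g. with the default trail '[a-z]*' the wikitext [[train]]s is rendered as one link to "train" labelled "trains".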
self.linktrails = {
'_default': u'[a-z]*',
'ca': u'[a-zàèéíòóúç·ïü]*',
'cs': u'[a-záčďéěíňóřšťúůýž]*',
'de': u'[a-zäöüß]*',
'da': u'[a-zæøå]*',
'es': u'[a-záéíóúñ]*',
'fa': u'[a-zابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة]*',
'fi': u'[a-zäö]*',
'fr': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'frr': u'[a-zäöüßåāđē]*',
'he': u'[a-zא-ת]*',
'hu': u'[a-záéíóúöüőűÁÉÍÓÚÖÜŐŰ]*',
'it': u'[a-zàéèíîìóòúù]*',
'ka': u'[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*',
'kk': u'[a-zäçéğıïñöşüýʺʹа-яёәғіқңөұүһٴابپتجحدرزسشعفقكلمنڭەوۇۋۆىيچھ“»]*',
'ksh': u'[a-zäöüėëijßəğåůæœç]*',
'mk': u'[a-zабвгдѓежзѕијклљмнњопрстќуфхцчџш]*',
'nl': u'[a-zäöüïëéèàë]*',
'pl': u'[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*',
'pt': u'[a-záâàãéêíóôõúüç]*',
'ro': u'[a-zăâîşţșțĂÂÎŞŢȘȚ]*',
'ru': u'[a-zа-я]*',
'sk': u'[a-záäčďéíľĺňóôŕšťúýž]*',
}
# Wikimedia wikis all use "bodyContent" as the id of the <div>
# element that contains the actual page content; change this for
# wikis that use something else (e.g., mozilla family)
self.content_id = "bodyContent"
# A dictionary where keys are family codes that can be used in
# inter-family interwiki links. Do not use it directly but
# get_known_families() instead.
# TODO: replace this with API interwikimap call
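# e.g. the prefix 'b' maps to 'wikibooks', so [[b:Some title]] is resolved as an inter-family link to Wikibooks.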
self.known_families = {
'abbenormal': 'abbenormal',
'acronym': 'acronym',
'advisory': 'advisory',
'advogato': 'advogato',
'aew': 'aew',
'airwarfare': 'airwarfare',
'aiwiki': 'aiwiki',
'allwiki': 'allwiki',
'appropedia': 'appropedia',
'aquariumwiki': 'aquariumwiki',
'arxiv': 'arxiv',
'aspienetwiki': 'aspienetwiki',
'atmwiki': 'atmwiki',
'b': 'wikibooks',
'battlestarwiki': 'battlestarwiki',
'bemi': 'bemi',
'benefitswiki': 'benefitswiki',
'betawiki': 'betawiki',
'betawikiversity': 'betawikiversity',
'biblewiki': 'biblewiki',
'bluwiki': 'bluwiki',
'botwiki': 'botwiki',
'boxrec': 'boxrec',
'brickwiki': 'brickwiki',
'bridgeswiki': 'bridgeswiki',
'bugzilla': 'bugzilla',
'buzztard': 'buzztard',
'bytesmiths': 'bytesmiths',
'c2': 'c2',
'c2find': 'c2find',
'cache': 'cache',
'canwiki': 'canwiki',
'canyonwiki': 'canyonwiki',
'Ĉej': 'Ĉej',
'cellwiki': 'cellwiki',
'centralwikia': 'centralwikia',
'chapter': 'chapter',
'chej': 'chej',
'choralwiki': 'choralwiki',
'ciscavate': 'ciscavate',
'citizendium': 'citizendium',
'ckwiss': 'ckwiss',
'closed-zh-tw': 'closed-zh-tw',
'cndbname': 'cndbname',
'cndbtitle': 'cndbtitle',
'colab': 'colab',
'comcom': 'comcom',
'comixpedia': 'comixpedia',
'commons': 'commons',
'communityscheme': 'communityscheme',
'comune': 'comune',
'consciousness': 'consciousness',
'corpknowpedia': 'corpknowpedia',
'crazyhacks': 'crazyhacks',
'creatureswiki': 'creatureswiki',
'cxej': 'cxej',
'dawiki': 'dawiki',
'dbdump': 'dbdump',
'dcc': 'dcc',
'dcdatabase': 'dcdatabase',
'dcma': 'dcma',
'dejanews': 'dejanews',
'delicious': 'delicious',
'demokraatia': 'demokraatia',
'devmo': 'devmo',
'dict': 'dict',
'dictionary': 'dictionary',
'disinfopedia': 'disinfopedia',
'distributedproofreaders': 'distributedproofreaders',
'distributedproofreadersca': 'distributedproofreadersca',
'dk': 'dk',
'dmoz': 'dmoz',
'dmozs': 'dmozs',
'docbook': 'docbook',
## 'doi': 'doi',
'doom_wiki': 'doom_wiki',
'download': 'download',
'drae': 'drae',
'dreamhost': 'dreamhost',
'drumcorpswiki': 'drumcorpswiki',
'dwjwiki': 'dwjwiki',
'eĉei': 'eĉei',
'echei': 'echei',
'ecoreality': 'ecoreality',
'ecxei': 'ecxei',
'efnetceewiki': 'efnetceewiki',
'efnetcppwiki': 'efnetcppwiki',
'efnetpythonwiki': 'efnetpythonwiki',
'efnetxmlwiki': 'efnetxmlwiki',
'elibre': 'elibre',
'emacswiki': 'emacswiki',
'energiewiki': 'energiewiki',
'eokulturcentro': 'eokulturcentro',
'epo': 'epo',
'ethnologue': 'ethnologue',
'evowiki': 'evowiki',
'exotica': 'exotica',
'fanimutationwiki': 'fanimutationwiki',
'finalempire': 'finalempire',
'finalfantasy': 'finalfantasy',
'finnix': 'finnix',
'flickruser': 'flickruser',
'floralwiki': 'floralwiki',
'flyerwiki-de': 'flyerwiki-de',
'foldoc': 'foldoc',
'forthfreak': 'forthfreak',
'foundation': 'foundation',
'foxwiki': 'foxwiki',
'freebio': 'freebio',
'freebsdman': 'freebsdman',
'freeculturewiki': 'freeculturewiki',
'freedomdefined': 'freedomdefined',
'freefeel': 'freefeel',
'freekiwiki': 'freekiwiki',
'ganfyd': 'ganfyd',
'gausswiki': 'gausswiki',
'gentoo-wiki': 'gentoo',
'genwiki': 'genwiki',
'globalvoices': 'globalvoices',
'glossarwiki': 'glossarwiki',
'glossarywiki': 'glossarywiki',
'golem': 'golem',
'google': 'google',
'googledefine': 'googledefine',
'googlegroups': 'googlegroups',
'gotamac': 'gotamac',
'greatlakeswiki': 'greatlakeswiki',
'guildwiki': 'guildwiki',
'gutenberg': 'gutenberg',
'gutenbergwiki': 'gutenbergwiki',
'h2wiki': 'h2wiki',
'hammondwiki': 'hammondwiki',
'heroeswiki': 'heroeswiki',
'herzkinderwiki': 'herzkinderwiki',
'hkmule': 'hkmule',
'holshamtraders': 'holshamtraders',
'hrfwiki': 'hrfwiki',
'hrwiki': 'hrwiki',
'humancell': 'humancell',
'hupwiki': 'hupwiki',
'imdbcharacter': 'imdbcharacter',
'imdbcompany': 'imdbcompany',
'imdbname': 'imdbname',
'imdbtitle': 'imdbtitle',
'incubator': 'incubator',
'infoanarchy': 'infoanarchy',
'infosecpedia': 'infosecpedia',
'infosphere': 'infosphere',
'iso639-3': 'iso639-3',
'iuridictum': 'iuridictum',
'jameshoward': 'jameshoward',
'javanet': 'javanet',
'javapedia': 'javapedia',
'jefo': 'jefo',
'jiniwiki': 'jiniwiki',
'jspwiki': 'jspwiki',
'jstor': 'jstor',
'kamelo': 'kamelo',
'karlsruhe': 'karlsruhe',
'kerimwiki': 'kerimwiki',
'kinowiki': 'kinowiki',
'kmwiki': 'kmwiki',
'kontuwiki': 'kontuwiki',
'koslarwiki': 'koslarwiki',
'kpopwiki': 'kpopwiki',
'linguistlist': 'linguistlist',
'linuxwiki': 'linuxwiki',
'linuxwikide': 'linuxwikide',
'liswiki': 'liswiki',
'literateprograms': 'literateprograms',
'livepedia': 'livepedia',
'lojban': 'lojban',
'lostpedia': 'lostpedia',
'lqwiki': 'lqwiki',
'lugkr': 'lugkr',
'luxo': 'luxo',
'lyricwiki': 'lyricwiki',
'm': 'meta',
'm-w': 'm-w',
'mail': 'mail',
'mailarchive': 'mailarchive',
'mariowiki': 'mariowiki',
'marveldatabase': 'marveldatabase',
'meatball': 'meatball',
'mediazilla': 'mediazilla',
'memoryalpha': 'memoryalpha',
'meta': 'meta',
'metawiki': 'metawiki',
'metawikipedia': 'metawikipedia',
'mineralienatlas': 'mineralienatlas',
'moinmoin': 'moinmoin',
'monstropedia': 'monstropedia',
'mosapedia': 'mosapedia',
'mozcom': 'mozcom',
'mozillawiki': 'mozillawiki',
'mozillazinekb': 'mozillazinekb',
'musicbrainz': 'musicbrainz',
'mw': 'mw',
'mwod': 'mwod',
'mwot': 'mwot',
'n': 'wikinews',
'netvillage': 'netvillage',
'nkcells': 'nkcells',
'nomcom': 'nomcom',
'nosmoke': 'nosmoke',
'nost': 'nost',
'oeis': 'oeis',
'oldwikisource': 'oldwikisource',
'olpc': 'olpc',
'onelook': 'onelook',
'openfacts': 'openfacts',
'openstreetmap': 'openstreetmap',
'openwetware': 'openwetware',
'openwiki': 'openwiki',
'opera7wiki': 'opera7wiki',
'organicdesign': 'organicdesign',
'orgpatterns': 'orgpatterns',
'orthodoxwiki': 'orthodoxwiki',
'osi reference model': 'osi reference model',
'otrs': 'otrs',
'otrswiki': 'otrswiki',
'ourmedia': 'ourmedia',
'paganwiki': 'paganwiki',
'panawiki': 'panawiki',
'pangalacticorg': 'pangalacticorg',
'patwiki': 'patwiki',
'perlconfwiki': 'perlconfwiki',
'perlnet': 'perlnet',
'personaltelco': 'personaltelco',
'phpwiki': 'phpwiki',
'phwiki': 'phwiki',
'planetmath': 'planetmath',
'pmeg': 'pmeg',
'pmwiki': 'pmwiki',
'psycle': 'psycle',
'purlnet': 'purlnet',
'pythoninfo': 'pythoninfo',
'pythonwiki': 'pythonwiki',
'pywiki': 'pywiki',
'q': 'wikiquote',
'qcwiki': 'qcwiki',
'quality': 'quality',
'qwiki': 'qwiki',
'r3000': 'r3000',
'raec': 'raec',
'rakwiki': 'rakwiki',
'reuterswiki': 'reuterswiki',
'rev': 'rev',
'revo': 'revo',
'rfc': 'rfc',
'rheinneckar': 'rheinneckar',
'robowiki': 'robowiki',
'rowiki': 'rowiki',
's': 'wikisource',
's23wiki': 's23wiki',
'scholar': 'scholar',
'schoolswp': 'schoolswp',
'scores': 'scores',
'scoutwiki': 'scoutwiki',
'scramble': 'scramble',
'seapig': 'seapig',
'seattlewiki': 'seattlewiki',
'seattlewireless': 'seattlewireless',
'senseislibrary': 'senseislibrary',
'silcode': 'silcode',
'slashdot': 'slashdot',
'slwiki': 'slwiki',
'smikipedia': 'smikipedia',
'sourceforge': 'sourceforge',
'spcom': 'spcom',
'species': 'species',
'squeak': 'squeak',
'stable': 'stable',
'strategywiki': 'strategywiki',
'sulutil': 'sulutil',
'susning': 'susning',
'svgwiki': 'svgwiki',
'svn': 'svn',
'swinbrain': 'swinbrain',
'swingwiki': 'swingwiki',
'swtrain': 'swtrain',
'tabwiki': 'tabwiki',
'takipedia': 'takipedia',
'tavi': 'tavi',
'tclerswiki': 'tclerswiki',
'technorati': 'technorati',
'tejo': 'tejo',
'tesoltaiwan': 'tesoltaiwan',
'testwiki': 'testwiki',
'thelemapedia': 'thelemapedia',
'theopedia': 'theopedia',
'theppn': 'theppn',
'thinkwiki': 'thinkwiki',
'tibiawiki': 'tibiawiki',
'ticket': 'ticket',
'tmbw': 'tmbw',
'tmnet': 'tmnet',
'tmwiki': 'tmwiki',
'tokyonights': 'tokyonights',
'tools': 'tools',
'translatewiki': 'translatewiki',
'trash!italia': 'trash!italia',
'tswiki': 'tswiki',
'turismo': 'turismo',
'tviv': 'tviv',
'tvtropes': 'tvtropes',
'twiki': 'twiki',
'twistedwiki': 'twistedwiki',
'tyvawiki': 'tyvawiki',
'uncyclopedia': 'uncyclopedia',
'unreal': 'unreal',
'urbandict': 'urbandict',
'usej': 'usej',
'usemod': 'usemod',
'v': 'wikiversity',
'valuewiki': 'valuewiki',
'veropedia': 'veropedia',
'vinismo': 'vinismo',
'vkol': 'vkol',
'vlos': 'vlos',
'voipinfo': 'voipinfo',
'w': 'wikipedia',
'warpedview': 'warpedview',
'webdevwikinl': 'webdevwikinl',
'webisodes': 'webisodes',
'webseitzwiki': 'webseitzwiki',
'wg': 'wg',
'wiki': 'wiki',
'wikia': 'wikia',
'wikianso': 'wikianso',
'wikiasite': 'wikiasite',
'wikible': 'wikible',
'wikibooks': 'wikibooks',
'wikichat': 'wikichat',
'wikichristian': 'wikichristian',
'wikicities': 'wikicities',
'wikicity': 'wikicity',
'wikif1': 'wikif1',
'wikifur': 'wikifur',
'wikihow': 'wikihow',
'wikiindex': 'wikiindex',
'wikilemon': 'wikilemon',
'wikilivres': 'wikilivres',
'wikimac-de': 'wikimac-de',
'wikimac-fr': 'wikimac-fr',
'wikimedia': 'wikimedia',
'wikinews': 'wikinews',
'wikinfo': 'wikinfo',
'wikinurse': 'wikinurse',
'wikinvest': 'wikinvest',
'wikipaltz': 'wikipaltz',
'wikipedia': 'wikipedia',
'wikipediawikipedia': 'wikipediawikipedia',
'wikiquote': 'wikiquote',
'wikireason': 'wikireason',
'wikischool': 'wikischool',
'wikisophia': 'wikisophia',
'wikisource': 'wikisource',
'wikispecies': 'wikispecies',
'wikispot': 'wikispot',
'wikiti': 'wikiti',
'wikitravel': 'wikitravel',
'wikitree': 'wikitree',
'wikiversity': 'wikiversity',
'wikiwikiweb': 'wikiwikiweb',
'wikt': 'wiktionary',
'wiktionary': 'wiktionary',
'wipipedia': 'wipipedia',
'wlug': 'wlug',
'wm2005': 'wm2005',
'wm2006': 'wm2006',
'wm2007': 'wm2007',
'wm2008': 'wm2008',
'wm2009': 'wm2009',
'wm2010': 'wm2010',
'wmania': 'wmania',
'wmcz': 'wmcz',
'wmf': 'wmf',
'wmrs': 'wmrs',
'wmse': 'wmse',
'wookieepedia': 'wookieepedia',
'world66': 'world66',
'wowwiki': 'wowwiki',
'wqy': 'wqy',
'wurmpedia': 'wurmpedia',
'wznan': 'wznan',
'xboxic': 'xboxic',
'zh-cfr': 'zh-cfr',
'zrhwiki': 'zrhwiki',
'zum': 'zum',
'zwiki': 'zwiki',
'zzz wiki': 'zzz wiki',
}
# A list of category redirect template names in different languages
# Note: It *is* necessary to list template redirects here
self.category_redirect_templates = {
'_default': []
}
# A list of languages that use hard (instead of soft) category redirects
self.use_hard_category_redirects = []
# A list of disambiguation template names in different languages
self.disambiguationTemplates = {
'_default': []
}
# A list of projects that share cross-project sessions.
self.cross_projects = []
# A list with the name for cross-project cookies.
# default for wikimedia centralAuth extensions.
self.cross_projects_cookies = ['centralauth_Session',
'centralauth_Token',
'centralauth_User']
self.cross_projects_cookie_username = 'centralauth_User'
# A list with the name in the cross-language flag permissions
self.cross_allowed = []
# A list with the name of the category containing disambiguation
# pages for the various languages. Only one category per language,
# and without the namespace, so add things like:
# 'en': "Disambiguation"
self.disambcatname = {}
# On most wikis page names must start with a capital letter, but some
# languages don't use this.
self.nocapitalize = []
# attop is a list of languages that prefer to have the interwiki
# links at the top of the page.
self.interwiki_attop = []
# on_one_line is a list of languages that want the interwiki links
# one-after-another on a single line
self.interwiki_on_one_line = []
# String used as separator between interwiki links and the text
self.interwiki_text_separator = config.line_separator * 2
# Similar for category
self.category_attop = []
# on_one_line is a list of languages that want the category links
# one-after-another on a single line
self.category_on_one_line = []
# String used as separator between category links and the text
self.category_text_separator = config.line_separator * 2
# When both at the bottom should categories come after interwikilinks?
self.categories_last = []
# Which languages have a special order for putting interlanguage
# links, and what order is it? If a language is not in
# interwiki_putfirst, alphabetical order on language code is used.
# For languages that are in interwiki_putfirst, interwiki_putfirst
# is checked first, and languages are put in the order given there.
# All other languages are put after those, in code-alphabetical
# order.
self.interwiki_putfirst = {}
# Languages in interwiki_putfirst_doubled should have a number plus
# a list of languages. If there are at least the number of interwiki
# links, all languages in the list should be placed at the front as
# well as in the normal list.
self.interwiki_putfirst_doubled = {} # THIS APPEARS TO BE UNUSED!
# Some families, e. g. commons and meta, are not multilingual and
# forward interlanguage links to another family (wikipedia).
# These families can set this variable to the name of the target
# family.
self.interwiki_forward = None
# Some families, e. g. wikipedia, receive forwarded interlanguage
# links from other families, e. g. incubator, commons, or meta.
# These families can set this variable to the names of their source
# families.
self.interwiki_forwarded_from = {}
# Which language codes no longer exist and by which language code
# should they be replaced. If for example the language with code xx:
# now should get code yy:, add {'xx':'yy'} to obsolete. If all
# links to language xx: should be removed, add {'xx': None}.
self.obsolete = {}
# Language codes of the largest wikis. They should be roughly sorted
# by size.
self.languages_by_size = []
# Some languages belong to a group where the possibility is high that
# equivalent articles have identical titles among the group.
self.language_groups = {
# languages using the arabic script (incomplete)
'arab': [
'ar', 'arz', 'ps', 'sd', 'ur', 'bjn', 'ckb',
# languages using multiple scripts, including arabic
'kk', 'ku', 'tt', 'ug', 'pnb'
],
# languages that use chinese symbols
'chinese': [
'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii',
# languages using multiple/mixed scripts, including chinese
'ja', 'za'
],
# languages that use the cyrillic alphabet
'cyril': [
'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu',
'cv', 'kbd', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo',
'myv', 'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk',
'udm', 'uk', 'xal',
# languages using multiple scripts, including cyrillic
'ha', 'kk', 'sh', 'sr', 'tt'
],
# languages that use a greek script
'grec': [
'el', 'grc', 'pnt'
# languages using multiple scripts, including greek
],
# languages that use the latin alphabet
'latin': [
'aa', 'ace', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar',
'bat-smg', 'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam',
'cdo', 'ceb', 'ch', 'cho', 'chy', 'co', 'crh', 'cs', 'csb',
'cy', 'da', 'de', 'diq', 'dsb', 'ee', 'eml', 'en', 'eo', 'es',
'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro', 'fj', 'fo', 'fr',
'frp', 'frr', 'fur', 'fy', 'ga', 'gag', 'gd', 'gl', 'gn', 'gv',
'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia',
'id', 'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv',
'kaa', 'kab', 'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la',
'lad', 'lb', 'lg', 'li', 'lij', 'lmo', 'ln', 'lt', 'ltg', 'lv',
'map-bms', 'mg', 'mh', 'mi', 'ms', 'mt', 'mus', 'mwl', 'na',
'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn', 'no', 'nov',
'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pcd',
'pdc', 'pfl', 'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro',
'roa-rup', 'roa-tara', 'rw', 'sc', 'scn', 'sco', 'se', 'sg',
'simple', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'srn', 'ss', 'st',
'stq', 'su', 'sv', 'sw', 'szl', 'tet', 'tl', 'tn', 'to', 'tpi',
'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've', 'vec', 'vi', 'vls',
'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea', 'zh-min-nan', 'zu',
# languages using multiple scripts, including latin
'az', 'chr', 'ckb', 'ha', 'iu', 'kk', 'ku', 'rmy', 'sh', 'sr',
'tt', 'ug', 'za'
],
# Scandinavian languages
'scand': [
'da', 'fo', 'is', 'nb', 'nn', 'no', 'sv'
],
}
# LDAP domain if your wiki uses LDAP authentication,
# http://www.mediawiki.org/wiki/Extension:LDAP_Authentication
self.ldapDomain = ()
# Allows crossnamespace interwiki linking.
# Lists the possible crossnamespaces combinations
# keys are originating NS
# values are dicts where:
# keys are the originating langcode, or _default
# values are dicts where:
# keys are the languages that can be linked to from the lang+ns, or
# '_default'; values are a list of namespace numbers
self.crossnamespace = {}
##
## Examples :
## Allowing linking to pt's namespace 102 from namespace 0 of any other language:
# self.crossnamespace[0] = {
# '_default': { 'pt': [102]}
# }
## While allowing linking from pt's namespace 102 to namespace 0 of any other language:
# self.crossnamespace[102] = {
# 'pt': { '_default': [0]}
# }
@property
def iwkeys(self):
if self.interwiki_forward:
return pywikibot.Family(self.interwiki_forward).langs.keys()
return self.langs.keys()
def _addlang(self, code, location, namespaces={}):
"""Add a new language to the langs and namespaces of the family.
This is supposed to be called in the constructor of the family.
"""
self.langs[code] = location
for num, val in namespaces.iteritems():
self.namespaces[num][code] = val
def get_known_families(self, site):
return self.known_families
def linktrail(self, code, fallback='_default'):
if code in self.linktrails:
return self.linktrails[code]
elif fallback:
return self.linktrails[fallback]
else:
raise KeyError(
"ERROR: linktrail in language %(language_code)s unknown"
% {'language_code': code})
def namespace(self, code, ns_number, fallback='_default', all=False):
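# Return the localized title(s) of namespace ns_number for language 'code', falling back to the '_default' (English) entry; with all=True a tuple of every accepted variant (including lowercase and underscore forms) is returned instead of a single string.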
if not self.isDefinedNS(ns_number):
raise KeyError('ERROR: Unknown namespace %d for %s:%s'
% (ns_number, code, self.name))
elif self.isNsI18N(ns_number, code):
v = self.namespaces[ns_number][code]
if type(v) == list:
v = v[:]
else:
v = [v]
if all and self.isNsI18N(ns_number, fallback):
v2 = self.namespaces[ns_number][fallback]
if type(v2) is list:
v.extend(v2)
else:
v.append(v2)
elif fallback and self.isNsI18N(ns_number, fallback):
v = self.namespaces[ns_number][fallback]
if type(v) == list:
v = v[:]
else:
v = [v]
else:
raise KeyError('ERROR: title for namespace %d in language %s unknown'
% (ns_number, code))
if all:
namespaces = []
# Unique list
for ns in v:
if ns not in namespaces:
namespaces.append(ns)
# Lowercase versions of namespaces
if code not in self.nocapitalize:
namespaces.extend([ns[0].lower() + ns[1:] for ns in namespaces
if ns and ns[0].lower() != ns[0].upper()])
# Underscore versions of namespaces
namespaces.extend([ns.replace(' ', '_') for ns in namespaces
if ns and ' ' in ns])
return tuple(namespaces)
else:
return v[0]
def isDefinedNS(self, ns_number):
"""Return True if the namespace has been defined in this family.
"""
return ns_number in self.namespaces
def isNsI18N(self, ns_number, code):
"""Return True if the namespace has been internationalized.
(it has a custom entry for a given language)"""
return code in self.namespaces[ns_number]
def isDefinedNSLanguage(self, ns_number, code, fallback='_default'):
"""Return True if the namespace has been defined in this family
for this language or its fallback.
"""
if not self.isDefinedNS(ns_number):
return False
elif self.isNsI18N(ns_number, code):
return True
elif fallback and self.isNsI18N(ns_number, fallback):
return True
else:
return False
def normalizeNamespace(self, code, value):
"""Given a value, attempt to match it with all available namespaces,
with default and localized versions. Sites may have more than one
way to write the same namespace - choose the first one in the list.
If nothing can be normalized, return the original value.
"""
for ns, localized_ns in self.namespaces.iteritems():
if code in localized_ns:
valid = localized_ns[code]
if isinstance(valid, basestring):
valid = [valid]
else:
valid = valid[:]
else:
valid = []
if '_default' in localized_ns:
default = localized_ns['_default']
if isinstance(default, basestring):
default = [default]
if default:
valid.extend(default)
if not valid:
continue
if value in valid:
return self.namespace(code, ns)
return value
def getNamespaceIndex(self, lang, namespace):
"""Given a namespace, attempt to match it with all available
namespaces. Sites may have more than one way to write the same
namespace - choose the first one in the list. Returns namespace
index or None.
"""
namespace = namespace.lower()
for n in self.namespaces.keys():
try:
nslist = self.namespaces[n][lang]
if type(nslist) is not list:
nslist = [nslist]
for ns in nslist:
if ns.lower() == namespace:
return n
except (KeyError, AttributeError):
# The namespace has no localized name defined
pass
if lang != '_default':
# This is not a localized namespace. Try if it
# is a default (English) namespace.
return self.getNamespaceIndex('_default', namespace)
else:
# give up
return None
def category_redirects(self, code, fallback="_default"):
if code in self.category_redirect_templates:
return self.category_redirect_templates[code]
elif fallback:
return self.category_redirect_templates[fallback]
else:
raise KeyError(
"ERROR: title for category redirect template in language '%s' unknown"
% code)
def disambig(self, code, fallback='_default'):
if code in self.disambiguationTemplates:
return self.disambiguationTemplates[code]
elif fallback:
return self.disambiguationTemplates[fallback]
else:
raise KeyError(
"ERROR: title for disambig template in language %s unknown"
% code)
# Returns the title of the special namespace in language 'code', taken from
# dictionary above.
# If the dictionary doesn't contain a translation, it will use language
# 'fallback' (or, if fallback isn't given, MediaWiki default).
# If you want the bot to crash in case of an unknown namespace name, use
# fallback = None.
def special_namespace(self, code, fallback='_default'):
return self.namespace(code, -1, fallback)
def special_namespace_url(self, code, fallback='_default'):
encoded_title = self.namespace(code, -1, fallback).encode(self.code2encoding(code))
return urllib.quote(encoded_title)
def image_namespace(self, code, fallback='_default'):
return self.namespace(code, 6, fallback)
def image_namespace_url(self, code, fallback='_default'):
encoded_title = self.namespace(code, 6, fallback).encode(self.code2encoding(code))
return urllib.quote(encoded_title)
def mediawiki_namespace(self, code, fallback='_default'):
return self.namespace(code, 8, fallback)
def template_namespace(self, code, fallback='_default'):
return self.namespace(code, 10, fallback)
def category_namespace(self, code, fallback='_default'):
return self.namespace(code, 14, fallback)
def category_namespaces(self, code):
return self.namespace(code, 14, all=True)
# Methods
def protocol(self, code):
"""
Can be overridden to return 'https'. Other protocols are not supported.
"""
return 'http%s' % ('', 's')[config.SSL_connection]
def hostname(self, code):
"""The hostname to use for standard http connections."""
return self.langs[code]
def scriptpath(self, code):
"""The prefix used to locate scripts on this wiki.
This is the value displayed when you enter {{SCRIPTPATH}} on a
wiki page (often displayed at [[Help:Variables]] if the wiki has
copied the master help page correctly).
The default value is the one used on Wikimedia Foundation wikis,
but needs to be overridden in the family file for any wiki that
uses a different value.
"""
return '/w'
def path(self, code):
return '%s/index.php' % self.scriptpath(code)
def querypath(self, code):
return '%s/query.php' % self.scriptpath(code)
def apipath(self, code):
return '%s/api.php' % self.scriptpath(code)
def nicepath(self, code):
return '/wiki/'
def dbName(self, code):
# returns the name of the MySQL database
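# e.g. code 'en' in a family named 'wikipedia' gives 'enwikipedia'; families may override this when the actual database name differs.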
return '%s%s' % (code, self.name)
# Which version of MediaWiki is used?
def version(self, code):
"""Return MediaWiki version number as a string."""
# Don't use this, use versionnumber() instead. This only exists
# to not break family files.
# Here we return the latest mw release for downloading
return '1.20wmf2'
def versionnumber(self, code, version=None):
"""Return an int identifying MediaWiki version.
Currently this is implemented as returning the minor version
number; i.e., 'X' in version '1.X.Y'
if version is given (e.g. from a mw page), extract that number
"""
R = re.compile(r"(\d+)\.(\d+)")
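# e.g. version '1.20wmf2' matches groups ('1', '20') and yields 1000 * 1 + 20 - 1000 = 20.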
M = R.search(version or self.version(code))
if not M:
# Version string malformatted; assume it should have been 1.10
return 10
return 1000 * int(M.group(1)) + int(M.group(2)) - 1000
def page_action_address(self, code, name, action):
return '%s?title=%s&action=%s&useskin=monobook' % (
self.path(code), name, action)
def put_address(self, code, name):
return '%s?title=%s&action=submit&useskin=monobook' % (
self.path(code), name)
def get_address(self, code, name):
return '%s?title=%s&redirect=no&useskin=monobook' % (
self.path(code), name)
# The URL to get a page, in the format indexed by Google.
def nice_get_address(self, code, name):
return '%s%s' % (self.nicepath(code), name)
def edit_address(self, code, name):
return '%s?title=%s&action=edit&useskin=monobook' % (
self.path(code), name)
def watch_address(self, code, name):
return '%s?title=%s&action=watch&useskin=monobook' % (
self.path(code), name)
def unwatch_address(self, code, name):
return '%s?title=%s&action=unwatch&useskin=monobook' % (
self.path(code), name)
def purge_address(self, code, name):
return '%s?title=%s&redirect=no&action=purge&useskin=monobook' % (
self.path(code), name)
def references_address(self, code, name):
return '%s?title=%s:Whatlinkshere&target=%s&limit=%d&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), name, config.special_page_limit)
def upload_address(self, code):
return '%s?title=%s:Upload&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def double_redirects_address(self, code, default_limit=True):
if default_limit:
return '%s?title=%s:DoubleRedirects&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
else:
return '%s?title=%s:DoubleRedirects&limit=%d&useskin=monobook' % (
self.path(code), self.special_namespace_url(code),
config.special_page_limit)
def broken_redirects_address(self, code, default_limit=True):
if default_limit:
return '%s?title=%s:BrokenRedirects&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
else:
return '%s?title=%s:BrokenRedirects&limit=%d&useskin=monobook' % (
self.path(code), self.special_namespace_url(code),
config.special_page_limit)
def random_address(self, code):
return "%s?title=%s:Random&useskin=monobook" % (
self.path(code), self.special_namespace_url(code))
def randomredirect_address(self, code):
return "%s?title=%s:RandomRedirect&useskin=monobook" % (
self.path(code), self.special_namespace_url(code))
def allmessages_address(self, code):
return "%s?title=%s:Allmessages&ot=html&useskin=monobook" % (
self.path(code), self.special_namespace_url(code))
def login_address(self, code):
return '%s?title=%s:Userlogin&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def captcha_image_address(self, code, id):
return '%s?title=%s:Captcha/image&wpCaptchaId=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), id)
def watchlist_address(self, code):
return '%s?title=%s:Watchlist/edit&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def contribs_address(self, code, target, limit=500, offset=''):
return '%s?title=%s:Contributions&target=%s&limit=%s&offset=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), target, limit, offset)
def move_address(self, code):
return '%s?title=%s:Movepage&action=submit&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def delete_address(self, code, name):
return '%s?title=%s&action=delete&useskin=monobook' % (
self.path(code), name)
def undelete_view_address(self, code, name, ts=''):
return '%s?title=%s:Undelete&target=%s&timestamp=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), name, ts)
def undelete_address(self, code):
return '%s?title=%s:Undelete&action=submit&useskin=monobook' % (self.path(code), self.special_namespace_url(code))
def protect_address(self, code, name):
return '%s?title=%s&action=protect&useskin=monobook' % (self.path(code),
name)
def unprotect_address(self, code, name):
return '%s?title=%s&action=unprotect&useskin=monobook' % (
self.path(code), name)
def block_address(self, code):
return '%s?title=%s:Blockip&action=submit&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def unblock_address(self, code):
return '%s?title=%s:Ipblocklist&action=submit&useskin=monobook' % (
self.path(code), self.special_namespace_url(code))
def blocksearch_address(self, code, name):
return '%s?title=%s:Ipblocklist&action=search&ip=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), name)
def linksearch_address(self, code, link, limit=500, offset=0):
return '%s?title=%s:Linksearch&limit=%d&offset=%d&target=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), limit, offset,
link)
def version_history_address(self, code, name,
limit=config.special_page_limit):
return '%s?title=%s&action=history&limit=%d&useskin=monobook' % (
self.path(code), name, limit)
def export_address(self, code):
return '%s?title=%s:Export&useskin=monobook' % (
self.path(code), self.special_namespace_url('_default'))
def globalusers_address(self, code, target='', limit=500, offset='',
group=''):
return '%s?title=%s:GlobalUsers&username=%s&limit=%s&offset=%s&group=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), target, limit,
offset, group)
def query_address(self, code):
return '%s?' % self.querypath(code)
def api_address(self, code):
return '%s?' % self.apipath(code)
def search_address(self, code, query, limit=100, namespaces=None):
"""
Constructs a URL for searching using Special:Search
'namespaces' may be an int or a list; an empty list selects
all namespaces. Defaults to namespace 0
"""
namespace_params = ''
if namespaces is not None:
if isinstance(namespaces, int):
namespace_params = "&ns%d=1" % namespaces
elif isinstance(namespaces, list):
if len(namespaces) == 0:
# add all namespaces
namespaces = self.namespaces.keys()
for i in namespaces:
if i >= 0:
namespace_params = namespace_params + '&ns%d=1' % i
return "%s?title=%s:Search&search=%s&limit=%d%s&fulltext=1&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), query, limit,
namespace_params)
def allpages_address(self, code, start, namespace=0):
if self.version(code) == "1.2":
return '%s?title=%s:Allpages&printable=yes&from=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), start)
else:
return '%s?title=%s:Allpages&from=%s&namespace=%s&useskin=monobook' % (
self.path(code), self.special_namespace_url(code), start,
namespace)
def log_address(self, code, limit=50, mode='', user=''):
return "%s?title=Special:Log&type=%s&user=%s&page=&limit=%d&useskin=monobook" % (
self.path(code), mode, user, limit)
def newpages_address(self, code, limit=50, namespace=0):
return "%s?title=%s:Newpages&limit=%d&namespace=%s&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit, namespace)
def longpages_address(self, code, limit=500):
return "%s?title=%s:Longpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def shortpages_address(self, code, limit=500):
return "%s?title=%s:Shortpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def categories_address(self, code, limit=500):
return "%s?title=%s:Categories&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def unusedfiles_address(self, code, limit=500):
return "%s?title=%s:UnusedFiles&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def deadendpages_address(self, code, limit=500):
return "%s?title=%s:Deadendpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def ancientpages_address(self, code, limit=500):
return "%s?title=%s:Ancientpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def lonelypages_address(self, code, limit=500):
return "%s?title=%s:Lonelypages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def protectedpages_address(self, code, limit=500):
return "%s?title=%s:ProtectedPages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def unwatchedpages_address(self, code, limit=500):
return "%s?title=%s:Unwatchedpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def uncategorizedcategories_address(self, code, limit=500):
return "%s?title=%s:Uncategorizedcategories&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def uncategorizedimages_address(self, code, limit=500):
return "%s?title=%s:Uncategorizedimages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def uncategorizedpages_address(self, code, limit=500):
return "%s?title=%s:Uncategorizedpages&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def uncategorizedtemplates_address(self, code, limit=500):
return "%s?title=%s:UncategorizedTemplates&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def unusedcategories_address(self, code, limit=500):
return "%s?title=%s:Unusedcategories&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def wantedcategories_address(self, code, limit=500):
return "%s?title=%s:wantedcategories&limit=%d&useskin=monobook&uselang=en" % (
self.path(code), self.special_namespace_url(code), limit)
def withoutinterwiki_address(self, code, limit=500):
return "%s?title=%s:Withoutinterwiki&limit=%d&useskin=monobook" % (
self.path(code), self.special_namespace_url(code), limit)
def code2encoding(self, code):
"""Return the encoding for a specific language wiki"""
return 'utf-8'
def code2encodings(self, code):
"""Return a list of historical encodings for a specific language
wiki"""
return self.code2encoding(code),
# aliases
def encoding(self, code):
"""Return the encoding for a specific language wiki"""
return self.code2encoding(code)
def encodings(self, code):
"""Return a list of historical encodings for a specific language
wiki"""
return self.code2encodings(code)
def __cmp__(self, otherfamily):
try:
return cmp(self.name, otherfamily.name)
except AttributeError:
return cmp(id(self), id(otherfamily))
def __hash__(self):
return hash(self.name)
def __repr__(self):
return 'Family("%s")' % self.name
def RversionTab(self, code):
"""Change this to some regular expression that shows the page we
found is an existing page, in case the normal regexp does not work.
"""
return None
def has_query_api(self, code):
"""Is query.php installed in the wiki?"""
return False
def shared_image_repository(self, code):
"""Return the shared image repository, if any."""
return (None, None)
def shared_data_repository(self, code, transcluded=False):
"""Return the shared wikidata repository, if any."""
return (None, None)
def server_time(self, code):
"""Return a datetime object representing server time"""
# TODO : If the local computer time is wrong, result will be wrong
return datetime.utcnow() + self.servergmtoffset
def isPublic(self, code):
"""Does the wiki require logging in before viewing it?"""
return True
def post_get_convert(self, site, getText):
"""Does a conversion on the retrieved text from the wiki
i.e. Esperanto X-conversion """
return getText
def pre_put_convert(self, site, putText):
"""Does a conversion on the text to insert on the wiki
i.e. Esperanto X-conversion """
return putText
# Parent class for all wikimedia families
class WikimediaFamily(Family):
def __init__(self):
super(WikimediaFamily, self).__init__()
self.namespaces[828] = {
'_default': u'Module',
'ab': u'Модуль',
'ace': u'Modul',
'als': u'Modul',
'an': u'Modulo',
'ar': u'وحدة',
'arz': u'وحدة',
'ast': u'Módulu',
'av': u'Модуль',
'ay': u'Módulo',
'ba': u'Модуль',
'bar': u'Modul',
'be-x-old': u'Модуль',
'bjn': u'Modul',
'bn': u'মডিউল',
'bpy': u'মডিউল',
'bs': u'Modul',
'bug': u'Modul',
'bxr': u'Модуль',
'ca': u'Mòdul',
'cbk-zam': u'Módulo',
'ce': u'Модуль',
'cs': u'Modul',
'csb': u'Moduł',
'cv': u'Модуль',
'cy': u'Modiwl',
'da': u'Modul',
'de': u'Modul',
'dsb': u'Modul',
'dv': u'މޮޑިއުލް',
'eml': u'Modulo',
'eo': u'Modulo',
'es': u'Módulo',
'et': u'Moodul',
'eu': u'Modulu',
'fa': u'پودمان',
'fiu-vro': u'Moodul',
'frr': u'Modul',
'fur': u'Modulo',
'gag': u'Modül',
'gan': u'模組',
'gl': u'Módulo',
'glk': u'پودمان',
'gn': u'Módulo',
'he': u'יחידה',
'hsb': u'Modul',
'hu': u'Modul',
'id': u'Modul',
'ilo': u'Modulo',
'it': u'Modulo',
'ja': u'モジュール',
'jv': u'Modul',
'ka': u'მოდული',
'kl': u'Modul',
'ko': u'모듈',
'koi': u'Модуль',
'krc': u'Модуль',
'ksh': u'Modul',
'kv': u'Модуль',
'lad': u'Módulo',
'lb': u'Modul',
'lbe': u'Модуль',
'lez': u'Модуль',
'lij': u'Modulo',
'lmo': u'Modulo',
'map-bms': u'Modul',
'mhr': u'Модуль',
'min': u'Modul',
'mrj': u'Модуль',
'ms': u'Modul',
'mwl': u'Módulo',
'myv': u'Модуль',
'mzn': u'پودمان',
'nah': u'Módulo',
'nap': u'Modulo',
'nds': u'Modul',
'nn': u'Modul',
'os': u'Модуль',
'pdc': u'Modul',
'pfl': u'Modul',
'pl': u'Moduł',
'pms': u'Modulo',
'pt': u'Módulo',
'qu': u'Módulo',
'ru': u'Модуль',
'rue': u'Модуль',
'sah': u'Модуль',
'scn': u'Modulo',
'sk': u'Modul',
'sl': u'Modul',
'stq': u'Modul',
'su': u'Modul',
'sv': u'Modul',
'szl': u'Moduł',
'tr': u'Modül',
'tt': u'Модуль',
'udm': u'Модуль',
'uk': u'Модуль',
'vec': u'Modulo',
'vep': u'Moodul',
'vi': u'Mô đun',
'wuu': u'模块',
'xal': u'Модуль',
'xmf': u'მოდული',
'yi': u'יחידה',
'za': u'模块',
'zh': u'模块',
'zh-yue': u'模組',
}
self.namespaces[829] = {
'_default': u'Module talk',
'ab': u'Обсуждение модуля',
'ace': u'Pembicaraan Modul',
'als': u'Modul Diskussion',
'an': u'Descusión modulo',
'ar': u'نقاش الوحدة',
'arz': u'نقاش الوحدة',
'ast': u'Alderique módulu',
'av': u'Обсуждение модуля',
'ay': u'Módulo discusión',
'ba': u'Модуль буйынса фекерләшеү',
'bar': u'Modul Diskussion',
'be-x-old': u'Абмеркаваньне модулю',
'bjn': u'Pembicaraan Modul',
'bm': u'Discussion module',
'bn': u'মডিউল আলাপ',
'bpy': u'মডিউল আলাপ',
'bs': u'Razgovor o modulu',
'bug': u'Pembicaraan Modul',
'bxr': u'Обсуждение модуля',
'ca': u'Mòdul Discussió',
'cbk-zam': u'Módulo discusión',
'ce': u'Обсуждение модуля',
'cs': u'Diskuse k modulu',
'csb': u'Dyskusja modułu',
'cv': u'Обсуждение модуля',
'cy': u'Sgwrs modiwl',
'da': u'Moduldiskussion',
'de': u'Modul Diskussion',
'dsb': u'Modul diskusija',
'dv': u'މޮޑިއުލް ޚިޔާލު',
'eml': u'Discussioni modulo',
'eo': u'Modulo-Diskuto',
'es': u'Módulo discusión',
'et': u'Mooduli arutelu',
'eu': u'Modulu eztabaida',
'fa': u'بحث پودمان',
'ff': u'Discussion module',
'fiu-vro': u'Mooduli arutelu',
'fr': u'Discussion module',
'frp': u'Discussion module',
'frr': u'Modul Diskussion',
'fur': u'Discussioni modulo',
'gag': u'Modül tartışma',
'gan': u'模組討論',
'gl': u'Conversa módulo',
'glk': u'بحث پودمان',
'gn': u'Módulo discusión',
'he': u'שיחת יחידה',
'hsb': u'Modul diskusija',
'ht': u'Discussion module',
'hu': u'Modulvita',
'id': u'Pembicaraan Modul',
'ilo': u'Modulo tungtungan',
'it': u'Discussioni modulo',
'ja': u'モジュール・トーク',
'jv': u'Pembicaraan Modul',
'kl': u'Moduldiskussion',
'ko': u'모듈토론',
'koi': u'Обсуждение модуля',
'krc': u'Обсуждение модуля',
'ksh': u'Modul Diskussion',
'kv': u'Обсуждение модуля',
'lad': u'Módulo discusión',
'lb': u'Modul Diskussion',
'lbe': u'Обсуждение модуля',
'lez': u'Обсуждение модуля',
'li': u'Overleg module',
'lij': u'Discussioni modulo',
'lmo': u'Discussioni modulo',
'ln': u'Discussion module',
'map-bms': u'Pembicaraan Modul',
'mg': u'Discussion module',
'mhr': u'Обсуждение модуля',
'min': u'Rundiang Modul',
'mrj': u'Обсуждение модуля',
'mwl': u'Módulo Discussão',
'myv': u'Обсуждение модуля',
'mzn': u'بحث پودمان',
'nah': u'Módulo discusión',
'nap': u'Discussioni modulo',
'nds': u'Modul Diskussion',
'nds-nl': u'Overleg module',
'nl': u'Overleg module',
'nn': u'Moduldiskusjon',
'os': u'Обсуждение модуля',
'pcd': u'Discussion module',
'pdc': u'Modul Diskussion',
'pfl': u'Modul Diskussion',
'pl': u'Dyskusja modułu',
'pms': u'Discussioni modulo',
'pt': u'Módulo Discussão',
'qu': u'Módulo discusión',
'ru': u'Обсуждение модуля',
'rue': u'Обговорення модуля',
'sah': u'Обсуждение модуля',
'scn': u'Discussioni modulo',
'sg': u'Discussion module',
'sk': u'Diskusia k modulu',
'sl': u'Pogovor o modulu',
'srn': u'Overleg module',
'stq': u'Modul Diskussion',
'su': u'Pembicaraan Modul',
'sv': u'Moduldiskussion',
'szl': u'Dyskusja modułu',
'tr': u'Modül tartışma',
'tt': u'Обсуждение модуля',
'ty': u'Discussion module',
'udm': u'Обсуждение модуля',
'uk': u'Обговорення модуля',
'vec': u'Discussioni modulo',
'vep': u'Mooduli arutelu',
'vi': u'Thảo luận Mô đun',
'vls': u'Overleg module',
'wa': u'Discussion module',
'wo': u'Discussion module',
'wuu': u'模块讨论',
'xal': u'Обсуждение модуля',
'yi': u'שיחת יחידה',
'za': u'模块讨论',
'zea': u'Overleg module',
'zh': u'模块讨论',
'zh-yue': u'模組討論',
}
self.namespacesWithSubpage.extend([4, 12])
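# Namespaces 4 (Project) and 12 (Help) also support subpages on Wikimedia wikis.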
# Projects available for CentralAuth cross-project login.
self.cross_projects = [
'commons', 'incubator', 'mediawiki', 'meta', 'species', 'test',
'wikibooks', 'wikidata', 'wikinews', 'wikipedia', 'wikiquote',
'wikisource', 'wikiversity', 'wiktionary',
]
def version(self, code):
"""Return Wikimedia projects version number as a string."""
# Don't use this, use versionnumber() instead. This only exists
# to not break family files.
return '1.22wmf12'
def shared_image_repository(self, code):
return ('commons', 'commons')
def protocol(self, code):
return 'https'
| races1986/SafeLanguage | CEM/family.py | Python | epl-1.0 | 242,167 |
"""cellprofiler.gui.tests.__init__.py
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
if __name__ == "__main__":
import nose
nose.main()
| LeeKamentsky/CellProfiler | cellprofiler/gui/tests/__init__.py | Python | gpl-2.0 | 421 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <stack.box@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Gui core module contains:
- App
+ MainWindow
+ Icons
- Paned
- FallbackPaned
+ Views
"""
| dennisfrancis/PacketManipulator | umit/pm/gui/core/__init__.py | Python | gpl-2.0 | 971 |
import gpib
class PARWriteError(Exception):
pass
class PARReadError(Exception):
pass
class PARCellWorking(Exception):
pass
class Poll:
COMMAND_DONE = 1
COMMAND_ERROR = 2
CURVE_DONE = 4
OVERLOAD = 16
SWEEP_DONE = 32
SRQ = 64
OUTPUT_READY = 128
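# Bit masks of the instrument's GPIB serial-poll status byte; write() below busy-waits on COMMAND_DONE and read() on OUTPUT_READY.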
class PAR(object):
def __init__(self, addres):
self.dev = gpib.dev(*addres)
self.write('DD 13')
self.write('TYPE Successful init"')
def write(self, cmd):
while True:
status = ord(gpib.serial_poll(self.dev))
#print '.',
if status & Poll.COMMAND_DONE != 0:
break
elif status & Poll.OUTPUT_READY != 0:
raise PARWriteError("Data is ready, can't write")
gpib.write(self.dev, cmd)
def read(self):
while True:
status = ord(gpib.serial_poll(self.dev))
#print ':', status,
if status & Poll.OUTPUT_READY != 0:
break
elif status & Poll.COMMAND_DONE != 0:
raise PARReadError("Nothing to read")
return gpib.read(self.dev, 1024)
def ask(self, cmd):
self.write(cmd)
return self.read()
def wait_for_relay(self):
cell_hw_switch = int(self.ask("CS").split()[0])
cell_relay = int(self.ask("CELL").split()[0])
if cell_hw_switch and cell_relay:
raise PARCellWorking("Both cell switches enabled")
elif cell_hw_switch == False and cell_relay:
raise PARCellWorking("Cell relay in waiting status...")
elif cell_hw_switch == True and cell_relay == False:
raise PARCellWorking("Previous measurement not finished!")
elif cell_hw_switch == False and cell_relay == False:
for i in range(100):
print "Press Cell Switch!"
import time
time.sleep(0.2)
cell_hw_switch = int(p.ask("CS").split()[0])
if cell_hw_switch:
p.write("CELL 1")
print "Measurement started"
return True
return False
p = PAR( (0,14) )
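# Illustrative usage sketch (added; not part of the original script). It reuses
# the GPIB address assumption above and only commands already used in this file:
# print p.ask("CS")        # query the hardware cell switch state
# print p.ask("CELL")      # query the cell relay state
# if p.wait_for_relay():   # block until the operator presses the cell switch
#     print "Measurement running"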
| leszektarkowski/PAR273 | server/server.py | Python | gpl-2.0 | 2,220 |
# -*- coding: utf-8 -*-
__title__ = 'transliterate.contrib.languages.hi.translit_language_pack'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('HindiLanguagePack',)
from transliterate.base import TranslitLanguagePack, registry
class HindiLanguagePack(TranslitLanguagePack):
"""
Language pack for Hindi language. See
`http://en.wikipedia.org/wiki/Hindi` for details.
"""
language_code = "hi"
language_name = "Hindi"
    character_ranges = ((0x0900, 0x097f),)  # Devanagari Unicode block
mapping = (
u"aeof", #AEOF
u"अइओफ",
# ae of
)
#reversed_specific_mapping = (
# u"θΘ",
# u"uU"
#)
pre_processor_mapping = {
u"b": u"बी",
u"g": u"जी",
u"d": u"डी",
u"z": u"जड़",
u"h": u"एच",
u"i": u"आई",
u"l": u"अल",
u"m": u"ऍम",
u"n": u"अन",
u"x": u"अक्स",
u"k": u"के",
u"p": u"पी",
u"r": u"आर",
u"s": u"एस",
u"t": u"टी",
u"y": u"वाय",
u"w": u"डब्लू",
u"u": u"यू",
u"c": u"सी",
u"j": u"जे",
u"q": u"क्यू",
u"z": u"जड़",
}
detectable = True
#registry.register(HindiLanguagePack)
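# Illustrative usage sketch (added; assumes the registration line above is
# uncommented; the sample string is hypothetical):
# from transliterate import translit
# registry.register(HindiLanguagePack)
# print(translit(u"ab", "hi"))   # Latin -> Devanagari via the mappings above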
| akosiaris/transliterate | src/transliterate/contrib/languages/hi/translit_language_pack.py | Python | gpl-2.0 | 1,416 |
from __future__ import division
import logging
import time
import re
import os
import tempfile
import threading
import shutil
import stat
import xml.dom.minidom
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from avocado.core import exceptions
from avocado.utils import iso9660
from avocado.utils import process
from avocado.utils import crypto
from avocado.utils import download
from virttest import virt_vm
from virttest import asset
from virttest import utils_disk
from virttest import qemu_monitor
from virttest import remote
from virttest import syslog_server
from virttest import http_server
from virttest import data_dir
from virttest import utils_net
from virttest import utils_test
from virttest import utils_misc
from virttest import funcatexit
from virttest import storage
from virttest import error_context
from virttest import qemu_storage
from virttest.compat_52lts import decode_to_text
# Whether to print all shell commands called
DEBUG = False
_url_auto_content_server_thread = None
_url_auto_content_server_thread_event = None
_unattended_server_thread = None
_unattended_server_thread_event = None
_syslog_server_thread = None
_syslog_server_thread_event = None
def start_auto_content_server_thread(port, path):
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is None:
_url_auto_content_server_thread_event = threading.Event()
_url_auto_content_server_thread = threading.Thread(
target=http_server.http_server,
args=(port, path, terminate_auto_content_server_thread))
_url_auto_content_server_thread.start()
def start_unattended_server_thread(port, path):
global _unattended_server_thread
global _unattended_server_thread_event
if _unattended_server_thread is None:
_unattended_server_thread_event = threading.Event()
_unattended_server_thread = threading.Thread(
target=http_server.http_server,
args=(port, path, terminate_unattended_server_thread))
_unattended_server_thread.start()
def terminate_auto_content_server_thread():
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is None:
return False
if _url_auto_content_server_thread_event is None:
return False
if _url_auto_content_server_thread_event.isSet():
return True
return False
def terminate_unattended_server_thread():
global _unattended_server_thread, _unattended_server_thread_event
if _unattended_server_thread is None:
return False
if _unattended_server_thread_event is None:
return False
if _unattended_server_thread_event.isSet():
return True
return False
class RemoteInstall(object):
"""
    Represents an install HTTP server that we can control according to our needs.
"""
def __init__(self, path, ip, port, filename):
self.path = path
utils_disk.cleanup(self.path)
os.makedirs(self.path)
self.ip = ip
self.port = port
self.filename = filename
start_unattended_server_thread(self.port, self.path)
def get_url(self):
return 'http://%s:%s/%s' % (self.ip, self.port, self.filename)
def get_answer_file_path(self, filename):
return os.path.join(self.path, filename)
def close(self):
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
logging.debug("unattended http server %s successfully created",
self.get_url())
class UnattendedInstallConfig(object):
"""
Creates a floppy disk image that will contain a config file for unattended
OS install. The parameters to the script are retrieved from environment
variables.
"""
def __init__(self, test, params, vm):
"""
Sets class attributes from test parameters.
:param test: QEMU test object.
:param params: Dictionary with test parameters.
"""
root_dir = data_dir.get_data_dir()
self.deps_dir = os.path.join(test.virtdir, 'deps')
self.unattended_dir = os.path.join(test.virtdir, 'unattended')
self.results_dir = test.debugdir
self.params = params
self.attributes = ['kernel_args', 'finish_program', 'cdrom_cd1',
'unattended_file', 'medium', 'url', 'kernel',
'initrd', 'nfs_server', 'nfs_dir', 'install_virtio',
'floppy_name', 'cdrom_unattended', 'boot_path',
'kernel_params', 'extra_params', 'qemu_img_binary',
'cdkey', 'finish_program', 'vm_type',
'process_check', 'vfd_size', 'cdrom_mount_point',
'floppy_mount_point', 'cdrom_virtio',
'virtio_floppy', 're_driver_match',
're_hardware_id', 'driver_in_floppy', 'vga',
'unattended_file_kernel_param_name']
for a in self.attributes:
setattr(self, a, params.get(a, ''))
# Make finish.bat work well with positional arguments
if not self.process_check.strip(): # pylint: disable=E0203
self.process_check = '""' # pylint: disable=E0203
# Will setup the virtio attributes
v_attributes = ['virtio_floppy', 'virtio_scsi_path',
'virtio_storage_path', 'virtio_network_path',
'virtio_balloon_path', 'virtio_viorng_path',
'virtio_vioser_path', 'virtio_pvpanic_path',
'virtio_vioinput_path',
'virtio_oemsetup_id',
'virtio_network_installer_path',
'virtio_balloon_installer_path',
'virtio_qxl_installer_path']
for va in v_attributes:
setattr(self, va, params.get(va, ''))
self.tmpdir = test.tmpdir
self.qemu_img_binary = utils_misc.get_qemu_img_binary(params)
def get_unattended_file(backend):
providers = asset.get_test_provider_names(backend)
if not providers:
return
for provider_name in providers:
provider_info = asset.get_test_provider_info(provider_name)
if backend not in provider_info["backends"]:
continue
if "path" not in provider_info["backends"][backend]:
continue
path = provider_info["backends"][backend]["path"]
tp_unattended_file = os.path.join(path, self.unattended_file)
if os.path.exists(tp_unattended_file):
# Using unattended_file from test-provider
unattended_file = tp_unattended_file
# Take the first matched
return unattended_file
if getattr(self, 'unattended_file'):
# Fail-back to general unattended_file
unattended_file = os.path.join(test.virtdir, self.unattended_file)
for backend in asset.get_known_backends():
found_file = get_unattended_file(backend)
if found_file:
unattended_file = found_file
break
self.unattended_file = unattended_file
if getattr(self, 'finish_program'):
self.finish_program = os.path.join(test.virtdir,
self.finish_program)
if getattr(self, 'cdrom_cd1'):
self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
dir=self.tmpdir)
if getattr(self, 'cdrom_unattended'):
self.cdrom_unattended = os.path.join(root_dir,
self.cdrom_unattended)
if getattr(self, 'virtio_floppy'):
self.virtio_floppy = os.path.join(root_dir, self.virtio_floppy)
if getattr(self, 'cdrom_virtio'):
self.cdrom_virtio = os.path.join(root_dir, self.cdrom_virtio)
if getattr(self, 'kernel'):
self.kernel = os.path.join(root_dir, self.kernel)
if getattr(self, 'initrd'):
self.initrd = os.path.join(root_dir, self.initrd)
if self.medium == 'nfs':
self.nfs_mount = tempfile.mkdtemp(prefix='nfs_',
dir=self.tmpdir)
setattr(self, 'floppy', self.floppy_name)
if getattr(self, 'floppy'):
self.floppy = os.path.join(root_dir, self.floppy)
if not os.path.isdir(os.path.dirname(self.floppy)):
os.makedirs(os.path.dirname(self.floppy))
self.image_path = os.path.dirname(self.kernel)
# Content server params
# lookup host ip address for first nic by interface name
try:
netdst = vm.virtnet[0].netdst
            # The 'netdst' parameter comes from the cartesian config and is
            # sometimes empty. Call get_ip_address_by_interface() only when
            # netdst is actually set to something.
if netdst:
auto_ip = utils_net.get_ip_address_by_interface(netdst)
else:
auto_ip = utils_net.get_host_ip_address(params)
except utils_net.NetError:
auto_ip = None
params_auto_ip = params.get('url_auto_ip', None)
if params_auto_ip:
self.url_auto_content_ip = params_auto_ip
else:
self.url_auto_content_ip = auto_ip
self.url_auto_content_port = None
# Kickstart server params
# use the same IP as url_auto_content_ip, but a different port
self.unattended_server_port = None
# Embedded Syslog Server
self.syslog_server_enabled = params.get('syslog_server_enabled', 'no')
self.syslog_server_ip = params.get('syslog_server_ip', auto_ip)
self.syslog_server_port = int(params.get('syslog_server_port', 5140))
self.syslog_server_tcp = params.get('syslog_server_proto',
'tcp') == 'tcp'
self.vm = vm
@error_context.context_aware
def get_driver_hardware_id(self, driver, run_cmd=True):
"""
Get windows driver's hardware id from inf files.
        :param driver: Configurable driver name.
:param run_cmd: Use hardware id in windows cmd command or not.
:return: Windows driver's hardware id
"""
if not os.path.exists(self.cdrom_mount_point):
os.mkdir(self.cdrom_mount_point)
if not os.path.exists(self.floppy_mount_point):
os.mkdir(self.floppy_mount_point)
if not os.path.ismount(self.cdrom_mount_point):
process.system("mount %s %s -o loop" % (self.cdrom_virtio,
self.cdrom_mount_point), timeout=60)
if not os.path.ismount(self.floppy_mount_point):
process.system("mount %s %s -o loop" % (self.virtio_floppy,
self.floppy_mount_point), timeout=60)
drivers_d = []
driver_link = None
if self.driver_in_floppy is not None:
driver_in_floppy = self.driver_in_floppy
drivers_d = driver_in_floppy.split()
else:
drivers_d.append('qxl.inf')
for driver_d in drivers_d:
if driver_d in driver:
driver_link = os.path.join(self.floppy_mount_point, driver)
if driver_link is None:
driver_link = os.path.join(self.cdrom_mount_point, driver)
try:
txt = open(driver_link, "r").read()
hwid = re.findall(self.re_hardware_id, txt)[-1].rstrip()
if run_cmd:
hwid = '^&'.join(hwid.split('&'))
return hwid
except Exception as e:
logging.error("Fail to get hardware id with exception: %s" % e)
@error_context.context_aware
def update_driver_hardware_id(self, driver):
"""
Update driver string with the hardware id get from inf files
@driver: driver string
:return: new driver string
"""
if 'hwid' in driver:
if 'hwidcmd' in driver:
run_cmd = True
else:
run_cmd = False
if self.re_driver_match is not None:
d_str = self.re_driver_match
else:
d_str = "(\S+)\s*hwid"
drivers_in_floppy = []
if self.driver_in_floppy is not None:
drivers_in_floppy = self.driver_in_floppy.split()
mount_point = self.cdrom_mount_point
storage_path = self.cdrom_virtio
for driver_in_floppy in drivers_in_floppy:
if driver_in_floppy in driver:
mount_point = self.floppy_mount_point
storage_path = self.virtio_floppy
break
d_link = re.findall(d_str, driver)[0].split(":")[1]
d_link = "/".join(d_link.split("\\\\")[1:])
hwid = utils_test.get_driver_hardware_id(d_link, mount_point,
storage_path,
run_cmd=run_cmd)
if hwid:
driver = driver.replace("hwidcmd", hwid.strip())
else:
raise exceptions.TestError("Can not find hwid from the driver"
" inf file")
return driver
def answer_kickstart(self, answer_path):
"""
Replace KVM_TEST_CDKEY (in the unattended file) with the cdkey
provided for this test and replace the KVM_TEST_MEDIUM with
the tree url or nfs address provided for this test.
        :param answer_path: path the generated answer file is written to
"""
contents = open(self.unattended_file).read()
dummy_cdkey_re = r'\bKVM_TEST_CDKEY\b'
if re.search(dummy_cdkey_re, contents):
if self.cdkey:
contents = re.sub(dummy_cdkey_re, self.cdkey, contents)
dummy_medium_re = r'\bKVM_TEST_MEDIUM\b'
if self.medium in ["cdrom", "kernel_initrd"]:
content = "cdrom"
elif self.medium == "url":
content = "url --url %s" % self.url
elif self.medium == "nfs":
content = "nfs --server=%s --dir=%s" % (self.nfs_server,
self.nfs_dir)
else:
raise ValueError("Unexpected installation medium %s" % self.url)
contents = re.sub(dummy_medium_re, content, contents)
dummy_rh_system_stream_id_re = r'\bRH_SYSTEM_STREAM_ID\b'
if re.search(dummy_rh_system_stream_id_re, contents):
rh_system_stream_id = self.params.get("rh_system_stream_id", "")
contents = re.sub(dummy_rh_system_stream_id_re, rh_system_stream_id, contents)
dummy_repos_re = r'\bKVM_TEST_REPOS\b'
if re.search(dummy_repos_re, contents):
repo_list = self.params.get("kickstart_extra_repos", "").split()
lines = ["# Extra repositories"]
for index, repo_url in enumerate(repo_list, 1):
line = ("repo --name=extra_repo%d --baseurl=%s --install "
"--noverifyssl" % (index, repo_url))
lines.append(line)
content = "\n".join(lines)
contents = re.sub(dummy_repos_re, content, contents)
dummy_logging_re = r'\bKVM_TEST_LOGGING\b'
if re.search(dummy_logging_re, contents):
if self.syslog_server_enabled == 'yes':
log = 'logging --host=%s --port=%s --level=debug'
log = log % (self.syslog_server_ip, self.syslog_server_port)
else:
log = ''
contents = re.sub(dummy_logging_re, log, contents)
dummy_graphical_re = re.compile('GRAPHICAL_OR_TEXT')
if dummy_graphical_re.search(contents):
if not self.vga or self.vga.lower() == "none":
contents = dummy_graphical_re.sub('text', contents)
else:
contents = dummy_graphical_re.sub('graphical', contents)
"""
cmd_only_use_disk is used for specifying disk which will be used during installation.
"""
if self.params.get("cmd_only_use_disk"):
insert_info = self.params.get("cmd_only_use_disk") + '\n'
contents += insert_info
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
with open(answer_path, 'w') as answer_file:
answer_file.write(contents)
def answer_windows_ini(self, answer_path):
parser = ConfigParser.ConfigParser()
parser.read(self.unattended_file)
# First, replacing the CDKEY
if self.cdkey:
parser.set('UserData', 'ProductKey', self.cdkey)
else:
logging.error("Param 'cdkey' required but not specified for "
"this unattended installation")
# Now, replacing the virtio network driver path, under double quotes
if self.install_virtio == 'yes':
parser.set('Unattended', 'OemPnPDriversPath',
'"%s"' % self.virtio_network_path)
else:
parser.remove_option('Unattended', 'OemPnPDriversPath')
dummy_re_dirver = {'KVM_TEST_VIRTIO_NETWORK_INSTALLER':
'virtio_network_installer_path',
'KVM_TEST_VIRTIO_BALLOON_INSTALLER':
'virtio_balloon_installer_path',
'KVM_TEST_VIRTIO_QXL_INSTALLER':
'virtio_qxl_installer_path'}
dummy_re = ""
for dummy in dummy_re_dirver:
if dummy_re:
dummy_re += "|%s" % dummy
else:
dummy_re = dummy
# Replace the process check in finish command
dummy_process_re = r'\bPROCESS_CHECK\b'
for opt in parser.options('GuiRunOnce'):
check = parser.get('GuiRunOnce', opt)
if re.search(dummy_process_re, check):
process_check = re.sub(dummy_process_re,
"%s" % self.process_check,
check)
parser.set('GuiRunOnce', opt, process_check)
elif re.findall(dummy_re, check):
dummy = re.findall(dummy_re, check)[0]
driver = getattr(self, dummy_re_dirver[dummy])
if driver.endswith("msi"):
driver = 'msiexec /passive /package ' + driver
elif 'INSTALLER' in dummy:
driver = self.update_driver_hardware_id(driver)
elif driver is None:
driver = 'dir'
check = re.sub(dummy, driver, check)
parser.set('GuiRunOnce', opt, check)
# Now, writing the in memory config state to the unattended file
fp = open(answer_path, 'w')
parser.write(fp)
fp.close()
# Let's read it so we can debug print the contents
fp = open(answer_path, 'r')
contents = fp.read()
fp.close()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
def answer_windows_xml(self, answer_path):
doc = xml.dom.minidom.parse(self.unattended_file)
if self.cdkey:
# First, replacing the CDKEY
product_key = doc.getElementsByTagName('ProductKey')[0]
if product_key.getElementsByTagName('Key'):
key = product_key.getElementsByTagName('Key')[0]
key_text = key.childNodes[0]
else:
key_text = product_key.childNodes[0]
assert key_text.nodeType == doc.TEXT_NODE
key_text.data = self.cdkey
else:
logging.error("Param 'cdkey' required but not specified for "
"this unattended installation")
# Now, replacing the virtio driver paths or removing the entire
# component PnpCustomizationsWinPE Element Node
if self.install_virtio == 'yes':
paths = doc.getElementsByTagName("Path")
values = [self.virtio_scsi_path,
self.virtio_storage_path, self.virtio_network_path,
self.virtio_balloon_path, self.virtio_viorng_path,
self.virtio_vioser_path, self.virtio_pvpanic_path,
self.virtio_vioinput_path]
# XXX: Force to replace the drive letter which loaded the
# virtio driver by the specified letter.
letter = self.params.get('virtio_drive_letter')
if letter is not None:
values = (re.sub(r'^\w+', letter, val) for val in values)
for path, value in list(zip(paths, values)):
if value:
path_text = path.childNodes[0]
assert path_text.nodeType == doc.TEXT_NODE
path_text.data = value
else:
settings = doc.getElementsByTagName("settings")
for s in settings:
for c in s.getElementsByTagName("component"):
if (c.getAttribute('name') ==
"Microsoft-Windows-PnpCustomizationsWinPE"):
s.removeChild(c)
# Last but not least important, replacing the virtio installer command
# And process check in finish command
command_lines = doc.getElementsByTagName("CommandLine")
dummy_re_dirver = {'KVM_TEST_VIRTIO_NETWORK_INSTALLER':
'virtio_network_installer_path',
'KVM_TEST_VIRTIO_BALLOON_INSTALLER':
'virtio_balloon_installer_path',
'KVM_TEST_VIRTIO_QXL_INSTALLER':
'virtio_qxl_installer_path'}
process_check_re = 'PROCESS_CHECK'
dummy_re = ""
for dummy in dummy_re_dirver:
if dummy_re:
dummy_re += "|%s" % dummy
else:
dummy_re = dummy
for command_line in command_lines:
command_line_text = command_line.childNodes[0]
assert command_line_text.nodeType == doc.TEXT_NODE
if re.findall(dummy_re, command_line_text.data):
dummy = re.findall(dummy_re, command_line_text.data)[0]
driver = getattr(self, dummy_re_dirver[dummy])
if driver.endswith("msi"):
driver = 'msiexec /passive /package ' + driver
elif 'INSTALLER' in dummy:
driver = self.update_driver_hardware_id(driver)
t = command_line_text.data
t = re.sub(dummy_re, driver, t)
command_line_text.data = t
if process_check_re in command_line_text.data:
t = command_line_text.data
t = re.sub(process_check_re, self.process_check, t)
command_line_text.data = t
contents = doc.toxml()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
fp = open(answer_path, 'w')
doc.writexml(fp)
fp.close()
def answer_suse_xml(self, answer_path):
# There's nothing to replace on SUSE files to date. Yay!
doc = xml.dom.minidom.parse(self.unattended_file)
contents = doc.toxml()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
fp = open(answer_path, 'w')
doc.writexml(fp)
fp.close()
def preseed_initrd(self):
"""
Puts a preseed file inside a gz compressed initrd file.
Debian and Ubuntu use preseed as the OEM install mechanism. The only
way to get fully automated setup without resorting to kernel params
is to add a preseed.cfg file at the root of the initrd image.
"""
logging.debug("Remastering initrd.gz file with preseed file")
dest_fname = 'preseed.cfg'
remaster_path = os.path.join(self.image_path, "initrd_remaster")
if not os.path.isdir(remaster_path):
os.makedirs(remaster_path)
base_initrd = os.path.basename(self.initrd)
os.chdir(remaster_path)
process.run("gzip -d < ../%s | fakeroot cpio --extract --make-directories "
"--no-absolute-filenames" % base_initrd, verbose=DEBUG,
shell=True)
process.run("cp %s %s" % (self.unattended_file, dest_fname),
verbose=DEBUG)
# For libvirt initrd.gz will be renamed to initrd.img in setup_cdrom()
process.run("find . | fakeroot cpio -H newc --create | gzip -9 > ../%s" %
base_initrd, verbose=DEBUG, shell=True)
os.chdir(self.image_path)
process.run("rm -rf initrd_remaster", verbose=DEBUG)
contents = open(self.unattended_file).read()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
def set_unattended_param_in_kernel(self, unattended_file_url):
'''
Check if kernel parameter that sets the unattended installation file
is present.
Add the parameter with the passed URL if it does not exist,
otherwise replace the existing URL.
:param unattended_file_url: URL to unattended installation file
:return: modified kernel parameters
'''
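        # Illustrative example (hypothetical values): with
        # unattended_file_kernel_param_name == 'ks' and
        # kernel_params == 'console=ttyS0 ks=floppy', passing
        # 'http://192.168.122.1:8000/ks.cfg' yields
        # 'console=ttyS0 ks=http://192.168.122.1:8000/ks.cfg'.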
unattended_param = '%s=%s' % (self.unattended_file_kernel_param_name,
unattended_file_url)
if '%s=' % self.unattended_file_kernel_param_name in self.kernel_params:
kernel_params = re.sub('%s=[\w\d:\-\./]+' %
(self.unattended_file_kernel_param_name),
unattended_param,
self.kernel_params)
else:
kernel_params = '%s %s' % (self.kernel_params, unattended_param)
return kernel_params
def setup_unattended_http_server(self):
'''
Setup a builtin http server for serving the kickstart/preseed file
Does nothing if unattended file is not a kickstart/preseed file
'''
if self.unattended_file.endswith('.ks') or self.unattended_file.endswith('.preseed'):
# Red Hat kickstart install or Ubuntu preseed install
dest_fname = 'ks.cfg'
answer_path = os.path.join(self.tmpdir, dest_fname)
self.answer_kickstart(answer_path)
if self.unattended_server_port is None:
self.unattended_server_port = utils_misc.find_free_port(
8000,
8099,
self.url_auto_content_ip)
start_unattended_server_thread(self.unattended_server_port,
self.tmpdir)
else:
return
# Point installation to this kickstart url
unattended_file_url = 'http://%s:%s/%s' % (self.url_auto_content_ip,
self.unattended_server_port,
dest_fname)
kernel_params = self.set_unattended_param_in_kernel(
unattended_file_url)
# reflect change on params
self.kernel_params = kernel_params
def setup_boot_disk(self):
if self.unattended_file.endswith('.sif'):
dest_fname = 'winnt.sif'
setup_file = 'winnt.bat'
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_windows_ini(answer_path)
setup_file_path = os.path.join(self.unattended_dir, setup_file)
boot_disk.copy_to(setup_file_path)
if self.install_virtio == "yes":
boot_disk.setup_virtio_win2003(self.virtio_floppy,
self.virtio_oemsetup_id)
boot_disk.copy_to(self.finish_program)
elif self.unattended_file.endswith('.ks'):
# Red Hat kickstart install
dest_fname = 'ks.cfg'
if self.params.get('unattended_delivery_method') == 'integrated':
unattended_file_url = 'cdrom:/dev/sr0:/isolinux/%s' % (
dest_fname)
kernel_params = self.set_unattended_param_in_kernel(
unattended_file_url)
# Standard setting is kickstart disk in /dev/sr0 and
# install cdrom in /dev/sr1. As we merge them together,
# we need to change repo configuration to /dev/sr0
if 'repo=cdrom' in kernel_params:
kernel_params = re.sub('repo=cdrom[:\w\d\-/]*',
'repo=cdrom:/dev/sr0',
kernel_params)
self.kernel_params = None
boot_disk = utils_disk.CdromInstallDisk(
self.cdrom_unattended,
self.tmpdir,
self.cdrom_cd1_mount,
kernel_params)
elif self.params.get('unattended_delivery_method') == 'url':
if self.unattended_server_port is None:
self.unattended_server_port = utils_misc.find_free_port(
8000,
8099,
self.url_auto_content_ip)
path = os.path.join(os.path.dirname(self.cdrom_unattended),
'ks')
boot_disk = RemoteInstall(path, self.url_auto_content_ip,
self.unattended_server_port,
dest_fname)
unattended_file_url = boot_disk.get_url()
kernel_params = self.set_unattended_param_in_kernel(
unattended_file_url)
# Standard setting is kickstart disk in /dev/sr0 and
# install cdrom in /dev/sr1. When we get ks via http,
# we need to change repo configuration to /dev/sr0
kernel_params = re.sub('repo=cdrom[:\w\d\-/]*',
'repo=cdrom:/dev/sr0',
kernel_params)
self.kernel_params = kernel_params
elif self.params.get('unattended_delivery_method') == 'cdrom':
boot_disk = utils_disk.CdromDisk(self.cdrom_unattended,
self.tmpdir)
elif self.params.get('unattended_delivery_method') == 'floppy':
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
ks_param = '%s=floppy' % self.unattended_file_kernel_param_name
kernel_params = self.kernel_params
if '%s=' % self.unattended_file_kernel_param_name in kernel_params:
                    # Reading the ks file straight from the floppy does not work
                    # on some OSes; the option 'ks=hd:/dev/fd0' reads it from the
                    # mounted floppy instead, so skip replacing it.
if not re.search("fd\d+", kernel_params):
kernel_params = re.sub('%s=[\w\d\-:\./]+' %
(self.unattended_file_kernel_param_name),
ks_param,
kernel_params)
else:
kernel_params = '%s %s' % (kernel_params, ks_param)
kernel_params = re.sub('repo=cdrom[:\w\d\-/]*',
'repo=cdrom:/dev/sr0',
kernel_params)
self.kernel_params = kernel_params
else:
raise ValueError("Neither cdrom_unattended nor floppy set "
"on the config file, please verify")
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_kickstart(answer_path)
elif self.unattended_file.endswith('.xml'):
if "autoyast" in self.kernel_params:
# SUSE autoyast install
dest_fname = "autoinst.xml"
if (self.cdrom_unattended and
self.params.get('unattended_delivery_method') == 'cdrom'):
boot_disk = utils_disk.CdromDisk(self.cdrom_unattended,
self.tmpdir)
elif self.floppy:
autoyast_param = 'autoyast=device://fd0/autoinst.xml'
kernel_params = self.kernel_params
if 'autoyast=' in kernel_params:
kernel_params = re.sub('autoyast=[\w\d\-:\./]+',
autoyast_param,
kernel_params)
else:
kernel_params = '%s %s' % (
kernel_params, autoyast_param)
self.kernel_params = kernel_params
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir,
self.vfd_size)
else:
raise ValueError("Neither cdrom_unattended nor floppy set "
"on the config file, please verify")
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_suse_xml(answer_path)
else:
# Windows unattended install
dest_fname = "autounattend.xml"
if self.params.get('unattended_delivery_method') == 'cdrom':
boot_disk = utils_disk.CdromDisk(self.cdrom_unattended,
self.tmpdir)
if self.install_virtio == "yes":
boot_disk.setup_virtio_win2008(self.virtio_floppy,
self.cdrom_virtio)
else:
self.cdrom_virtio = None
else:
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir,
self.vfd_size)
if self.install_virtio == "yes":
boot_disk.setup_virtio_win2008(self.virtio_floppy)
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_windows_xml(answer_path)
boot_disk.copy_to(self.finish_program)
else:
raise ValueError('Unknown answer file type: %s' %
self.unattended_file)
boot_disk.close()
@error_context.context_aware
def setup_cdrom(self):
"""
Mount cdrom and copy vmlinuz and initrd.img.
"""
error_context.context("Copying vmlinuz and initrd.img from install cdrom %s" %
self.cdrom_cd1)
if not os.path.isdir(self.image_path):
os.makedirs(self.image_path)
if (self.params.get('unattended_delivery_method') in
['integrated', 'url']):
i = iso9660.Iso9660Mount(self.cdrom_cd1)
self.cdrom_cd1_mount = i.mnt_dir
else:
i = iso9660.iso9660(self.cdrom_cd1)
if i is None:
raise exceptions.TestFail("Could not instantiate an iso9660 class")
i.copy(os.path.join(self.boot_path, os.path.basename(self.kernel)),
self.kernel)
assert(os.path.getsize(self.kernel) > 0)
i.copy(os.path.join(self.boot_path, os.path.basename(self.initrd)),
self.initrd)
assert(os.path.getsize(self.initrd) > 0)
if self.unattended_file.endswith('.preseed'):
self.preseed_initrd()
if self.params.get("vm_type") == "libvirt":
if self.vm.driver_type == 'qemu':
# Virtinstall command needs files "vmlinuz" and "initrd.img"
os.chdir(self.image_path)
base_kernel = os.path.basename(self.kernel)
base_initrd = os.path.basename(self.initrd)
if base_kernel != 'vmlinuz':
process.run("mv %s vmlinuz" % base_kernel, verbose=DEBUG)
if base_initrd != 'initrd.img':
process.run("mv %s initrd.img" %
base_initrd, verbose=DEBUG)
if (self.params.get('unattended_delivery_method') !=
'integrated'):
i.close()
utils_disk.cleanup(self.cdrom_cd1_mount)
elif ((self.vm.driver_type == 'xen') and
(self.params.get('hvm_or_pv') == 'pv')):
logging.debug("starting unattended content web server")
self.url_auto_content_port = utils_misc.find_free_port(8100,
8199,
self.url_auto_content_ip)
start_auto_content_server_thread(self.url_auto_content_port,
self.cdrom_cd1_mount)
self.medium = 'url'
self.url = ('http://%s:%s' % (self.url_auto_content_ip,
self.url_auto_content_port))
pxe_path = os.path.join(
os.path.dirname(self.image_path), 'xen')
if not os.path.isdir(pxe_path):
os.makedirs(pxe_path)
pxe_kernel = os.path.join(pxe_path,
os.path.basename(self.kernel))
pxe_initrd = os.path.join(pxe_path,
os.path.basename(self.initrd))
process.run("cp %s %s" % (self.kernel, pxe_kernel))
process.run("cp %s %s" % (self.initrd, pxe_initrd))
if 'repo=cdrom' in self.kernel_params:
# Red Hat
self.kernel_params = re.sub('repo=[:\w\d\-/]*',
'repo=http://%s:%s' %
(self.url_auto_content_ip,
self.url_auto_content_port),
self.kernel_params)
@error_context.context_aware
def setup_url_auto(self):
"""
Configures the builtin web server for serving content
"""
auto_content_url = 'http://%s:%s' % (self.url_auto_content_ip,
self.url_auto_content_port)
self.params['auto_content_url'] = auto_content_url
@error_context.context_aware
def setup_url(self):
"""
Download the vmlinuz and initrd.img from URL.
"""
# it's only necessary to download kernel/initrd if running bare qemu
if self.vm_type == 'qemu':
error_context.context(
"downloading vmlinuz/initrd.img from %s" % self.url)
if not os.path.exists(self.image_path):
os.mkdir(self.image_path)
os.chdir(self.image_path)
kernel_basename = os.path.basename(self.kernel)
initrd_basename = os.path.basename(self.initrd)
sha1sum_kernel_cmd = 'sha1sum %s' % kernel_basename
sha1sum_kernel_output = decode_to_text(process.system_output(sha1sum_kernel_cmd,
ignore_status=True,
verbose=DEBUG))
try:
sha1sum_kernel = sha1sum_kernel_output.split()[0]
except IndexError:
sha1sum_kernel = ''
sha1sum_initrd_cmd = 'sha1sum %s' % initrd_basename
sha1sum_initrd_output = decode_to_text(process.system_output(sha1sum_initrd_cmd,
ignore_status=True,
verbose=DEBUG))
try:
sha1sum_initrd = sha1sum_initrd_output.split()[0]
except IndexError:
sha1sum_initrd = ''
url_kernel = os.path.join(self.url, self.boot_path,
os.path.basename(self.kernel))
url_initrd = os.path.join(self.url, self.boot_path,
os.path.basename(self.initrd))
if not sha1sum_kernel == self.params.get('sha1sum_vmlinuz',
None):
if os.path.isfile(self.kernel):
os.remove(self.kernel)
logging.info('Downloading %s -> %s', url_kernel,
self.image_path)
download.get_file(url_kernel, os.path.join(self.image_path,
os.path.basename(self.kernel)))
if not sha1sum_initrd == self.params.get('sha1sum_initrd',
None):
if os.path.isfile(self.initrd):
os.remove(self.initrd)
logging.info('Downloading %s -> %s', url_initrd,
self.image_path)
download.get_file(url_initrd, os.path.join(self.image_path,
os.path.basename(self.initrd)))
if 'repo=cdrom' in self.kernel_params:
# Red Hat
self.kernel_params = re.sub('repo=[:\w\d\-/]*',
'repo=%s' % self.url,
self.kernel_params)
elif 'autoyast=' in self.kernel_params:
# SUSE
self.kernel_params = (
self.kernel_params + " ip=dhcp install=" + self.url)
elif self.vm_type == 'libvirt':
logging.info("Not downloading vmlinuz/initrd.img from %s, "
"letting virt-install do it instead")
else:
logging.info("No action defined/needed for the current virt "
"type: '%s'" % self.vm_type)
def setup_nfs(self):
"""
Copy the vmlinuz and initrd.img from nfs.
"""
error_context.context(
"copying the vmlinuz and initrd.img from NFS share")
m_cmd = ("mount %s:%s %s -o ro" %
(self.nfs_server, self.nfs_dir, self.nfs_mount))
process.run(m_cmd, verbose=DEBUG)
if not os.path.isdir(self.image_path):
os.makedirs(self.image_path)
try:
kernel_fetch_cmd = ("cp %s/%s/%s %s" %
(self.nfs_mount, self.boot_path,
os.path.basename(self.kernel), self.image_path))
process.run(kernel_fetch_cmd, verbose=DEBUG)
initrd_fetch_cmd = ("cp %s/%s/%s %s" %
(self.nfs_mount, self.boot_path,
os.path.basename(self.initrd), self.image_path))
process.run(initrd_fetch_cmd, verbose=DEBUG)
finally:
utils_disk.cleanup(self.nfs_mount)
if 'autoyast=' in self.kernel_params:
# SUSE
self.kernel_params = (self.kernel_params + " ip=dhcp "
"install=nfs://" + self.nfs_server + ":" + self.nfs_dir)
def setup_import(self):
self.unattended_file = None
self.kernel_params = None
def setup(self):
"""
Configure the environment for unattended install.
Uses an appropriate strategy according to each install model.
"""
logging.info("Starting unattended install setup")
if DEBUG:
utils_misc.display_attributes(self)
if self.syslog_server_enabled == 'yes':
start_syslog_server_thread(self.syslog_server_ip,
self.syslog_server_port,
self.syslog_server_tcp)
if self.medium in ["cdrom", "kernel_initrd"]:
if self.kernel and self.initrd:
self.setup_cdrom()
elif self.medium == "url":
self.setup_url()
elif self.medium == "nfs":
self.setup_nfs()
elif self.medium == "import":
self.setup_import()
else:
raise ValueError("Unexpected installation method %s" %
self.medium)
if self.unattended_file:
if self.floppy or self.cdrom_unattended:
self.setup_boot_disk()
if self.params.get("store_boot_disk") == "yes":
logging.info("Storing the boot disk to result directory "
"for further debug")
src_dir = self.floppy or self.cdrom_unattended
dst_dir = self.results_dir
shutil.copy(src_dir, dst_dir)
else:
self.setup_unattended_http_server()
# Update params dictionary as some of the values could be updated
for a in self.attributes:
self.params[a] = getattr(self, a)
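# Illustrative usage sketch (added): the run() test entry point below drives
# this class in exactly this way, with test/params/vm supplied by the framework:
# config = UnattendedInstallConfig(test, params, vm)
# config.setup()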
def start_syslog_server_thread(address, port, tcp):
global _syslog_server_thread
global _syslog_server_thread_event
syslog_server.set_default_format('[UnattendedSyslog '
'(%s.%s)] %s')
if _syslog_server_thread is None:
_syslog_server_thread_event = threading.Event()
_syslog_server_thread = threading.Thread(
target=syslog_server.syslog_server,
args=(address, port, tcp, terminate_syslog_server_thread))
_syslog_server_thread.start()
def terminate_syslog_server_thread():
global _syslog_server_thread, _syslog_server_thread_event
if _syslog_server_thread is None:
return False
if _syslog_server_thread_event is None:
return False
if _syslog_server_thread_event.isSet():
return True
return False
def copy_file_from_nfs(src, dst, mount_point, image_name):
logging.info("Test failed before the install process start."
" So just copy a good image from nfs for following tests.")
utils_misc.mount(src, mount_point, "nfs", perm="ro")
image_src = utils_misc.get_path(mount_point, image_name)
shutil.copy(image_src, dst)
utils_misc.umount(src, mount_point, "nfs")
def string_in_serial_log(serial_log_file_path, string):
"""
Check if string appears in serial console log file.
:param serial_log_file_path: Path to the installation serial log file.
:param string: String to look for in serial log file.
:return: Whether the string is found in serial log file.
:raise: IOError: Serial console log file could not be read.
"""
    if not string:
        return False
with open(serial_log_file_path, 'r') as serial_log_file:
serial_log_msg = serial_log_file.read()
if string in serial_log_msg:
logging.debug("Message read from serial console log: %s", string)
return True
else:
return False
def attempt_to_log_useful_files(test, vm):
"""
Tries to use ssh or serial_console to get logs from usual locations.
"""
if not vm.is_alive():
return
base_dst_dir = os.path.join(test.outputdir, vm.name)
sessions = []
close = []
try:
try:
session = vm.wait_for_login()
close.append(session)
sessions.append(session)
except Exception as details:
pass
if vm.serial_console:
sessions.append(vm.serial_console)
for i, console in enumerate(sessions):
failures = False
try:
console.cmd("true")
except Exception as details:
logging.info("Skipping log_useful_files #%s: %s", i, details)
continue
for path_glob in ["/*.log", "/tmp/*.log", "/var/tmp/*.log"]:
try:
status, paths = console.cmd_status_output("ls -1 %s"
% path_glob)
if status:
continue
except Exception as details:
failures = True
continue
for path in paths.splitlines():
if not path:
continue
if path.startswith(os.path.sep):
rel_path = path[1:]
else:
rel_path = path
dst = os.path.join(test.outputdir, vm.name, str(i),
rel_path)
dst_dir = os.path.dirname(dst)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst, 'w') as fd_dst:
try:
fd_dst.write(console.cmd("cat %s" % path))
logging.info('Attached "%s" log file from guest '
'at "%s"', path, base_dst_dir)
except Exception as details:
logging.warning("Unknown exception while "
"attempt_to_log_useful_files(): "
"%s", details)
fd_dst.write("Unknown exception while getting "
"content: %s" % details)
failures = True
if not failures:
# All commands succeeded, no need to use next session
break
finally:
for session in close:
session.close()
@error_context.context_aware
def run(test, params, env):
"""
Unattended install test:
    1) Starts a VM with an appropriate setup to start an unattended OS install.
    2) Waits until the install reports its end to the install watcher.
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
@error_context.context_aware
def copy_images():
error_context.base_context(
"Copy image from NFS after installation failure")
image_copy_on_error = params.get("image_copy_on_error", "no")
if image_copy_on_error == "yes":
logging.info("Running image_copy to copy pristine image from NFS.")
try:
error_context.context(
"Quit qemu-kvm before copying guest image")
vm.monitor.quit()
except Exception as e:
logging.warn(e)
from virttest import utils_test
error_context.context("Copy image from NFS Server")
image = params.get("images").split()[0]
t_params = params.object_params(image)
qemu_image = qemu_storage.QemuImg(t_params, data_dir.get_data_dir(), image)
ver_to = utils_test.get_image_version(qemu_image)
utils_test.run_image_copy(test, params, env)
qemu_image = qemu_storage.QemuImg(t_params, data_dir.get_data_dir(), image)
ver_from = utils_test.get_image_version(qemu_image)
utils_test.update_qcow2_image_version(qemu_image, ver_from, ver_to)
src = params.get('images_good')
vt_data_dir = data_dir.get_data_dir()
base_dir = params.get("images_base_dir", vt_data_dir)
dst = storage.get_image_filename(params, base_dir)
if params.get("storage_type") == "iscsi":
dd_cmd = "dd if=/dev/zero of=%s bs=1M count=1" % dst
txt = "iscsi used, need destroy data in %s" % dst
txt += " by command: %s" % dd_cmd
logging.info(txt)
process.system(dd_cmd)
image_name = os.path.basename(dst)
mount_point = params.get("dst_dir")
if mount_point and src:
funcatexit.register(env, params.get("type"), copy_file_from_nfs, src,
dst, mount_point, image_name)
vm = env.get_vm(params["main_vm"])
local_dir = params.get("local_dir", os.path.abspath(vt_data_dir))
local_dir = utils_misc.get_path(vt_data_dir, local_dir)
for media in params.get("copy_to_local", "").split():
media_path = params.get(media)
if not media_path:
logging.warn("Media '%s' is not available, will not "
"be copied into local directory", media)
continue
media_name = os.path.basename(media_path)
nfs_link = utils_misc.get_path(vt_data_dir, media_path)
local_link = os.path.join(local_dir, media_name)
if os.path.isfile(local_link):
file_hash = crypto.hash_file(local_link, algorithm="md5")
expected_hash = crypto.hash_file(nfs_link, algorithm="md5")
if file_hash == expected_hash:
continue
msg = "Copy %s to %s in local host." % (media_name, local_link)
error_context.context(msg, logging.info)
download.get_file(nfs_link, local_link)
params[media] = local_link
unattended_install_config = UnattendedInstallConfig(test, params, vm)
unattended_install_config.setup()
# params passed explicitly, because they may have been updated by
# unattended install config code, such as when params['url'] == auto
vm.create(params=params)
install_error_str = params.get("install_error_str")
install_error_exception_str = ("Installation error reported in serial "
"console log: %s" % install_error_str)
rh_upgrade_error_str = params.get("rh_upgrade_error_str",
"RH system upgrade failed")
post_finish_str = params.get("post_finish_str",
"Post set up finished")
install_timeout = int(params.get("install_timeout", 4800))
wait_ack = params.get("wait_no_ack", "no") == "no"
migrate_background = params.get("migrate_background") == "yes"
if migrate_background:
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
logging.info("Waiting for installation to finish. Timeout set to %d s "
"(%d min)", install_timeout, install_timeout // 60)
error_context.context("waiting for installation to finish")
start_time = time.time()
log_file = vm.serial_console_log
if log_file is None:
raise virt_vm.VMConfigMissingError(vm.name, "serial")
logging.debug("Monitoring serial console log for completion message: %s",
log_file)
serial_read_fails = 0
    # As the install process starts, we may need to collect information from
    # the image, so use the test case instead of this simple function in the
    # code below.
if mount_point and src:
funcatexit.unregister(env, params.get("type"), copy_file_from_nfs,
src, dst, mount_point, image_name)
send_key_timeout = int(params.get("send_key_timeout", 60))
kickstart_reboot_bug = params.get("kickstart_reboot_bug", "no") == "yes"
while (time.time() - start_time) < install_timeout:
try:
vm.verify_alive()
if (params.get("send_key_at_install") and
(time.time() - start_time) < send_key_timeout):
vm.send_key(params.get("send_key_at_install"))
# Due to a race condition, sometimes we might get a MonitorError
# before the VM gracefully shuts down, so let's capture MonitorErrors.
except (virt_vm.VMDeadError, qemu_monitor.MonitorError) as e:
if wait_ack:
try:
install_error_str_found = string_in_serial_log(
log_file, install_error_str)
rh_upgrade_error_str_found = string_in_serial_log(
log_file, rh_upgrade_error_str)
post_finish_str_found = string_in_serial_log(
log_file, post_finish_str)
except IOError:
logging.warn("Could not read final serial log file")
else:
if install_error_str_found:
raise exceptions.TestFail(install_error_exception_str)
if rh_upgrade_error_str_found:
raise exceptions.TestFail("rh system upgrade failed, please "
"check serial log")
if post_finish_str_found:
break
            # Bug: the `reboot` param from the kickstart does not actually
            # restart the VM; instead it shuts it off. This is a temporary
            # workaround so the test can proceed.
if unattended_install_config.unattended_file:
with open(unattended_install_config.unattended_file) as unattended_fd:
reboot_in_unattended = "reboot" in unattended_fd.read()
if (reboot_in_unattended and kickstart_reboot_bug and not
vm.is_alive()):
try:
vm.start()
break
except:
logging.warn("Failed to start unattended install "
"image workaround reboot kickstart "
"parameter bug")
# Print out the original exception before copying images.
logging.error(e)
copy_images()
raise e
else:
break
try:
test.verify_background_errors()
except Exception as e:
attempt_to_log_useful_files(test, vm)
copy_images()
raise e
if wait_ack:
try:
install_error_str_found = string_in_serial_log(
log_file, install_error_str)
rh_upgrade_error_str_found = string_in_serial_log(
log_file, rh_upgrade_error_str)
post_finish_str_found = string_in_serial_log(
log_file, post_finish_str)
except IOError:
# Only make noise after several failed reads
serial_read_fails += 1
if serial_read_fails > 10:
logging.warn(
"Cannot read from serial log file after %d tries",
serial_read_fails)
else:
if install_error_str_found:
attempt_to_log_useful_files(test, vm)
raise exceptions.TestFail(install_error_exception_str)
if rh_upgrade_error_str_found:
raise exceptions.TestFail("rh system upgrade failed, please "
"check serial log")
if post_finish_str_found:
break
        # Since libvirt automatically starts the guest after import,
        # we only need to wait for a successful login.
if params.get("medium") == "import":
try:
vm.login()
break
except (remote.LoginError, Exception) as e:
pass
if migrate_background:
vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
else:
time.sleep(1)
else:
logging.warn("Timeout elapsed while waiting for install to finish ")
attempt_to_log_useful_files(test, vm)
copy_images()
raise exceptions.TestFail("Timeout elapsed while waiting for install to "
"finish")
logging.debug('cleaning up threads and mounts that may be active')
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is not None:
_url_auto_content_server_thread_event.set()
_url_auto_content_server_thread.join(3)
_url_auto_content_server_thread = None
utils_disk.cleanup(unattended_install_config.cdrom_cd1_mount)
global _unattended_server_thread
global _unattended_server_thread_event
if _unattended_server_thread is not None:
_unattended_server_thread_event.set()
_unattended_server_thread.join(3)
_unattended_server_thread = None
global _syslog_server_thread
global _syslog_server_thread_event
if _syslog_server_thread is not None:
_syslog_server_thread_event.set()
_syslog_server_thread.join(3)
_syslog_server_thread = None
time_elapsed = time.time() - start_time
logging.info("Guest reported successful installation after %d s (%d min)",
time_elapsed, time_elapsed // 60)
if params.get("shutdown_cleanly", "yes") == "yes":
shutdown_cleanly_timeout = int(params.get("shutdown_cleanly_timeout",
120))
logging.info("Wait for guest to shutdown cleanly")
if params.get("medium", "cdrom") == "import":
vm.shutdown()
try:
if utils_misc.wait_for(vm.is_dead, shutdown_cleanly_timeout, 1, 1):
logging.info("Guest managed to shutdown cleanly")
except qemu_monitor.MonitorError as e:
logging.warning("Guest apparently shut down, but got a "
"monitor error: %s", e)
| xutian/avocado-vt | virttest/tests/unattended_install.py | Python | gpl-2.0 | 63,456 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import pyre.geometry.solids
from AbstractNode import AbstractNode
class Cylinder(AbstractNode):
tag = "cylinder"
def notify(self, parent):
cylinder = pyre.geometry.solids.cylinder(radius=self._radius, height=self._height)
parent.onCylinder(cylinder)
return
def __init__(self, document, attributes):
AbstractNode.__init__(self, attributes)
self._radius = self._parse(attributes["radius"])
self._height = self._parse(attributes["height"])
return
# version
__id__ = "$Id: Cylinder.py,v 1.1.1.1 2005/03/08 16:13:45 aivazis Exp $"
# End of file
| bmi-forum/bmi-pyre | pythia-0.8/packages/pyre/pyre/geometry/pml/parser/Cylinder.py | Python | gpl-2.0 | 1,012 |
# -*- coding: iso-8859-1 -*-
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2001-2007 Free Software Foundation
#
# FILE:
# FileUtils.py
#
# DESCRIPTION:
# Common file/url/resource related utilities
#
# NOTES:
# TODO: Deprecate
import os
import urllib
import urlparse
import sys
import cStringIO
# For backwards compatability
from gnue.common.utils.importing import import_string as dyn_import
from gnue.common.utils.file import to_uri as urlize, \
open_uri as openResource, \
to_buffer as openBuffer
| HarmonyEnterpriseSolutions/harmony-platform | src/gnue/common/utils/FileUtils.py | Python | gpl-2.0 | 1,224 |
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import gettext
import logging
import os
import threading
#from gi.repository import GObject
import socket
import rhsm.config
import rhsm.connection as connection
import rhsm.utils
from rhsm.utils import remove_scheme
from rhsm.utils import parse_url
from subscription_manager.ga import GObject as ga_GObject
from subscription_manager.gui.utils import show_error_window
import subscription_manager.injection as inj
from subscription_manager.gui import progress
from subscription_manager.gui import widgets
_ = gettext.gettext
DIR = os.path.dirname(__file__)
log = logging.getLogger('rhsm-app.' + __name__)
class NetworkConfigDialog(widgets.SubmanBaseWidget):
"""This is the dialog that allows setting http proxy settings.
It uses the instant apply paradigm or whatever you wanna call it that the
gnome HIG recommends. Whenever a toggle button is flipped or a text entry
changed, the new setting will be saved.
"""
widget_names = ["networkConfigDialog", "enableProxyButton", "enableProxyAuthButton",
"proxyEntry", "proxyUserEntry", "proxyPasswordEntry",
"cancelButton", "saveButton", "testConnectionButton",
"connectionStatusLabel"]
gui_file = "networkConfig"
def __init__(self):
# Get widgets we'll need to access
super(NetworkConfigDialog, self).__init__()
self.org_timeout = socket.getdefaulttimeout()
self.progress_bar = None
self.cfg = rhsm.config.initConfig()
self.cp_provider = inj.require(inj.CP_PROVIDER)
# Need to load values before connecting signals because when the dialog
# starts up it seems to trigger the signals which overwrites the config
# with the blank values.
self.set_initial_values()
self.enableProxyButton.connect("toggled", self.enable_action)
self.enableProxyAuthButton.connect("toggled", self.enable_action)
self.enableProxyButton.connect("toggled", self.clear_connection_label)
self.enableProxyAuthButton.connect("toggled", self.clear_connection_label)
self.enableProxyButton.connect("toggled", self.enable_test_button)
self.proxyEntry.connect("changed", self.clear_connection_label)
self.proxyUserEntry.connect("changed", self.clear_connection_label)
self.proxyPasswordEntry.connect("changed", self.clear_connection_label)
self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry)
self.cancelButton.connect("clicked", self.on_cancel_clicked)
self.saveButton.connect("clicked", self.on_save_clicked)
self.testConnectionButton.connect("clicked", self.on_test_connection_clicked)
self.networkConfigDialog.connect("delete-event", self.deleted)
def set_initial_values(self):
proxy_url = self.cfg.get("server", "proxy_hostname") or ""
        # append the configured port, or the default of 3128 if none is specified
if proxy_url:
proxy_url = proxy_url + ':' + (self.cfg.get("server", "proxy_port") or rhsm.config.DEFAULT_PROXY_PORT)
self.proxyEntry.set_text("%s" % proxy_url)
# show proxy/proxy auth sections as being enabled if we have values set
        # rhn actually has a separate config flag for enabling, which seems overkill
if self.cfg.get("server", "proxy_hostname"):
self.enableProxyButton.set_active(True)
if self.cfg.get("server", "proxy_hostname") and self.cfg.get("server", "proxy_user"):
self.enableProxyAuthButton.set_active(True)
self.enable_action(self.enableProxyAuthButton)
self.enable_action(self.enableProxyButton)
# the extra or "" are to make sure we don't str None
self.proxyUserEntry.set_text(str(self.cfg.get("server", "proxy_user") or ""))
self.proxyPasswordEntry.set_text(str(self.cfg.get("server", "proxy_password") or ""))
self.connectionStatusLabel.set_label("")
# If there is no proxy information, disable the proxy test
# button.
if not self.enableProxyButton.get_active():
self.testConnectionButton.set_sensitive(False)
self.enableProxyAuthButton.set_sensitive(False)
def write_values(self, widget=None, dummy=None):
proxy = self.proxyEntry.get_text() or ""
# don't save these values if they are disabled in the gui
if proxy and self.enableProxyButton.get_active():
# Remove any URI scheme provided
proxy = remove_scheme(proxy)
# Update the proxy entry field to show we removed any scheme
self.proxyEntry.set_text(proxy)
try:
proxy_hostname, proxy_port = proxy.split(':')
self.cfg.set("server", "proxy_hostname", proxy_hostname)
self.cfg.set("server", "proxy_port", proxy_port)
except ValueError:
# no port? just write out the hostname and assume default
self.cfg.set("server", "proxy_hostname", proxy)
self.cfg.set("server", "proxy_port", rhsm.config.DEFAULT_PROXY_PORT)
else:
# delete config options if we disable it in the ui
self.cfg.set("server", "proxy_hostname", "")
self.cfg.set("server", "proxy_port", "")
if self.enableProxyAuthButton.get_active():
if self.proxyUserEntry.get_text() is not None:
self.cfg.set("server", "proxy_user",
str(self.proxyUserEntry.get_text()))
if self.proxyPasswordEntry.get_text() is not None:
self.cfg.set("server", "proxy_password",
str(self.proxyPasswordEntry.get_text()))
else:
self.cfg.set("server", "proxy_user", "")
self.cfg.set("server", "proxy_password", "")
try:
self.cfg.save()
self.cp_provider.set_connection_info()
except Exception:
show_error_window(_("There was an error saving your configuration.") +
_("Make sure that you own %s.") % self.cfg.fileName,
parent=self.networkConfigDialog)
def show(self):
self.set_initial_values()
self.networkConfigDialog.present()
def on_save_clicked(self, button):
self.write_values()
self.networkConfigDialog.hide()
def on_cancel_clicked(self, button):
self.networkConfigDialog.hide()
def enable_test_button(self, button):
self.testConnectionButton.set_sensitive(button.get_active())
def clear_connection_label(self, entry):
self.connectionStatusLabel.set_label("")
# only used as callback from test_connection thread
def on_test_connection_finish(self, result):
if result:
self.connectionStatusLabel.set_label(_("Proxy connection succeeded"))
else:
self.connectionStatusLabel.set_label(_("Proxy connection failed"))
self._clear_progress_bar()
def _reset_socket_timeout(self):
socket.setdefaulttimeout(self.org_timeout)
def test_connection_wrapper(self, proxy_host, proxy_port, proxy_user, proxy_password):
connection_status = self.test_connection(proxy_host, proxy_port, proxy_user, proxy_password)
ga_GObject.idle_add(self.on_test_connection_finish, connection_status)
def test_connection(self, proxy_host, proxy_port, proxy_user, proxy_password):
cp = connection.UEPConnection(
proxy_hostname=proxy_host,
proxy_port=proxy_port,
proxy_user=proxy_user,
proxy_password=proxy_password)
try:
socket.setdefaulttimeout(10)
cp.getStatus()
        # Either connection.RemoteServerException or connection.RestlibException is considered
        # an acceptable exception because they are only thrown as a response from the server,
        # meaning the connection through the proxy was successful.
except (connection.RemoteServerException,
connection.RestlibException) as e:
log.warn("Reporting proxy connection as good despite %s" %
e)
return True
        except connection.NetworkException as e:
log.warn("%s when attempting to connect through %s:%s" %
(e.code, proxy_host, proxy_port))
return False
        except Exception as e:
log.exception("'%s' when attempting to connect through %s:%s" %
(e, proxy_host, proxy_port))
return False
else:
return True
finally:
self._reset_socket_timeout()
# Pass through of the return values of parse_proxy_entry
# This was done to simplify on_test_connection_clicked
def clean_proxy_entry(self, widget=None, dummy=None):
proxy_url = self.proxyEntry.get_text()
proxy_host, proxy_port = self.parse_proxy_entry(proxy_url)
cleaned_proxy_url = "%s:%s" % (proxy_host, proxy_port)
self.proxyEntry.set_text(cleaned_proxy_url)
return (proxy_host, proxy_port)
def parse_proxy_entry(self, proxy_url):
proxy_url = remove_scheme(proxy_url)
proxy_host = None
proxy_port = None
try:
proxy_info = parse_url(proxy_url, default_port=rhsm.config.DEFAULT_PROXY_PORT)
proxy_host = proxy_info[2]
proxy_port = proxy_info[3]
except rhsm.utils.ServerUrlParseErrorPort, e:
proxy_host = proxy_url.split(':')[0]
proxy_port = rhsm.config.DEFAULT_PROXY_PORT
except rhsm.utils.ServerUrlParseError, e:
log.error(e)
return (proxy_host, proxy_port)
def on_test_connection_clicked(self, button):
proxy_host, proxy_port = self.clean_proxy_entry()
# ensure that we only use those values for testing if required
# this catches the case where there was previously a user and pass in the config
# and the user unchecks the box, leaving behind the values for the time being.
# Alternatively we could clear those boxes when the box is unchecked
if self.enableProxyAuthButton.get_active():
proxy_user = self.proxyUserEntry.get_text()
proxy_password = self.proxyPasswordEntry.get_text()
else:
proxy_user = None
proxy_password = None
self._display_progress_bar()
threading.Thread(target=self.test_connection_wrapper,
args=(proxy_host, proxy_port, proxy_user, proxy_password),
name='TestNetworkConnectionThread').start()
def deleted(self, event, data):
self.write_values()
self.networkConfigDialog.hide()
self._clear_progress_bar()
return True
def _display_progress_bar(self):
if self.progress_bar:
self.progress_bar.set_title(_("Testing Connection"))
self.progress_bar.set_label(_("Please wait"))
else:
self.progress_bar = progress.Progress(_("Testing Connection"), _("Please wait"))
self.timer = ga_GObject.timeout_add(100, self.progress_bar.pulse)
self.progress_bar.set_transient_for(self.networkConfigDialog)
def _clear_progress_bar(self):
        if not self.progress_bar: # progress bar could be None if self.test_connection is called directly
return
self.progress_bar.hide()
ga_GObject.source_remove(self.timer)
self.timer = 0
self.progress_bar = None
def enable_action(self, button):
if button.get_name() == "enableProxyButton":
self.proxyEntry.set_sensitive(button.get_active())
self.proxyEntry.grab_focus()
self.enableProxyAuthButton.set_sensitive(button.get_active())
# Proxy authentication should only be active if proxy is also enabled
self.proxyUserEntry.set_sensitive(button.get_active() and
self.enableProxyAuthButton.get_active())
self.proxyPasswordEntry.set_sensitive(button.get_active() and
self.enableProxyAuthButton.get_active())
elif button.get_name() == "enableProxyAuthButton":
self.proxyUserEntry.set_sensitive(button.get_active())
self.proxyPasswordEntry.set_sensitive(button.get_active())
self.get_object("usernameLabel").set_sensitive(button.get_active())
self.get_object("passwordLabel").set_sensitive(button.get_active())
def set_parent_window(self, window):
self.networkConfigDialog.set_transient_for(window)
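if __name__ == '__main__':
    # Illustrative sketch only (not part of subscription-manager): the same
    # "host[:port]" splitting idea used in write_values() above. The default
    # port value below is a made-up stand-in for rhsm.config.DEFAULT_PROXY_PORT.
    _EXAMPLE_DEFAULT_PORT = '3128'
    for _proxy in ('squid.example.com:8080', 'squid.example.com'):
        try:
            _host, _port = _proxy.split(':')
        except ValueError:
            # no port given? fall back to the default, as write_values() does
            _host, _port = _proxy, _EXAMPLE_DEFAULT_PORT
        print('%s -> host=%s port=%s' % (_proxy, _host, _port))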
| alikins/subscription-manager | src/subscription_manager/gui/networkConfig.py | Python | gpl-2.0 | 13,296 |
# functions for processing the parsed syntax tree
import sys
import decimal
import string
import logger
# list of standard functions (from <math.h>) that are
# already known to be included in the destination
# NB: this list is fairly minimal -- the actual math.h
# for a contemporary C compiler is likely to include
# many more functions, but since we only use this for
# reporting purposes this list will do for now
# (these may eventually move to some other module)
STD_FUNCS = set(['acos', 'asin', 'atan', 'atan2', 'cos',
'cosh', 'sin', 'sinh', 'tan', 'tanh',
'exp', 'frexp', 'ldexp', 'log', 'log10',
'modf', 'pow', 'sqrt', 'ceil', 'fabs',
'floor', 'fmod'])
# shared non-negativity constraint for use with chemicals
NON_NEGATIVE = {'expr' : '0', 'i_expr' : (('literal', '0'),), 'kind' : 'bound', 'test' : '<'}
# when we need labels and they aren't supplied, use a simple
# integer counter to distinguish them
n_unlabelled = 0
def default_label(basename='unlabelled__'):
global n_unlabelled
n_unlabelled = n_unlabelled + 1
return basename + str(n_unlabelled)
# process all the top level items in a parsed model AST
# working out what's in them and what their dependencies are
def process(merged, sources, independent='t'):
work = {
'roots' : [],
'assigned' : set(),
'chemicals' : {},
'reactions' : {},
'symbols' : {},
'conflicts' : [],
'functions' : set(),
'embeds' : [],
'required' : set(),
'unused' : set(),
'symlist' : [],
'diffs' : [],
'algs' : [],
'auxiliaries' : {},
'diagonal' : True,
'version' : '',
'params' : [],
'intermeds' : [],
'outputs' : [],
'inputs' : [],
'docs' : [],
'docstack' : [],
'modeldocs' : [],
'tags' : {},
'sources' : sources,
'extern' : [],
}
# independent variable is always at index 0
declare_symbol(find_independent(merged, independent), work)
for item in merged:
{
'reaction' : process_reaction,
'algeqn' : process_algeqn,
'diffeqn' : process_diffeqn,
'assign' : process_assign,
'constraint' : process_constraint,
'EMBEDDED' : process_embedded,
'version' : process_version,
'output' : process_output,
'input' : process_input,
'extern' : process_extern,
'import' : ignore_silent,
'independent' : ignore_silent,
'DOC' : process_doc
}.get(item[0], ignore_item)(item, work)
transform_reactions(work)
# consolidate global dependencies
for name in work['symbols'].keys():
recurse_dependencies(name, set(), set(), work['symbols'])
    # assess whether a symbol depends on Y changes (made by the solver)
    # or only on parameter updates (specified by the user)
rootset = set(work['roots'] + [work['symlist'][0]])
for name in work['symbols']:
if name in work['roots']:
pass
elif work['symbols'][name]['depends'] & rootset:
work['intermeds'].append(name)
else:
work['params'].append(name)
work['assignments'] = sort_assignments(work)
finalise_outputs(work)
finalise_externs(work)
for name in rootset.union(work['outputs']):
work['required'].add(name)
work['required'] = work['required'] | work['symbols'][name]['depends']
work['unused'] = set(work['symbols'].keys()) - work['required']
work['ind_params'] = sorted([x for x in work['params'] if len(work['symbols'][x]['depends']) == 0], key=str.lower)
work['deriv_params'] = sorted([x for x in work['params'] if x not in work['ind_params']], key=str.lower)
work['known'] = work['functions'] & STD_FUNCS
work['unknown'] = work['functions'] - STD_FUNCS
postprocess_docs(work)
return work
# at the moment this is just a hack to handle one special case
# more considered doc compiling will be dealt with later (and probably elsewhere)
def postprocess_docs(work):
for name in work['symbols']:
symbol = work['symbols'][name]
docs = symbol['docs']
for line in docs:
if line.startswith('+'):
symbol['tags'].extend(line.strip('+').strip().split())
elif line.startswith('$'):
symbol['latex'] = line.strip('$').strip()
elif line.startswith('~'):
symbol['units'] = line.strip('~').strip()
for tag in symbol['tags']:
if tag in work['tags']:
work['tags'][tag].append(name)
else:
work['tags'][tag] = [name]
# identify independent variable -- only first declaration applies
def find_independent(merged, default):
independent = None
for item in merged:
if item[0] == 'independent':
if independent is None:
independent = item[1]
else:
logger.warn('Ignoring additional @independent directive: ' + item[1])
if independent is None:
return default
return independent
# recursively consolidate all dependencies for all symbols
# this is pretty clunky and probably doing a lot of redundant work
# especially given that the results are of marginal utility
def recurse_dependencies(name, parents, done, symbols):
if name in parents:
if not symbols[name]['circular']:
logger.detail('Circular dependency found for ' + name)
symbols[name]['circular'] = True
elif symbols[name]['circular']:
logger.detail('Previous circularity noted for ' + name)
else:
for dep in symbols[name]['depends']:
if dep in done:
symbols[name]['depends'] = symbols[name]['depends'] | symbols[dep]['depends']
else:
symbols[name]['depends'] = (symbols[name]['depends']
| recurse_dependencies(dep, parents | set([name]), done, symbols))
done = done | set([name])
return symbols[name]['depends']
# sort assignment expressions into four groups:
# - independent for initialisation time
# - dependents ordered for overall initialisation time
# - parameters ordered for step initialisation time
# - dependents ordered for solver runtime
def sort_assignments(work):
independent = []
ind_expr = []
dependent_init = []
init_expr = []
dependent_step = []
step_expr = []
dependent_run = []
run_expr = []
symbols = work['symbols']
for name in work['assigned']:
init, run = choose_assignments(symbols[name])
if len(init['depends']) > 0:
dependent_init.append(name)
init_expr.append(init)
else:
independent.append(name)
ind_expr.append(init)
if run:
if name in work['intermeds']:
dependent_run.append(name)
run_expr.append(run)
else:
dependent_step.append(name)
step_expr.append(run)
else:
# intermeds is filled before we've determined
# whether there's any runtime assignment to do
# - now correct any earlier misapprehensions...
if name in work['intermeds']:
logger.message('reclassifying symbol %s as parameter' % name)
work['intermeds'].remove(name)
if name not in work['params']:
work['params'].append(name)
result = { 'independent': { 'names':independent, 'exprs':ind_expr } }
names, exprs = dependency_sort(dependent_init, init_expr)
result['dependent'] = { 'names': names, 'exprs': exprs }
names, exprs = dependency_sort(dependent_step, step_expr)
result['step'] = { 'names': names, 'exprs': exprs }
names, exprs = dependency_sort(dependent_run, run_expr)
result['runtime'] = { 'names':names, 'exprs': exprs }
return result
# sort a matched pair of lists into dependency order
def dependency_sort(names, exprs):
# this machine kills infinite loops (I hope)
stopper = {}
ordered_names = []
ordered_exprs = []
while names:
name = names[0]
del names[0]
expr = exprs[0]
del exprs[0]
if len(names) >= stopper.get(name, len(names)+1):
# we're now going around in circles
logger.error('Unresolved circular dependency in assignments (at symbol ' \
+ name + '), model may be non-viable')
ordered_names.append(name)
ordered_names += names
ordered_exprs.append(expr)
ordered_exprs += exprs
return ordered_names, ordered_exprs
stopper[name] = len(names)
for dep in expr['depends']:
if dep in names:
names.append(name)
exprs.append(expr)
break
if name not in names:
ordered_names.append(name)
ordered_exprs.append(expr)
return ordered_names, ordered_exprs
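if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): dependency_sort()
    # reorders a names/exprs pair so that a symbol is emitted after everything it
    # depends on. The toy expr dicts below carry just the 'depends' field it reads.
    _toy_names = ['a', 'b']
    _toy_exprs = [{'depends': set(['b'])}, {'depends': set()}]
    print(dependency_sort(_toy_names, _toy_exprs)[0])
    # expected order: ['b', 'a']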
# choose (guess) the relevant assignment expressions for initialisation and runtime
def choose_assignments(symbol):
assigns = symbol['assigns']
if len(assigns) == 1:
if assigns[0]['init'] or len(assigns[0]['depends']) == 0:
return assigns[0], False
else:
return assigns[0], assigns[0]
if len(assigns) > 2:
logger.warn('Warning: too many assignments for symbol ' + symbol['id'] + ':')
logger.message(assigns, True)
lo = 1e6
hi = -1
init = False
noinit = False
# we use a rule of thumb that the expression with fewer dependencies is
# the initialisation; in the case of a tie, we take the later one as the init
# (which is why we have <= vs > in the if clauses below)
for ass in assigns:
if ass['init']:
init = ass
else:
noinit = ass
ndeps = len(ass['depends'])
if ndeps <= lo:
lo = ndeps
lo_expr = ass
if ndeps > hi:
hi = ndeps
hi_expr = ass
if init:
return init, noinit
if hi == lo:
logger.warn('Ambiguous dependencies in assignment for ' + symbol['id'])
return lo_expr, hi_expr
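if __name__ == '__main__':
    # Illustrative sketch only: with two unmarked assignments, choose_assignments()
    # takes the one with fewer dependencies as the initialisation expression and
    # the other as the runtime expression (the rule of thumb described above).
    _toy_symbol = {'id': 'x',
                   'assigns': [{'init': False, 'depends': set(), 'expr': '1'},
                               {'init': False, 'depends': set(['t']), 'expr': 't+1'}]}
    _init, _run = choose_assignments(_toy_symbol)
    print((_init['expr'], _run['expr']))
    # expected: ('1', 't+1')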
# unrecognised items and those we don't deal with yet
def ignore_item(item, work):
logger.detail("Ignoring item: " + item[0])
# items we recognise but do nothing with
def ignore_silent(item, work):
pass
def declare_symbol(name, work):
if name in work['symbols']:
symbol = work['symbols'][name]
else:
symbol = { 'id' : name,
'depends' : set(),
'conflicts':0,
'assigns':[],
'diffs':[],
'algs':[],
'constraints':[],
'circular': False,
'index':len(work['symbols']),
'docs':[],
'tags':[] }
work['symbols'][name] = symbol
work['symlist'].append(name)
logger.detail("Created symbol '" + name + "' at index %d" % work['symbols'][name]['index'])
return symbol
# process documentation comments
def process_doc(item, work):
work['docs'].append(item[1])
# we normally attach doc comments to the left hand symbol of a following eq
# but we can instead attach to an arbitrary list of symbols using an @ line...
# further, if no targets are specified, we attach the doc comments to the model itself
if item[1].startswith('@'):
targets = item[1].strip('@').split()
if targets:
for target in targets:
sym = declare_symbol(target, work)
sym['docs'].extend(work['docstack'])
else:
work['modeldocs'].extend(work['docstack'])
work['docstack'] = []
else:
work['docstack'].append(item[1])
def process_version(item, work):
if ( work['version'] ):
logger.warn('Ignoring additional @version directive: ' + item[1])
else:
logger.message('Model version is: ' + item[1])
work['version'] = item[1]
# set fields to output by default
# note that this does not create symbols -- fields which are listed as outputs
# but never get actually created anywhere will be ignored
def process_output(item, work):
logger.detail('Appending default output fields:' + str(item[1:]))
# only include each field once
for id in item[1:]:
if id not in work['outputs']:
work['outputs'].append(id)
# finalise the output fields list
def finalise_outputs(work):
logger.warn(work['outputs'], True)
present = [ x for x in work['outputs'] if x in work['symbols']]
if present:
work['outputs'] = present
else:
work['outputs'] = work['roots']
# mark fields as inputs
# as with output, this does not create symbols
def process_input(item, work):
logger.detail('Appending input fields:' + str(item[1:]))
# only include each field once
work['inputs'].extend([x for x in item[1:] if x not in work['inputs']])
# mark fields as external
# as with output, this does not create symbols
def process_extern(item, work):
logger.detail('Appending extern fields:' + str(item[1:]))
# only include each field once
work['extern'].extend([x for x in item[1:] if x not in work['extern']])
# finalise the external fields list
# fields are only considered external if they exist but are not assigned in the model
def finalise_externs(work):
logger.message(work['extern'], True)
work['extern'] = [ x for x in work['extern'] if x in work['symbols'] and x not in work['assigned'] ]
# processing for diff eqns, algebraics and assignments is similar,
# but there are enough differences that they get separate functions
def process_diffeqn(item, work):
target = item[1]
logger.message("Processing differential equation for variable: " + target)
symbol = declare_symbol(target, work)
if work['docstack']:
symbol['docs'].extend(work['docstack'])
work['docstack'] = []
if target in work['roots']:
symbol['conflicts'] = symbol['conflicts'] + 1
else:
work['roots'].append(target)
if target not in work['diffs']:
work['diffs'].append(target)
i_expr, expr, depends = process_mathterm(item[2][1], work)
symbol['diffs'].append({'expr':expr, 'i_expr':i_expr, 'depends':depends, 'mathterm':item[2][1]})
symbol['depends'] |= depends
# process auxiliary terms, if any
auxterm = item[4]
offset = 1
auxes = []
while len(auxterm) >= offset + 3:
mass = auxterm[offset + 1]
if auxterm[offset] == '-':
mass = -1 * mass
auxes.append((mass, auxterm[offset + 2]))
offset = offset + 3
work['auxiliaries'][item[1]] = auxes
if offset > 1:
work['diagonal'] = False
def process_algeqn(item, work):
target = item[1]
logger.message("Processing algebraic relation for variable: " + target)
symbol = declare_symbol(target, work)
if work['docstack']:
symbol['docs'].extend(work['docstack'])
work['docstack'] = []
if target in work['roots']:
symbol['conflicts'] = symbol['conflicts'] + 1
else:
work['roots'].append(target)
if target not in work['algs']:
work['algs'].append(target)
i_expr, expr, depends = process_mathterm(item[2][1], work)
symbol['algs'].append({'expr':expr, 'i_expr':i_expr, 'depends':depends, 'mathterm':item[2][1]})
symbol['depends'] |= depends
def process_assign(item, work):
target = item[1]
logger.detail("Processing assignment to variable: " + target)
symbol = declare_symbol(target, work)
if work['docstack']:
symbol['docs'].extend(work['docstack'])
work['docstack'] = []
work['assigned'].add(target)
i_expr, expr, depends = process_mathterm(item[2][1], work)
symbol['assigns'].append({'expr':expr, 'i_expr':i_expr, 'depends':depends, 'init':item[4], 'mathterm':item[2][1]})
symbol['depends'] |= depends
def process_constraint(item, work):
item = item[1]
kind = item[0]
if ( kind == 'softbound' ): item = item[1]
symbol = declare_symbol(item[1], work)
if work['docstack']:
symbol['docs'].extend(work['docstack'])
work['docstack'] = []
# declarations are inverted for testing
test = {'<':'>=', '<=':'>', '>':'<=', '>=':'<'}.get(item[2], 'ERROR')
i_expr, expr, deps = process_mathterm(item[3][1], work)
symbol['constraints'].append({'kind':kind, 'test':test, 'expr':expr,
'i_expr':i_expr, 'depends':deps, 'mathterm':item[3][1]})
symbol['depends'] |= deps
# convert a reaction to standard form and add to the workspace
def process_reaction(item, work):
logger.message("Processing reaction '" + item[2][1] + "' of type: " + item[1])
{
'influx' : process_flux,
'outflux' : process_flux,
'oneway' : process_oneway,
'twoway' : process_twoway
}.get(item[1], unknown_reaction)(item, work)
# reactions with only one side are treated similarly, but not
# *exactly* the same
def process_flux(term, work):
tag = term[1]
logger.detail('Processing ' + tag + ' reaction')
label = term[2][1]
if label == '': label = default_label(tag + '__')
while label in work['reactions'].keys():
newlabel = default_label(tag + '__')
logger.warn("Duplicate reaction label '" + label
+ "', substituting '" + newlabel + "'")
label = newlabel
# I have no idea whether this is reasonable, but:
# outflux reactions have an LHS and can (if necessary)
# be given MA rates; influx reactions don't and can't
if tag == 'outflux':
terms = process_chemterm(term[3], work, -1)
rate = process_rateterm(term[4], work, terms)
lhs = terms
rhs = []
else:
terms = process_chemterm(term[3], work, 1)
rate = process_rateterm(term[4], work, None)
lhs = []
rhs = terms
work['reactions'][label] = { 'type' : tag,
'terms' : terms,
'rate' : rate,
'lhs' : lhs,
'rhs': rhs,
'ratespec': term[4] }
consolidate_chems(work['reactions'][label])
def process_oneway(term, work):
label = term[2][1]
logger.detail("Processing oneway reaction '" + label + "'")
if label == '': label = default_label('oneway__')
while label in work['reactions'].keys():
newlabel = default_label('oneway__')
logger.warn("Duplicate reaction label '" + label
+ "', substituting '" + newlabel + "'")
label = newlabel
lhs = process_chemterm(term[3], work, -1)
rhs = process_chemterm(term[4], work, 1)
rate = process_rateterm(term[5], work, lhs)
work['reactions'][label] = { 'type' : 'oneway',
'terms' : lhs + rhs,
'rate' : rate,
'lhs' : lhs,
'rhs' : rhs,
'ratespec': term[5] }
consolidate_chems(work['reactions'][label])
# twoway reactions are just broken into separate forward and backward oneways
def process_twoway(term, work):
label = term[2][1]
logger.detail("Processing twoway reaction '" + label + "'")
if label == '': label = default_label('twoway__')
forward = ('reaction',
'oneway',
('label', label + '_forward'),
term[3],
term[4],
term[5])
reverse = ('reaction',
'oneway',
('label', label + '_reverse'),
term[4],
term[3],
term[6])
process_oneway(forward, work)
process_oneway(reverse, work)
# this shouldn't happen
def unknown_reaction(item, work):
logger.message("Ignoring unknown reaction type: " + item[1])
# it is convenient to have the terms information in lookup form
def consolidate_chems(reaction):
terms = reaction['terms']
chems = {}
for term in terms:
chems[term['chem']] = {'depends':term['depends'],
'stoich':term['stoich'],
'i_stoich':term['i_stoich'],
'mathterm':term.get('mathterm', '[ERROR]')}
reaction['chems'] = chems
# once all reactions (and other relevant terms) have been processed,
# transform the whole system to ODEs, one per chemical
def transform_reactions(work):
reacs = work['reactions']
if len(reacs) > 0:
logger.message('Transforming reactions to ODEs')
else:
return
chems = work['chemicals']
syms = work['symbols']
roots = work['roots']
# construct an ODE for each chemical
for chem in chems.keys():
sym = declare_symbol(chem, work)
if ( chem in roots ):
sym['conflicts'] = sym['conflicts'] + 1
else:
roots.append(chem)
work['diffs'].append(chem)
if chem not in work['auxiliaries']:
work['auxiliaries'][chem] = []
expr = ''
i_expr = ()
deps = set()
mathterm = ()
for reactlabel in reacs.keys():
reaction = reacs[reactlabel]
if chem in reaction['chems'].keys():
deps |= reaction['rate']['depends']
deps |= reaction['chems'][chem]['depends']
if len(expr) > 0: expr = expr + ' + '
if len(i_expr) > 0: i_expr = i_expr + (('literal',' + '),)
expr = expr + ( '(' + reaction['chems'][chem]['stoich']
+ '*' + reaction['rate']['expr'] + ')' )
i_expr = i_expr + (('literal','('),) \
+ reaction['chems'][chem]['i_stoich'] \
+ (('literal','*'),) \
+ reaction['rate']['i_expr'] \
+ (('literal',')'),)
subterm = ( 'arithmetic',
'*',
('mathterm', reaction['chems'][chem]['mathterm']),
('mathterm', reaction['rate']['mathterm']) )
if mathterm:
mathterm = ( 'arithmetic',
'+',
('mathterm', mathterm),
('mathterm', subterm) )
else:
mathterm = subterm
sym['depends'] |= deps
sym['diffs'].append({'depends':deps, 'expr':expr, 'i_expr':i_expr, 'mathterm':mathterm})
# multiplier indicates whether terms are reactants or products
# conventionally will be -1 for LHS terms and +1 for RHS ones
def process_chemterm(term, work, multiplier):
logger.detail("Processing chemterm")
terms = []
idx = 1
while idx < len(term):
logger.detail('iterating term: ' + str(term[idx]) + ', ' + str(term[idx+1]))
i_expr, expr, deps = process_mathterm(term[idx][1], work)
unmod = expr
i_unmod = i_expr
mathterm = term[idx][1]
# # this is a thoroughly dodgy bit of hackery
# # may change in line with better expr handling
if ( multiplier != 1 ):
expr = '(' + str(multiplier) + '*' + expr + ')'
i_expr = (('literal','(' + str(multiplier) + '*'),) + i_expr + (('literal',')'),)
mathterm = ('arithmetic', '*', ('mathterm', decimal.Decimal('-1')), term[idx])
chem = process_chemical(term[idx+1], work)
terms.append({'stoich': expr, 'chem':chem, 'unmod':unmod,
'i_stoich': i_expr, 'i_unmod':i_unmod,
'mathterm_unmod':term[idx][1], 'mathterm':mathterm,
'depends': deps|work['symbols'][chem]['depends']})
idx = idx + 2
return terms
def process_chemical(term, work):
logger.detail("Processing chemical: " + str(term))
if len(term) == 2:
chem = term[1]
work['chemicals'][chem] = None
symbol = declare_symbol(chem, work)
else:
chem = term[1] + '_' + term[2]
work['chemicals'][chem] = term[2]
declare_symbol(term[2], work)
symbol = declare_symbol(chem, work)
symbol['depends'] = symbol['depends'] | set([term[2]])
# automatic non-negativity constraint on chemicals
if NON_NEGATIVE not in symbol['constraints']:
symbol['constraints'].append(NON_NEGATIVE)
return chem
def process_rateterm(term, work, lhs):
return {
'explicit' : process_explicit_rate,
'MA' : process_MA_rate,
'MM' : process_MM_rate
}.get(term[1], unknown_rate_type)(term, work, lhs)
def process_explicit_rate(term, work, lhs):
logger.detail("Processing explicit rateterm")
if len(term[2]) > 2:
logger.warn("More than 1 rate term supplied, ignoring excess")
i_expr, expr, deps = process_mathterm(term[2][1][1], work)
return { 'i_expr':i_expr, 'expr':expr, 'depends':deps, 'mathterm':term[2][1][1] }
def process_MA_rate(term, work, lhs):
logger.detail("Processing mass action rateterm")
if len(term[2]) > 2:
logger.detail("More than 1 rate term supplied, extras will be taken as concentration exponents")
i_expr, expr, deps = process_mathterm(term[2][1][1], work)
mathterm = term[2][1][1]
if lhs is None:
logger.detail("Reaction has no LHS, omitting concentration dependence")
else:
num = []
denom = []
i_num = []
i_denom = []
m_num = ()
m_denom = ()
offset_exp_idx = 2
for chem in lhs:
name = chem['chem']
if len(term[2]) > offset_exp_idx:
i_exponent, exponent, exp_deps = process_mathterm(term[2][offset_exp_idx][1], work)
m_exponent = term[2][offset_exp_idx][1]
num.append('pow(' + name + ',' + exponent + ')')
i_num.append( (('literal','pow('),) + (('symbol', name),) + (('literal',','),) + i_exponent + (('literal',')'),) )
deps = deps | exp_deps
m_sub = ('arithmetic',
'^',
('mathterm', name),
('mathterm', m_exponent))
if m_num:
m_num = ('arithmetic',
'*',
('mathterm', m_num),
('mathterm', m_sub))
else:
m_num = m_sub
else:
num.append(name)
i_num.append((('symbol',name),))
if m_num:
m_num = ('arithmetic',
'*',
('mathterm', m_num),
('mathterm', name))
else:
m_num = name
if work['chemicals'][name] is not None:
compartment = work['chemicals'][name]
denom.append(compartment)
i_denom.append((('symbol',compartment),))
if m_denom:
m_denom = ('arithmetic',
'*',
('mathterm', m_denom),
('mathterm', compartment))
else:
m_denom = compartment
deps = deps | set([name]) | work['symbols'][name]['depends']
offset_exp_idx += 1
factor = '(' + '*'.join(num) + ')'
i_factor = i_num[0]
for ifx in i_num[1:]:
i_factor = i_factor + (('literal','*'),) + ifx
i_factor = (('literal','('),) + i_factor + (('literal',')'),)
m_factor = m_num
if len(denom) > 0:
factor = factor + '/(' + '*'.join(denom) + ')'
i_divisor = i_denom[0]
for idn in i_denom[1:]:
i_divisor = i_divisor + (('literal','*'),) + idn
i_factor = i_factor + (('literal','/('),) + i_divisor + (('literal', ')'),)
m_factor = ('arithmetic',
'/',
('mathterm', m_factor),
('mathterm', m_denom))
expr = '(' + expr + '*' + factor + ')'
i_expr = (('literal','('),) + i_expr + (('literal','*'),) \
+ i_factor + (('literal',')'),)
mathterm = ('arithmetic',
                    '*',
('mathterm', mathterm),
('mathterm', m_factor))
return { 'i_expr':i_expr, 'expr':expr, 'depends':deps, 'mathterm':mathterm }
# this is the most complicated rate term we support, requiring terms to
# be raised to the power of the stoichiometry
def process_MM_rate(term, work, lhs):
logger.detail("Processing Michaelis-Menten rateterm")
arglist = term[2]
if len(arglist) != len(lhs) + 2: # one for 'arglist' + one for Vmax
logger.warn("Incorrect parameters for Michaelis-Menten rate term, skipping!")
return { 'i_expr':('error','FAILED'), 'expr':'FAILED', 'depends':set() }
i_num = []
i_denom = []
num = []
denom = []
i_expr, expr, deps = process_mathterm(arglist[1][1], work)
num.append(expr)
i_num.append(i_expr)
m_num = arglist[1][1]
m_denom = ()
for idx in range(len(lhs)):
logger.detail(idx)
i_Km_expr, Km_expr, km_deps = process_mathterm(arglist[idx+2][1], work)
deps = deps | km_deps | lhs[idx]['depends']
m_Km = arglist[idx+2][1]
# explicitly add Km to dependencies if it is a symbol in its own right
# since otherwise the dependency won't get registered
if Km_expr in work['symbols'].keys():
deps = deps | set([Km_expr])
chem = lhs[idx]['chem']
i_chem = (('symbol',chem),)
stoich = lhs[idx]['unmod'] # we only want the original value without the -1 multiplier
i_stoich = lhs[idx]['i_unmod']
m_stoich = lhs[idx]['mathterm_unmod']
# x^1 is obviously just x...
if stoich == '1':
conc_pwr = chem
Km_pwr = Km_expr
i_conc_pwr = i_chem
i_Km_pwr = i_Km_expr
m_conc_pwr = chem
m_Km_pwr = m_Km
else:
conc_pwr = 'pow(' + chem + ',' + stoich + ')'
Km_pwr = 'pow(' + Km_expr + ',' + stoich + ')'
i_conc_pwr = (('literal','pow('),) + i_chem + (('literal',','),) + i_stoich + (('literal',')'),)
i_Km_pwr = (('literal','pow('),) + i_Km_expr + (('literal',','),) + i_stoich + (('literal',')'),)
m_conc_pwr = ('arithmetic',
'^',
('mathterm', chem),
('mathterm', m_stoich))
m_Km_pwr = ('arithmetic',
'^',
('mathterm', m_Km),
('mathterm', m_stoich))
num.append(conc_pwr)
denom.append('(' + Km_pwr + '+' + conc_pwr + ')')
i_num.append(i_conc_pwr)
i_denom.append((('literal', '('),) + i_Km_pwr + (('literal','+'),) + i_conc_pwr + (('literal',')'),))
m_num = ('arithmetic',
'*',
('mathterm', m_num),
('mathterm', m_conc_pwr))
m_sub = ('arithmetic',
'+',
('mathterm', m_Km_pwr),
('mathterm', m_conc_pwr))
if m_denom:
m_denom = ('arithmetic',
'*',
('mathterm', m_denom),
('mathterm', m_sub))
else:
m_denom = m_sub
num_expr = '*'.join(num)
denom_expr = '*'.join(denom)
expr = '((' + num_expr + ')/(' + denom_expr + '))'
i_num_expr = i_num[0]
for iex in i_num[1:]:
i_num_expr = i_num_expr + (('literal','*'),) + iex
i_denom_expr = i_denom[0]
for iex in i_denom[1:]:
i_denom_expr = i_denom_expr + (('literal','*'),) + iex
i_expr = (('literal','(('),) + i_num_expr + (('literal',')/('),) \
+ i_denom_expr + (('literal','))'),)
mathterm = ('arithmetic',
'/',
('mathterm', m_num),
('mathterm', m_denom))
return { 'i_expr':i_expr, 'expr':expr, 'depends':deps, 'mathterm': mathterm }
def unknown_rate_type(term, work, lhs):
logger.warn("Unknown rate type: '" + term[1] + "' -- treating as explicit")
return process_explicit_rate(term, work, lhs)
# embeds just get stashed blindly, we don't do any analysis on them
def process_embedded(item, work):
logger.detail("Processing embedded code fragment")
work['embeds'].append(item[1])
#----------------------------------------------------------------------
# expression handling functions -- these all return I_EXPR, EXPR, DEPENDS
def process_mathterm(term, work):
logger.detail("Processing mathterm: " + str(term))
if isinstance(term, decimal.Decimal):
return (('literal', str(float(term))),), str(term), set()
if isinstance(term, str):
declare_symbol(term,work)
return (('symbol', term),), term, set([term])
if not isinstance(term, tuple):
return (('error','ERROR'),), 'ERROR', set()
return {
'function' : process_function,
'conditional' : process_conditional,
'arithmetic' : process_binop
}.get(term[0], unknown_mathterm)(term, work)
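if __name__ == '__main__':
    # Illustrative sketch only: every handler in this section returns the triple
    # (i_expr, expr, depends). A bare symbol name yields a one-element i_expr,
    # the same string as expr, and a single-entry dependency set.
    _toy_work = {'symbols': {}, 'symlist': []}
    print(process_mathterm('x', _toy_work))
    # roughly: ((('symbol', 'x'),), 'x', set(['x']))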
def unknown_mathterm(term, work):
# recursive catcher for errors with my dumb calling convention
if ( term[0] == 'mathterm' ):
return process_mathterm(term[1], work)
return (('error','UNKNOWN'),), 'UNKNOWN', set()
def process_function(term, work):
work['functions'] = work['functions'] | set([term[1]])
i_argexpr, argexpr, argdeps = process_arglist(term[2], work)
i_expr = (('literal',term[1]), ('literal','(')) + i_argexpr + (('literal',')'),)
expr = term[1] + '(' + argexpr + ')'
return i_expr, expr, argdeps
def process_arglist(arglist, work):
if len(arglist) < 2:
return '', '', set()
i_expr, expr, deps = process_mathterm(arglist[1][1], work)
idx = 2
while idx < len(arglist):
i_nextExpr, nextExpr, nextDeps = process_mathterm(arglist[idx][1], work)
i_expr = i_expr + (('literal',', '),) + i_nextExpr
expr = expr + ', ' + nextExpr
deps = deps | nextDeps
idx += 1
return i_expr, expr, deps
def process_conditional(term, work):
i_condExpr, condExpr, condDeps = process_binop(term[1], work)
i_yesExpr, yesExpr, yesDeps = process_mathterm(term[2][1], work)
i_noExpr, noExpr, noDeps = process_mathterm(term[3][1], work)
i_expr = (('literal','('),) + i_condExpr + (('literal',' ? '),) \
+ i_yesExpr + (('literal', ' : '),) + i_noExpr + (('literal',')'),)
expr = '(' + condExpr + ' ? ' + yesExpr + ' : ' + noExpr + ')'
return i_expr, expr, condDeps | yesDeps | noDeps
# logical and arithmetic binary operations are all handled the same way
def process_binop(term, work):
i_expr1, expr1, deps1 = process_mathterm(term[2][1], work)
i_expr2, expr2, deps2 = process_mathterm(term[3][1], work)
# special case '^', since it means something else in C
expr = '(' + expr1 + term[1] + expr2 + ')'
if term[1] == '^':
i_expr = (('literal','pow('),) + i_expr1 + (('literal', ', '),) + i_expr2 + (('literal', ')'),)
else:
i_expr = (('literal', '('),) + i_expr1 + (('literal', term[1]),) + i_expr2 + (('literal',')'),)
return i_expr, expr, deps1 | deps2
| buck06191/BayesCMD | bparser/ast.py | Python | gpl-2.0 | 36,960 |
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import urllib2, os, re
from urlresolver import common
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class DaclipsResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "daclips"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
#e.g. http://daclips.com/vb80o1esx2eb
self.pattern = 'http://((?:www.)?daclips.(?:in|com))/([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
""" Human Verification """
try:
resp = self.net.http_GET(web_url)
html = resp.content
r = re.findall(r'<span class="t" id="head_title">404 - File Not Found</span>',html)
if r:
raise Exception ('File Not Found or removed')
post_url = resp.get_url()
form_values = {}
for i in re.finditer('<input type="hidden" name="(.+?)" value="(.+?)">', html):
form_values[i.group(1)] = i.group(2)
html = self.net.http_POST(post_url, form_data=form_values).content
r = re.search('file: "http(.+?)"', html)
if r:
return "http" + r.group(1)
else:
raise Exception ('Unable to resolve Daclips link')
except urllib2.URLError, e:
common.addon.log_error('daclips: got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Daclips Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]DACLIPS[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
#return 'http://(daclips|daclips).(in|com)/%s' % (media_id)
return 'http://daclips.in/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or self.name in host
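if __name__ == '__main__':
    # Illustrative sketch only (not part of the original plugin): shows how the
    # regex in self.pattern splits a daclips URL into (host, media_id). The URL
    # is the example given in the comment near the top of the class.
    _pattern = 'http://((?:www.)?daclips.(?:in|com))/([0-9a-zA-Z]+)'
    print(re.search(_pattern, 'http://daclips.com/vb80o1esx2eb').groups())
    # roughly: ('daclips.com', 'vb80o1esx2eb')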
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/daclips.py | Python | gpl-2.0 | 3,494 |
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import wx
# Local imports
import eg
class SerialPortChoice(wx.Choice):
"""
A wx.Choice control that shows all available serial ports on the system.
"""
def __init__(
self,
parent,
id=-1,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=0,
validator=wx.DefaultValidator,
name=wx.ChoiceNameStr,
value=None
):
"""
:Parameters:
`value` : int
The initial port to select (0 = COM1:). The first available
port will be selected if the given port does not exist or
no value is given.
"""
ports = eg.SerialThread.GetAllPorts()
self.ports = ports
choices = [("COM%d" % (portnum + 1)) for portnum in ports]
wx.Choice.__init__(
self, parent, id, pos, size, choices, style, validator, name
)
try:
portPos = ports.index(value)
except ValueError:
portPos = 0
self.SetSelection(portPos)
def GetValue(self):
"""
Return the currently selected serial port.
:rtype: int
:returns: The serial port as an integer (0 = COM1:)
"""
try:
port = self.ports[self.GetSelection()]
except:
port = 0
return port
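if __name__ == "__main__":
    # Illustrative sketch only (not part of EventGhost): shows the port-number to
    # label mapping used in __init__ above, without wx or a real port scan. The
    # port list below is made up; real values come from eg.SerialThread.GetAllPorts().
    _fake_ports = [0, 2]
    print([("COM%d" % (portnum + 1)) for portnum in _fake_ports])
    # roughly: ['COM1', 'COM3']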
| tfroehlich82/EventGhost | eg/Classes/SerialPortChoice.py | Python | gpl-2.0 | 2,115 |
__version__ = '1.2.8'
| ricotabor/opendrop | opendrop/vendor/harvesters/__init__.py | Python | gpl-2.0 | 22 |
#!/usr/bin/env python
# encoding: utf-8
from .user import *
from .upload import *
from .post import *
from .system import *
def all():
result = []
models = []
for m in models:
result += m.__all__
return result
__all__ = all()
| luke0922/MarkdownEditor | application/models/__init__.py | Python | gpl-2.0 | 255 |
"""
//=========================================================
// OOMidi
// OpenOctave Midi and Audio Editor
// (C) Copyright 2009 Mathias Gyllengahm (lunar_shuttle@users.sf.net)
//=========================================================
"""
import Pyro.core
import time
oom=Pyro.core.getProxyForURI('PYRONAME://:Default.oom')
for j in range(0,5):
for i in range(0,30):
oom.addMidiTrack("amiditrack" + str(i))
for i in range(0,30):
oom.deleteTrack("amiditrack" + str(i))
for i in range(0, 10):
print i
oom.addMidiTrack("amiditrack")
oom.addWaveTrack("awavetrack")
oom.addOutput("anoutput")
oom.addInput("aninput")
oom.setMute("aninput", False)
oom.setAudioTrackVolume("aninput",1.0)
oom.deleteTrack("amiditrack")
oom.deleteTrack("awavetrack")
oom.deleteTrack("anoutput")
oom.deleteTrack("aninput")
time.sleep(1)
| ccherrett/oom | share/pybridge/examples/addtrack.py | Python | gpl-2.0 | 930 |
__author__ = 'jbellino'
import os
import csv
import gdal
import gdalconst
import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
# print 'Getting data for', year, month
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getAnnualPrecipData(year, mask=None, conversion=None):
# print 'Getting data for year', year
bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
b = BilFile(bil, mask=mask)
data = b.data
if conversion is not None:
data *= conversion
# b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
return data
def getGridIdFromRowCol(row, col):
"""
Determines the PRISM grid id based on a row, col input.
"""
    assert 1 <= row <= nrow, 'Valid row numbers are between 1 and {}.'.format(nrow)
    assert 1 <= col <= ncol, 'Valid col numbers are between 1 and {}.'.format(ncol)
grid_id = ((row-1)*ncol)+col
return grid_id
def getRowColFromGridId(grid_id):
"""
Determines the row, col based on a PRISM grid id.
"""
    assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are between 1 and {}, inclusive.'.format(max_grid_id)
q, r = divmod(grid_id, ncol)
return q+1, r
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts):
"""
Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
import arcpy
data = []
rowends = range(ncol, max_grid_id+1, ncol)
with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
rowdata = []
for rec in cur:
rowdata.append(rec[0])
if rec[2] in rowends:
data.append(rowdata)
rowdata = []
a = np.array(data, dtype=np.int)
np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
"""
Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
"""
a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
return a
def makeGridMask(grid_pnts, grid_codes=None):
"""
Makes a mask with the same shape as the PRISM grid.
'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
"""
mask = np.ones((nrow, ncol), dtype=bool)
for row in range(mask.shape[0]):
mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
    'parm' is the parameter of interest: 'ppt' (precipitation), 'tmax' (maximum temperature),
    'tmin' (minimum temperature), or 'tmean' (mean temperature).
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
os.makedirs(os.path.join(output_dir, str(year)))
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
"""
This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
"""
def __init__(self, bil_file, mask=None):
self.bil_file = bil_file
self.hdr_file = bil_file[:-3]+'hdr'
gdal.GetDriverByName('EHdr').Register()
self.get_array(mask=mask)
self.originX = self.geotransform[0]
self.originY = self.geotransform[3]
self.pixelWidth = self.geotransform[1]
self.pixelHeight = self.geotransform[5]
def get_array(self, mask=None):
self.data = None
img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
band = img.GetRasterBand(1)
self.nodatavalue = band.GetNoDataValue()
self.data = band.ReadAsArray()
self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
if mask is not None:
self.data = np.ma.masked_where(mask==True, self.data)
self.ncol = img.RasterXSize
self.nrow = img.RasterYSize
self.geotransform = img.GetGeoTransform()
def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = os.getcwd()
arcpy.CheckOutExtension('Spatial')
arcpy.env.outputCoordinateSystem = prismProj
if proj is not None:
arcpy.env.outputCoordinateSystem = proj
df = np.ma.filled(self.data, self.nodatavalue)
llx = self.originX
lly = self.originY - (self.nrow * -1 * self.pixelHeight)
point = arcpy.Point(llx, lly)
r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
if conversion_factor is not None:
r *= conversion_factor
r.save(out_grid)
def __extract_bil_from_zip(self, parent_zip):
with zf.ZipFile(parent_zip, 'r') as myzip:
if self.bil_file in myzip.namelist():
myzip.extract(self.bil_file, self.pth)
myzip.extract(self.hdr_file, self.pth)
return
def __clean_up(self):
try:
os.remove(os.path.join(self.pth, self.bil_file))
os.remove(os.path.join(self.pth, self.hdr_file))
except:
pass
if __name__ == '__main__':
grid_id = getGridIdFromRowCol(405, 972)
print grid_id
row, col = getRowColFromGridId(grid_id)
print row, col
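    # Illustrative additions (not part of the original script): build a mask from a
    # synthetic id array laid out the same way as the PRISM grid, keeping only the
    # first three cells. Real ids would come from getGridPointsFromTxt().
    synthetic_pnts = np.arange(1, max_grid_id + 1).reshape(nrow, ncol)
    mask = makeGridMask(synthetic_pnts, grid_codes=[1, 2, 3])
    print mask[0, :5]
    # roughly: [False False False  True  True]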
| inkenbrandt/EPAEN | prism/prism.py | Python | gpl-2.0 | 7,910 |
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2002-2003 Free Software Foundation
#
# FILE:
# ScrollBar.py
#
# DESCRIPTION:
#
# NOTES:
#
#from gnue.common.apps import GDebug
import math
import string
import curses
from constants import *
from Control import Control
from Button import Button
class ScrollBar(Control):
"""
horizontal only :-(
"""
def __init__(self, Parent, SBName, Y, X, W, **properties):
apply(Control.__init__, (self,Parent,SBName),properties)
self.CANGETFOCUS = 1
self.H = 1
self.W = W
self.Y = Y
self.X = X
self.PARENT = Parent
self.SetMethod("SYSPAINT", self.Paint)
self.SetMethod("SYSRUN",self.Run)
self.SetMethod("GOTFOCUS", self.__GotFocus)
self.SetMethod("LOSTFOCUS", self.__LostFocus)
self.SetMethod("CLICK", self._ChangePos)
self._max = 1
self._val = 0
self.stepsize = 1
self.__initButtons()
def __initButtons(self):
if string.find(str(self.__class__), '.ScrollBar') != -1:
Y = self.Y
X = self.X
W = self.W
Parent = self.PARENT
self.rightarrow = Button(Parent,'rightarrow',Y,X+W - 3,3,'>')
self.rightarrow.SetMethod("CLICK",self._Inc)
Parent.AddControl(self.rightarrow)
self.left2arrow = Button(Parent,'left2arrow',Y,X+W - 6,3,'<')
self.left2arrow.SetMethod("CLICK",self._Dec)
Parent.AddControl(self.left2arrow)
self.leftarrow = Button(Parent,'leftarrow',Y,X,3,'<')
self.leftarrow.SetMethod("CLICK",self._Dec)
Parent.AddControl(self.leftarrow)
def __GotFocus(self,v1,v2,v3):
self.FOCUS = 1
self.Paint(None,None,None)
return 1
def __LostFocus(self,v1,v2,v3):
self.FOCUS = 0
self.Paint(None,None,None)
return 1
def Run(self, v1,v2,v3):
if v1 :
self.ExecMethod("CLICK", self, v2, v3)
Container = self.PARENT.Screen()
global BACKWARDS
while 1:
ch = Container.GetChar()
if self.PARENT.BreakOrder(ch) :
return
if ch in (Container.TokNextField, Container.TokDownArrow, Container.TokUpArrow):
BACKWARDS = 0
if ch == Container.TokUpArrow:
BACKWARDS = 1
return
elif ch == Container.TokLeftArrow:
self._Dec(None,None,None)
elif ch == Container.TokRightArrow:
self._Inc(None, None, None)
def _ChangePos(self,arg1,arg2,newX):
X = newX - self.start
if X >= (self.WorkingArea-1):
val = self._max
else:
val = float(X) / self.stepsize
val = int(math.ceil(val))
self.Set(val)
self._Action()
def Init(self, Max):
self._max = Max
self._val = 0
self.WorkingArea = float(self.W-9)
self.start = 3
self.UsedSpace = int(math.floor(self.WorkingArea / float(self._max)))
self.stepsize = self.WorkingArea / self._max
if self.UsedSpace < 1:
self.UsedSpace = 1
self.Paint(None,None,None)
def Paint(self,v1,v2,v3):
## TODO: This is all wrong... it only partially supports _ABSX
Pos = int(math.ceil(float(self._val) * (self.stepsize))) + self.start + self._ABSX
Screen = self.PARENT.Screen()
Screen.AutoRefresh = 0
# clear the bar region in reverse standard-color
self.SetColor(1)
self.LoLight()
for i in range(0, int(self.WorkingArea)):
Screen.PutAt(self._ABSY, self.start + i + self._ABSX, ' ', curses.A_REVERSE)
# correct position
if Pos >= (self.WorkingArea + self.start):
Pos = (self.start + self.WorkingArea)
elif Pos < (self.start + self.UsedSpace):
Pos = self.start + self.UsedSpace
# draw the handle hilight
if self.FOCUS:
self.SetColor(3)
else:
self.SetColor(2)
self.LoLight()
for i in range(0, self.UsedSpace):
Screen.PutAt(self._ABSY, (Pos - self.UsedSpace) + i, ' ', curses.A_REVERSE)
Screen.AutoRefresh = 1
Screen.Refresh()
def Dec(self,arg1,arg2,arg3):
if self._val > 0:
self._val -= 1
self.Paint(None,None,None)
def _Dec(self, arg1,arg2,arg3):
self.Dec(None,None,None)
self._Action()
def Inc(self,arg1,arg2,arg3):
if self._val < self._max:
self._val += 1
self.Paint(None,None,None)
def _Inc(self, arg1,arg2,arg3):
self.Inc(None,None,None)
self._Action()
def Set(self,newVal):
if newVal < 0:
newVal = 0
elif newVal > self._max:
newVal =self._max
self._val = newVal
self.Paint(None,None,None)
def __del__(self):
Parent = self.PARENT
Parent.DelControl(self.rightarrow)
Parent.DelControl(self.leftarrow)
Parent.DelControl(self.left2arrow)
def _Action(self):
action = self.GetMethod("CHANGED")
if action != None:
apply(action,(self._val,self._max,None))
| fxia22/ASM_xf | PythonD/lib/python2.4/site-packages/display/cursing/ScrollBar.py | Python | gpl-2.0 | 5,591 |
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
from osci import client, gdata, res
from ige import log
class ColorDefinitionDlg:
def __init__(self, app):
self.app = app
self.createUI()
def display(self, color = None, confirmAction = None):
self.confirmAction = confirmAction
if color == None:
self.color = (0xff,0xff,0xff)
else:
self.color = color
self.show()
def show(self):
self.win.vR.text = hex(self.color[0])
self.win.vG.text = hex(self.color[1])
self.win.vB.text = hex(self.color[2])
self.win.vRS.slider.min = 0
self.win.vRS.slider.max = 265
self.win.vRS.slider.position = self.color[0]
self.win.vGS.slider.min = 0
self.win.vGS.slider.max = 265
self.win.vGS.slider.position = self.color[1]
self.win.vBS.slider.min = 0
self.win.vBS.slider.max = 265
self.win.vBS.slider.position = self.color[2]
log.debug("ColorDefinitionDlg(%s,%s,%s)" % (self.win.vR.text,self.win.vG.text,self.win.vB.text))
self.win.show()
# colorbox
self.win.vColor.color = self.color
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
self.show()
def onChangeRed(self, widget, action, data):
self.color = (int(self.win.vRS.slider.position), self.color[1], self.color[2])
self.win.vR.text = hex(self.color[0])
self.win.vColor.color = (int(self.win.vRS.slider.position), self.color[1], self.color[2])
def onChangeGreen(self, widget, action, data):
self.color = (self.color[0], int(self.win.vGS.slider.position), self.color[2])
self.win.vG.text = hex(self.color[1])
self.win.vColor.color = (self.color[0], int(self.win.vGS.slider.position), self.color[2])
def onChangeBlue(self, widget, action, data):
self.color = ( self.color[0], self.color[1], int(self.win.vBS.slider.position))
self.win.vB.text = hex(self.color[2])
self.win.vColor.color = ( self.color[0], self.color[1], int(self.win.vBS.slider.position))
def onOK(self, widget, action, data):
try:
r = int(self.win.vR.text,16)
g = int(self.win.vG.text,16)
b = int(self.win.vB.text,16)
if not r in range(0,256):
self.app.setFocus(self.win.vR)
raise ValueError
elif not g in range(0,256):
self.app.setFocus(self.win.vG)
raise ValueError
elif not b in range(0,256):
self.app.setFocus(self.win.vB)
raise ValueError
except ValueError:
            self.win.setStatus(_("Values must be hexadecimal numbers between 0x00 and 0xff"))
return
self.hide()
self.color = (r, g, b)
if self.confirmAction:
self.confirmAction()
def onCancel(self, widget, action, data):
self.color = None
self.hide()
def createUI(self):
w, h = gdata.scrnSize
cols = 14
rows = 8
width = cols * 20 + 5
height = rows * 20 + 4
self.win = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
movable = 0,
title = _('Color Definition'),
rect = ui.Rect((w - width) / 2, (h - height) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
tabChange = True,
)
# creating dialog window
self.win.subscribeAction('*', self)
# R
ui.Label(self.win,text = _("Red:"), align = ui.ALIGN_W, layout = (0, 0, 3, 1))
ui.Entry(self.win, id = 'vR',align = ui.ALIGN_W,layout = (7, 0, 3, 1), orderNo = 1, reportValueChanged = True,)
ui.Scrollbar(self.win,layout = ( 0,1,10,1), id='vRS',action = "onChangeRed")
# G
ui.Label(self.win,text = _("Green:"),align = ui.ALIGN_W,layout = (0, 2, 3, 1))
ui.Entry(self.win, id = 'vG',align = ui.ALIGN_W,layout = (7, 2, 3, 1), orderNo = 2, reportValueChanged = True,)
ui.Scrollbar(self.win,layout = (0,3,10,1), id='vGS',action = "onChangeGreen")
# B
ui.Label(self.win,text = _("Blue:"),align = ui.ALIGN_W,layout = (0, 4, 3, 1))
ui.Entry(self.win, id = 'vB',align = ui.ALIGN_W,layout = (7, 4, 3, 1), orderNo = 3, reportValueChanged = True,)
ui.Scrollbar(self.win,layout = (0,5,10,1), id='vBS',action = "onChangeBlue")
# color example
ui.ColorBox(self.win, id = 'vColor', layout = (10, 0, 4, 6), margins = (4, 3, 4, 4))
#i.Title(self.win, layout = (0, 4, 2, 1))
ui.TitleButton(self.win, layout = (0, 6, 7, 1), text = _("Cancel"), action = "onCancel")
okBtn = ui.TitleButton(self.win, layout = (7, 6, 7, 1), text = _("OK"), action = 'onOK')
self.win.acceptButton = okBtn
def onValueChanged(self, widget, action, data):
try:
r = int(self.win.vR.text,16)
g = int(self.win.vG.text,16)
b = int(self.win.vB.text,16)
except:
return
if not r in range(0,256) or not g in range(0,256) or not b in range(0,256):
return
self.win.vColor.color = (r, g, b)
self.win.vRS.slider.position = r
self.win.vGS.slider.position = g
self.win.vBS.slider.position = b
| ospaceteam/outerspace | client/osci/dialog/ColorDefinitionDlg.py | Python | gpl-2.0 | 6,398 |
#! /usr/bin/python
#
# This script reads through a VCF file and prints a space-separated table containing the coverage for each SNV (rows) and individual (columns). Alt and ref allele depths are summed to give the total coverage at each locus.
# Usage: ~/hts_tools/get_cov_per_ind.py fltrd_pubRetStri_dipUG35_200bp.vcf > outfile
from sys import argv
with open(argv[1], 'rb') as file:
for line in file:
if line[0:2] == '##':
continue #print line.split('\n')[0]
elif line[0:2] == "#C":
line_list = line.split('\t')
inds = line_list[9:len(line_list)]
last_ind = inds[-1].split('\n')[0]
inds[-1] = last_ind
print ' '.join(inds)
else:
line_list = line.split('\t')
ref_count = dict()
if len(line_list[4]) > 1:
continue
else:
scaf, bp = line.split('\t')[0:2]
vcf_inds = line_list[9:len(line_list)]
count_list = list()
for i, ind in enumerate(inds):
count = int()
if vcf_inds[i] == './.':
count += 0
else:
ad = vcf_inds[i].split(':')[1]
if ad == '.':
count += 0
else:
ad = ad.split(',')
count += int(ad[0]) + int(ad[1])
count_list.append(str(count))
print ' '.join(count_list)
file.close()
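# Illustrative sketch only (not part of the original script): how a single AD
# (allele depth) field is summed into coverage above. The '12,7' string is a
# made-up example of the ref,alt depths found in a VCF genotype field.
_example_ad = '12,7'
_example_cov = sum(int(x) for x in _example_ad.split(','))
assert _example_cov == 19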
| schimar/hts_tools | get_cov_per_ind.py | Python | gpl-2.0 | 1,589 |
import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
print '================ERROR============================'
    print 'gcc is not supported yet!'
print '================================================='
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
IAR_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m0 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-lpc824.map,-cref,-u,Reset_Handler -T lpc824_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M0+'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-lpc824.map --scatter lpc824_rom.sct'
CFLAGS += ' -I./'
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CFLAGS += ' --split_sections'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M0'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M0'
AFLAGS += ' --fpu None'
LFLAGS = ' --config lpc824_rom.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = ''
| Quintin-Z/rt-thread | bsp/lpc824/rtconfig.py | Python | gpl-2.0 | 3,324 |
# #
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Repository tools
Svn repository
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Ward Poelmans (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import getpass
import os
import socket
import tempfile
import time
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import rmtree2
from easybuild.tools.repository.filerepo import FileRepository
from easybuild.tools.utilities import only_if_module_is_available
_log = fancylogger.getLogger('svnrepo', fname=False)
# optional Python packages, these might be missing
# failing imports are just ignored
# PySVN
try:
import pysvn # @UnusedImport
from pysvn import ClientError # IGNORE:E0611 pysvn fails to recognize ClientError is available
HAVE_PYSVN = True
except ImportError:
_log.debug("Failed to import pysvn module")
HAVE_PYSVN = False
class SvnRepository(FileRepository):
"""
Class for svn repositories
"""
DESCRIPTION = ("An SVN repository. The 1st argument contains the "
"subversion repository location, this can be a directory or an URL. "
"The 2nd argument is a path inside the repository where to save the files.")
USABLE = HAVE_PYSVN
@only_if_module_is_available('pysvn', url='http://pysvn.tigris.org/')
def __init__(self, *args):
"""
Set self.client to None. Real logic is in setup_repo and create_working_copy
"""
self.client = None
FileRepository.__init__(self, *args)
def setup_repo(self):
"""
Set up SVN repository.
"""
self.repo = os.path.join(self.repo, self.subdir)
# try to connect to the repository
self.log.debug("Try to connect to repository %s" % self.repo)
try:
self.client = pysvn.Client()
self.client.exception_style = 0
except ClientError:
raise EasyBuildError("Svn Client initialization failed.")
try:
if not self.client.is_url(self.repo):
raise EasyBuildError("Provided repository %s is not a valid svn url", self.repo)
except ClientError:
raise EasyBuildError("Can't connect to svn repository %s", self.repo)
def create_working_copy(self):
"""
Create SVN working copy.
"""
self.wc = tempfile.mkdtemp(prefix='svn-wc-')
# check if tmppath exists
# this will trigger an error if it does not exist
try:
self.client.info2(self.repo, recurse=False)
except ClientError:
raise EasyBuildError("Getting info from %s failed.", self.wc)
try:
res = self.client.update(self.wc)
self.log.debug("Updated to revision %s in %s" % (res, self.wc))
except ClientError:
raise EasyBuildError("Update in wc %s went wrong", self.wc)
if len(res) == 0:
raise EasyBuildError("Update returned empy list (working copy: %s)", self.wc)
if res[0].number == -1:
# revision number of update is -1
# means nothing has been checked out
try:
res = self.client.checkout(self.repo, self.wc)
self.log.debug("Checked out revision %s in %s" % (res.number, self.wc))
except ClientError, err:
raise EasyBuildError("Checkout of path / in working copy %s went wrong: %s", self.wc, err)
def add_easyconfig(self, cfg, name, version, stats, append):
"""
Add easyconfig to SVN repository.
"""
dest = FileRepository.add_easyconfig(self, cfg, name, version, stats, append)
self.log.debug("destination = %s" % dest)
if dest:
self.log.debug("destination status: %s" % self.client.status(dest))
if self.client and not self.client.status(dest)[0].is_versioned:
# add it to version control
self.log.debug("Going to add %s (working copy: %s, cwd %s)" % (dest, self.wc, os.getcwd()))
self.client.add(dest)
def commit(self, msg=None):
"""
Commit working copy to SVN repository
"""
tup = (socket.gethostname(), time.strftime("%Y-%m-%d_%H-%M-%S"), getpass.getuser(), msg)
completemsg = "EasyBuild-commit from %s (time: %s, user: %s) \n%s" % tup
try:
self.client.checkin(self.wc, completemsg, recurse=True)
except ClientError, err:
raise EasyBuildError("Commit from working copy %s (msg: %s) failed: %s", self.wc, msg, err)
def cleanup(self):
"""
Clean up SVN working copy.
"""
try:
rmtree2(self.wc)
except OSError, err:
raise EasyBuildError("Can't remove working copy %s: %s", self.wc, err)
| wpoely86/easybuild-framework | easybuild/tools/repository/svnrepo.py | Python | gpl-2.0 | 6,109 |
import ctypes
import os
import types
from platform_utils import paths
def load_library(libname):
if paths.is_frozen():
libfile = os.path.join(paths.embedded_data_path(), 'accessible_output2', 'lib', libname)
else:
libfile = os.path.join(paths.module_path(), 'lib', libname)
return ctypes.windll[libfile]
def get_output_classes():
import outputs
module_type = types.ModuleType
classes = [m.output_class for m in outputs.__dict__.itervalues() if type(m) == module_type and hasattr(m, 'output_class')]
return sorted(classes, key=lambda c: c.priority)
def find_datafiles():
import os
import platform
from glob import glob
import accessible_output2
if platform.system() != 'Windows':
return []
path = os.path.join(accessible_output2.__path__[0], 'lib', '*.dll')
results = glob(path)
dest_dir = os.path.join('accessible_output2', 'lib')
return [(dest_dir, results)]
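# Hedged usage sketch (added): the DLL name below is an assumption, not something
# shipped by this file; load_library() resolves it against the package's lib/
# directory (or the frozen data path) and returns a ctypes.windll handle.
#   lib = load_library('nvdaControllerClient32.dll')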
| codeofdusk/ProjectMagenta | src/accessible_output2/__init__.py | Python | gpl-2.0 | 885 |
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
''' Generic XML bibliographic style handler '''
import string
from Pyblio.Style import Parser
from Pyblio import Autoload, recode
def author_desc (group, coding, initials = 0, reverse = 0):
""" Create a nice string describing a group of authors.
coding : name of the output coding (as requested for recode)
initials : if = 1, uses initials instead of complete first names
reverse :
-1 use First Last format
        0 use Last, First for the first author only (the rest as First Last)
1 use Last, First for all the authors, not only the first
"""
l = len (group)
fulltext = ""
for i in range (0, l):
(honorific, first, last, lineage) = group [i].format (coding)
if initials:
first = group [i].initials (coding)
text = ""
if reverse == 1 or (i == 0 and reverse == 0):
if last: text = text + last
if lineage: text = text + ", " + lineage
if first: text = text + ", " + first
else:
if first: text = first + " "
if last: text = text + last
if lineage: text = text + ", " + lineage
if text:
if i < l - 2:
text = text + ", "
elif i == l - 2:
text = text + " and "
fulltext = fulltext + text
# avoid a dot at the end of the author list
    if fulltext and fulltext [-1] == '.':
fulltext = fulltext [0:-1]
return fulltext
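# Hedged illustration (added; author names invented): with three authors and
# reverse = 0 only the first author is rendered "Last, First", giving something
# like "Knuth, Donald, Leslie Lamport and Edsger Dijkstra"; reverse = 1 renders
# every author as "Last, First", and initials = 1 replaces first names with initials.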
def string_key (entry, fmt, table):
""" Generates an alphabetical key for an entry. fmt is the
output coding """
rc = recode.recode ("latin1.." + fmt)
if entry.has_key ('author'): aut = entry ['author']
elif entry.has_key ('editor'): aut = entry ['editor']
else: aut = ()
if len (aut) > 0:
if len (aut) > 1:
key = ''
for a in aut:
honorific, first, last, lineage = a.format (fmt)
key = key + string.join (map (lambda x:
x [0], string.split (last, ' ')), '')
if len (key) >= 3:
if len (aut) > 3:
key = key + '+'
break
else:
honorific, first, last, lineage = aut [0].format (fmt)
parts = string.split (last, ' ')
if len (parts) == 1:
key = parts [0][0:3]
else:
key = string.join (map (lambda x: x [0], parts), '')
else:
key = rc (entry.key.key [0:3])
if entry.has_key ('date'):
year = entry ['date'].format (fmt) [0]
if year:
key = key + year [2:]
if table.has_key (key) or table.has_key (key + 'a'):
if table.has_key (key):
# rename the old entry
new = key + 'a'
table [new] = table [key]
del table [key]
base = key
suff = ord ('b')
key = base + chr (suff)
while table.has_key (key):
suff = suff + 1
key = base + chr (suff)
return key
def numeric_key (entry, fmt, table):
count = 1
while table.has_key (str (count)):
count = count + 1
return str (count)
def create_string_key (database, keys, fmt):
table = {}
for key in keys:
s = string_key (database [key], fmt, table)
table [s] = key
skeys = table.keys ()
skeys.sort ()
return table, skeys
def create_numeric_key (database, keys, fmt):
table = {}
skeys = []
for key in keys:
s = numeric_key (database [key], fmt, table)
table [s] = key
skeys.append (s)
return table, skeys
def standard_date (entry, coding):
(text, month, day) = entry.format (coding)
if month: text = "%s/%s" % (month, text)
if day : text = "%s/%s" % (day, text)
return text
def last_first_full_authors (entry, coding):
return author_desc (entry, coding, 0, 1)
def first_last_full_authors (entry, coding):
return author_desc (entry, coding, 0, -1)
def full_authors (entry, coding):
return author_desc (entry, coding, 0, 0)
def initials_authors (entry, coding):
return author_desc (entry, coding, 1, 0)
def first_last_initials_authors (entry, coding):
return author_desc (entry, coding, 1, -1)
def last_first_initials_authors (entry, coding):
return author_desc (entry, coding, 1, 1)
Autoload.register ('style', 'Generic', {
'first_last_full_authors' : first_last_full_authors,
'last_first_full_authors' : last_first_full_authors,
'full_authors' : full_authors,
'first_last_initials_authors' : first_last_initials_authors,
'last_first_initials_authors' : last_first_initials_authors,
'initials_authors' : initials_authors,
'string_keys' : create_string_key,
'numeric_keys' : create_numeric_key,
'european_date' : standard_date,
})
| matthew-brett/pyblio | Pyblio/Style/Generic.py | Python | gpl-2.0 | 5,843 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext' : build_ext},
ext_modules=[Extension("_snaphu",
sources=["_snaphu.pyx",
"../src/snaphu.c",
"../src/snaphu_solver.c",
"../src/snaphu_util.c",
"../src/snaphu_cost.c",
"../src/snaphu_cs2.c",
"../src/snaphu_io.c",
"../src/snaphu_tile.c"],
include_dirs=['../src'],
extra_compile_args=['-Wstrict-prototypes', ],
language="c")]
)
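# Added note: a typical build invocation for an in-place Cython extension (the
# exact command is an assumption, not part of this file):
#   python setup.py build_ext --inplace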
| bosmanoglu/adore-doris | lib/ext/snaphu-v1.4.2/cython/setup.py | Python | gpl-2.0 | 550 |
# -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <phracek@redhat.com>
# Tomas Hozza <thozza@redhat.com>
import six
from rebasehelper.base_output import OutputLogger
class TestBaseOutput(object):
"""
Class is used for testing OutputTool
"""
old_rpm_data = {'rpm': ['rpm-0.1.0.x86_64.rpm', ' rpm-devel-0.1.0.x86_64.rpm'],
'srpm': 'rpm-0.1.0.src.rpm',
'logs': ['logfile1.log', 'logfile2.log']}
new_rpm_data = {'rpm': ['rpm-0.2.0.x86_64.rpm', ' rpm-devel-0.2.0.x86_64.rpm'],
'srpm': 'rpm-0.2.0.src.rpm',
'logs': ['logfile3.log', 'logfile4.log']}
patches_data = {'deleted': ['del_patch1.patch', 'del_patch2.patch'],
'modified': ['mod_patch1.patch', 'mod_patch2.patch']}
info_data = {'Information text': 'some information text'}
info_data2 = {'Next Information': 'some another information text'}
def setup(self):
OutputLogger.set_info_text('Information text', 'some information text')
OutputLogger.set_info_text('Next Information', 'some another information text')
OutputLogger.set_patch_output('Patches:', self.patches_data)
OutputLogger.set_build_data('old', self.old_rpm_data)
OutputLogger.set_build_data('new', self.new_rpm_data)
def test_base_output_global(self):
expect_dict = self.info_data
expect_dict.update(self.info_data2)
build_dict = {'old': self.old_rpm_data,
'new': self.new_rpm_data}
expected_result = {'build': build_dict,
'patch': self.patches_data,
'information': expect_dict}
        actual_result = {'build': {'old': OutputLogger.get_build('old'),
                                   'new': OutputLogger.get_build('new')},
                         'patch': OutputLogger.get_patches(),
                         'information': OutputLogger.get_summary_info()}
        for key, value in six.iteritems(expected_result):
            assert value == actual_result[key]
def test_base_output_info(self):
"""
Test Output logger info
:return:
"""
info_results = OutputLogger.get_summary_info()
expect_dict = self.info_data
expect_dict.update(self.info_data2)
assert info_results == expect_dict
def test_base_output_patches(self):
"""
Test Output logger patches
:return:
"""
patch_results = OutputLogger.get_patches()
expected_patches = self.patches_data
assert patch_results == expected_patches
def test_base_output_builds_old(self):
"""
Test Output logger old builds
:return:
"""
build_results = OutputLogger.get_build('old')
assert build_results == self.old_rpm_data
def test_base_output_builds_new(self):
"""
Test Output logger new builds
:return:
"""
build_results = OutputLogger.get_build('new')
assert build_results == self.new_rpm_data
| uhliarik/rebase-helper | test/test_base_output.py | Python | gpl-2.0 | 3,601 |
import unittest
import warnings
from tests.baseclass import ParserTest
from pykickstart.constants import KS_MISSING_IGNORE
from pykickstart.errors import KickstartParseError
from pykickstart.version import F21, RHEL6
from pykickstart.parser import Group
class GroupsAreHashable_TestCase(ParserTest):
def runTest(self):
hash(Group(name="groupA"))
class Packages_Options_TestCase(ParserTest):
ks = """
%packages --ignoremissing --default --instLangs="bg_BG"
%end
"""
def runTest(self):
self.parser.readKickstartFromString(self.ks)
# Verify that the options are parsed as expected
self.assertTrue(self.handler.packages.default)
self.assertEqual(self.handler.packages.handleMissing, KS_MISSING_IGNORE)
self.assertEqual(self.handler.packages.instLangs, "bg_BG")
# extra test coverage
self.assertTrue(self.parser._sections['%packages'].seen)
class Packages_Contains_Comments_TestCase(ParserTest):
ks = """
%packages
packageA # this is an end-of-line comment
# this is a whole line comment
packageB
packageC
%end
"""
def runTest(self):
self.parser.readKickstartFromString(self.ks)
# Verify that the packages are what we think they are.
self.assertEqual(len(self.handler.packages.packageList), 3)
self.assertEqual(self.handler.packages.packageList[0], "packageA")
self.assertEqual(self.handler.packages.packageList[1], "packageB")
self.assertEqual(self.handler.packages.packageList[2], "packageC")
class Packages_Contains_Nobase_1_TestCase(ParserTest):
version = F21
ks = """
%packages --nobase
bash
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(len(w), 1)
self.assertIsInstance(w[-1].message, DeprecationWarning)
class Packages_Contains_Nobase_2_TestCase(ParserTest):
version = RHEL6
ks = """
%packages --nobase
bash
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(len(w), 0)
class Packages_Contains_Nobase_3_TestCase(ParserTest):
ks = """
%packages --nobase
bash
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertRaises(KickstartParseError, self.parser.readKickstartFromString, self.ks)
class Packages_Contains_Nobase_Default_TestCase(ParserTest):
version = F21
ks = """
%packages --nobase --default
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertRaises(KickstartParseError, self.parser.readKickstartFromString, self.ks)
class Packages_Contains_Nocore_Default_TestCase(ParserTest):
ks = """
%packages --nocore --default
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertRaises(KickstartParseError, self.parser.readKickstartFromString, self.ks)
class Packages_Contains_Environment_1_TestCase(ParserTest):
ks = """
%packages
@^whatever-environment
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(self.handler.packages.environment, "whatever-environment")
class Packages_Contains_Environment_2_TestCase(ParserTest):
ks = """
%packages
@^whatever-environment
@^another-environment
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(self.handler.packages.environment, "another-environment")
class Packages_Contains_Environment_3_TestCase(ParserTest):
ks = """
%packages
@^whatever-environment
-@^another-environment
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(self.handler.packages.environment, "whatever-environment")
class Packages_Contains_Environment_4_TestCase(ParserTest):
ks = """
%packages
@^whatever-environment
-@^whatever-environment
@^another-environment
%end
"""
def runTest(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.parser.readKickstartFromString(self.ks)
self.assertEqual(self.handler.packages.environment, "another-environment")
if __name__ == "__main__":
unittest.main()
| boonchu/pykickstart | tests/parser/packages.py | Python | gpl-2.0 | 4,941 |
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import socket
from flvlib import tags
from flvlib import helpers
from flvlib.astypes import MalformedFLV
import zlib
from StringIO import StringIO
import hmac
import hashlib
import base64
addon_id = 'plugin.video.israelive'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
#F4Mversion=''
class interalSimpleDownloader():
outputfile =''
clientHeader=None
def __init__(self):
self.init_done=False
def thisme(self):
return 'aaaa'
def openUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
return response
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def init(self, out_stream, url, proxy=None,g_stopEvent=None, maxbitRate=0):
try:
self.init_done=False
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.maxbitRate=maxbitRate
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.out_stream=out_stream
self.g_stopEvent=g_stopEvent
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
                #print 'header received now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
#self.downloadInternal( url)
return True
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(self.url,dest_stream)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,url,dest_stream):
try:
url=self.url
fileout=dest_stream
self.status='bootstrap done'
while True:
response=self.openUrl(url)
buf="start"
firstBlock=True
try:
while (buf != None and len(buf) > 0):
if self.g_stopEvent and self.g_stopEvent.isSet():
return
buf = response.read(200 * 1024)
fileout.write(buf)
#print 'writing something..............'
fileout.flush()
try:
if firstBlock:
firstBlock=False
if self.maxbitRate and self.maxbitRate>0:# this is for being sports for time being
#print 'maxbitrate',self.maxbitRate
ec=EdgeClass(buf,url,'http://www.en.beinsports.net/i/PerformConsole_BEIN/player/bin-release/PerformConsole.swf',sendToken=False)
ec.switchStream(self.maxbitRate,"DOWN")
except:
traceback.print_exc()
response.close()
fileout.close()
#print time.asctime(), "Closing connection"
except socket.error, e:
#print time.asctime(), "Client Closed the connection."
try:
response.close()
fileout.close()
except Exception, e:
return
except Exception, e:
traceback.print_exc(file=sys.stdout)
response.close()
fileout.close()
except:
traceback.print_exc()
return
class EdgeClass():
def __init__(self, data, url, swfUrl, sendToken=False, switchStream=None):
self.url = url
self.swfUrl = swfUrl
self.domain = self.url.split('://')[1].split('/')[0]
self.control = 'http://%s/control/' % self.domain
self.onEdge = self.extractTags(data,onEdge=True)
self.sessionID=self.onEdge['session']
self.path=self.onEdge['streamName']
#print 'session',self.onEdge['session']
#print 'Edge variable',self.onEdge
#print 'self.control',self.control
#self.MetaData = self.extractTags(data,onMetaData=True)
if sendToken:
self.sendNewToken(self.onEdge['session'],self.onEdge['streamName'],self.swfUrl,self.control)
def getURL(self, url, post=False, sessionID=False, sessionToken=False):
try:
#print 'GetURL --> url = '+url
opener = urllib2.build_opener()
if sessionID and sessionToken:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ),
('x-Akamai-Streaming-SessionToken', sessionToken ),
('x-Akamai-Streaming-SessionID', sessionID ),
('Content-Type', 'text/xml' )]
elif sessionID:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ),
('x-Akamai-Streaming-SessionID', sessionID ),
('Content-Type', 'text/xml' )]
else:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' )]
if not post:
usock=opener.open(url)
else:
usock=opener.open(url,':)')
response=usock.read()
usock.close()
except urllib2.URLError, e:
#print 'Error reason: ', e
return False
else:
return response
def extractTags(self, filedata, onEdge=True,onMetaData=False):
f = StringIO(filedata)
flv = tags.FLV(f)
try:
tag_generator = flv.iter_tags()
for i, tag in enumerate(tag_generator):
if isinstance(tag, tags.ScriptTag):
if tag.name == "onEdge" and onEdge:
return tag.variable
elif tag.name == "onMetaData" and onMetaData:
return tag.variable
except MalformedFLV, e:
return False
except tags.EndOfFile:
return False
f.close()
return False
def decompressSWF(self,f):
if type(f) is str:
f = StringIO(f)
f.seek(0, 0)
magic = f.read(3)
if magic == "CWS":
return "FWS" + f.read(5) + zlib.decompress(f.read())
elif magic == "FWS":
#SWF Not Compressed
f.seek(0, 0)
return f.read()
else:
#Not SWF
return None
def MD5(self,data):
m = hashlib.md5()
m.update(data)
return m.digest()
def makeToken(self,sessionID,swfUrl):
swfData = self.getURL(swfUrl)
decData = self.decompressSWF(swfData)
swfMD5 = self.MD5(decData)
data = sessionID+swfMD5
sig = hmac.new('foo', data, hashlib.sha1)
return base64.encodestring(sig.digest()).replace('\n','')
def sendNewToken(self,sessionID,path,swf,domain):
sessionToken = self.makeToken(sessionID,swf)
commandUrl = domain+path+'?cmd=sendingNewToken&v=2.7.6&swf='+swf.replace('http://','http%3A//')
self.getURL(commandUrl,True,sessionID,sessionToken)
def switchStream(self, bitrate, upDown="UP"):
newStream=self.path
#print 'newStream before ',newStream
newStream=re.sub('_[0-9]*@','_'+str(bitrate)+'@',newStream)
#print 'newStream after ',newStream,bitrate
sessionToken =None# self.makeToken(sessionID,swf)
commandUrl = self.control+newStream+'?cmd=&reason=SWITCH_'+upDown+',1784,1000,1.3,2,'+self.path+'v=2.11.3'
self.getURL(commandUrl,True,self.sessionID,sessionToken)
| noam09/kodi | script.module.israeliveresolver/lib/interalSimpleDownloader.py | Python | gpl-3.0 | 10,978 |
import colorsys
import sys
import xml.etree.cElementTree as ET
# from io import BytesIO
from gi.repository import Gtk, Gdk, GObject, Pango
from gi.repository.GdkPixbuf import Pixbuf
from pychess.System import conf
from pychess.System.Log import log
from pychess.System.prefix import addDataPrefix
def createCombo(combo, data=[], name=None, ellipsize_mode=None):
if name is not None:
combo.set_name(name)
lst_store = Gtk.ListStore(Pixbuf, str)
for row in data:
lst_store.append(row)
combo.clear()
combo.set_model(lst_store)
crp = Gtk.CellRendererPixbuf()
crp.set_property('xalign', 0)
crp.set_property('xpad', 2)
combo.pack_start(crp, False)
combo.add_attribute(crp, 'pixbuf', 0)
crt = Gtk.CellRendererText()
crt.set_property('xalign', 0)
crt.set_property('xpad', 4)
combo.pack_start(crt, True)
combo.add_attribute(crt, 'text', 1)
if ellipsize_mode is not None:
crt.set_property('ellipsize', ellipsize_mode)
def updateCombo(combo, data):
def get_active(combobox):
model = combobox.get_model()
active = combobox.get_active()
if active < 0:
return None
return model[active][1]
last_active = get_active(combo)
lst_store = combo.get_model()
lst_store.clear()
new_active = 0
for i, row in enumerate(data):
lst_store.append(row)
if last_active == row[1]:
new_active = i
combo.set_active(new_active)
def genColor(n, startpoint=0):
assert n >= 1
# This splits the 0 - 1 segment in the pizza way
hue = (2 * n - 1) / (2.**(n - 1).bit_length()) - 1
hue = (hue + startpoint) % 1
# We set saturation based on the amount of green, scaled to the interval
# [0.6..0.8]. This ensures a consistent lightness over all colors.
rgb = colorsys.hsv_to_rgb(hue, 1, 1)
rgb = colorsys.hsv_to_rgb(hue, 1, (1 - rgb[1]) * 0.2 + 0.6)
# This algorithm ought to balance colors more precisely, but it overrates
# the lightness of yellow, and nearly makes it black
# yiq = colorsys.rgb_to_yiq(*rgb)
# rgb = colorsys.yiq_to_rgb(.125, yiq[1], yiq[2])
return rgb
def keepDown(scrolledWindow):
def changed(vadjust):
if not hasattr(vadjust, "need_scroll") or vadjust.need_scroll:
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
vadjust.need_scroll = True
scrolledWindow.get_vadjustment().connect("changed", changed)
def value_changed(vadjust):
vadjust.need_scroll = abs(vadjust.get_value() + vadjust.get_page_size() -
vadjust.get_upper()) < vadjust.get_step_increment()
scrolledWindow.get_vadjustment().connect("value-changed", value_changed)
# wrap analysis text column. thanks to
# http://www.islascruz.org/html/index.php?blog/show/Wrap-text-in-a-TreeView-column.html
def appendAutowrapColumn(treeview, name, **kvargs):
cell = Gtk.CellRendererText()
# cell.props.wrap_mode = Pango.WrapMode.WORD
# TODO:
# changed to ellipsize instead until "never ending grow" bug gets fixed
# see https://github.com/pychess/pychess/issues/1054
cell.props.ellipsize = Pango.EllipsizeMode.END
column = Gtk.TreeViewColumn(name, cell, **kvargs)
treeview.append_column(column)
def callback(treeview, allocation, column, cell):
otherColumns = [c for c in treeview.get_columns() if c != column]
newWidth = allocation.width - sum(c.get_width() for c in otherColumns)
hsep = GObject.Value()
hsep.init(GObject.TYPE_INT)
hsep.set_int(0)
treeview.style_get_property("horizontal-separator", hsep)
newWidth -= hsep.get_int() * (len(otherColumns) + 1) * 2
if cell.props.wrap_width == newWidth or newWidth <= 0:
return
cell.props.wrap_width = newWidth
store = treeview.get_model()
store_iter = store.get_iter_first()
while store_iter and store.iter_is_valid(store_iter):
store.row_changed(store.get_path(store_iter), store_iter)
store_iter = store.iter_next(store_iter)
treeview.set_size_request(0, -1)
# treeview.connect_after("size-allocate", callback, column, cell)
scroll = treeview.get_parent()
if isinstance(scroll, Gtk.ScrolledWindow):
scroll.set_policy(Gtk.PolicyType.NEVER, scroll.get_policy()[1])
return cell
METHODS = (
# Gtk.SpinButton should be listed prior to Gtk.Entry, as it is a
# subclass, but requires different handling
(Gtk.SpinButton, ("get_value", "set_value", "value-changed")),
(Gtk.Entry, ("get_text", "set_text", "changed")),
(Gtk.Expander, ("get_expanded", "set_expanded", "notify::expanded")),
(Gtk.ComboBox, ("get_active", "set_active", "changed")),
(Gtk.IconView, ("_get_active", "_set_active", "selection-changed")),
(Gtk.ToggleButton, ("get_active", "set_active", "toggled")),
(Gtk.CheckMenuItem, ("get_active", "set_active", "toggled")),
(Gtk.Range, ("get_value", "set_value", "value-changed")),
(Gtk.TreeSortable, ("get_value", "set_value", "sort-column-changed")),
(Gtk.Paned, ("get_position", "set_position", "notify::position")),
)
def keep(widget, key, get_value_=None, set_value_=None): # , first_value=None):
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % key)
for class_, methods_ in METHODS:
# Use try-except just to make spinx happy...
try:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
except TypeError:
getter, setter, signal = methods_
break
else:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if set_value_:
def set_value(v):
return set_value_(widget, v)
else:
set_value = getattr(widget, setter)
def setFromConf():
try:
v = conf.get(key)
except TypeError:
log.warning("uistuff.keep.setFromConf: Key '%s' from conf had the wrong type '%s', ignored" %
(key, type(conf.get(key))))
# print("uistuff.keep TypeError %s %s" % (key, conf.get(key)))
else:
set_value(v)
def callback(*args):
if not conf.hasKey(key) or conf.get(key) != get_value():
conf.set(key, get_value())
widget.connect(signal, callback)
conf.notify_add(key, lambda *args: setFromConf())
if conf.hasKey(key):
setFromConf()
elif conf.get(key) is not None:
conf.set(key, conf.get(key))
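# Hedged usage sketch (added; widget and key names invented): bind a toggle
# button to a conf key so its state is saved on change and restored on startup:
#   keep(widgets["showCoordinates"], "showCoordinates")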
# loadDialogWidget() and saveDialogWidget() are similar to uistuff.keep() but are needed
# for saving widget values for Gtk.Dialog instances that are loaded with different
# sets of values/configurations and which also aren't instant save like in
# uistuff.keep(), but rather are saved later if and when the user clicks
# the dialog's OK button
def loadDialogWidget(widget,
widget_name,
config_number,
get_value_=None,
set_value_=None,
first_value=None):
key = widget_name + "-" + str(config_number)
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % widget_name)
for class_, methods_ in METHODS:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
else:
if set_value_ is None:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if set_value_:
def set_value(v):
return set_value_(widget, v)
else:
set_value = getattr(widget, setter)
if conf.hasKey(key):
try:
v = conf.get(key)
except TypeError:
log.warning("uistuff.loadDialogWidget: Key '%s' from conf had the wrong type '%s', ignored" %
(key, type(conf.get(key))))
if first_value is not None:
conf.set(key, first_value)
else:
conf.set(key, get_value())
else:
set_value(v)
elif first_value is not None:
conf.set(key, first_value)
set_value(conf.get(key))
else:
log.warning("Didn't load widget \"%s\": no conf value and no first_value arg" % widget_name)
def saveDialogWidget(widget, widget_name, config_number, get_value_=None):
key = widget_name + "-" + str(config_number)
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % widget_name)
for class_, methods_ in METHODS:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
else:
if get_value_ is None:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if not conf.hasKey(key) or conf.get(key) != get_value():
conf.set(key, get_value())
POSITION_NONE, POSITION_CENTER, POSITION_GOLDEN = range(3)
def keepWindowSize(key,
window,
defaultSize=None,
defaultPosition=POSITION_NONE):
""" You should call keepWindowSize before show on your windows """
key = key + "window"
def savePosition(window, *event):
log.debug("keepWindowSize.savePosition: %s" % window.get_title())
width = window.get_allocation().width
height = window.get_allocation().height
x_loc, y_loc = window.get_position()
if width <= 0:
log.error("Setting width = '%d' for %s to conf" % (width, key))
if height <= 0:
log.error("Setting height = '%d' for %s to conf" % (height, key))
log.debug("Saving window position width=%s height=%s x=%s y=%s" %
(width, height, x_loc, y_loc))
conf.set(key + "_width", width)
conf.set(key + "_height", height)
conf.set(key + "_x", x_loc)
conf.set(key + "_y", y_loc)
return False
window.connect("delete-event", savePosition, "delete-event")
def loadPosition(window):
# log.debug("keepWindowSize.loadPosition: %s" % window.title)
# Just to make sphinx happy...
try:
width, height = window.get_size_request()
except TypeError:
pass
if conf.hasKey(key + "_width") and conf.hasKey(key + "_height"):
width = conf.get(key + "_width")
height = conf.get(key + "_height")
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif defaultSize:
width, height = defaultSize
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif key == "mainwindow":
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif key == "preferencesdialogwindow":
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
window.resize(1, 1)
else:
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
if conf.hasKey(key + "_x") and conf.hasKey(key + "_y"):
x = max(0, conf.get(key + "_x"))
y = max(0, conf.get(key + "_y"))
log.debug("Moving window to x=%s y=%s" % (x, y))
window.move(x, y)
elif defaultPosition in (POSITION_CENTER, POSITION_GOLDEN):
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
x_loc = int(monitor_width / 2 - width / 2) + monitor_x
if defaultPosition == POSITION_CENTER:
y_loc = int(monitor_height / 2 - height / 2) + monitor_y
else:
# Place the window on the upper golden ratio line
y_loc = int(monitor_height / 2.618 - height / 2) + monitor_y
log.debug("Moving window to x=%s y=%s" % (x_loc, y_loc))
window.move(x_loc, y_loc)
loadPosition(window)
# In rare cases, gtk throws some gtk_size_allocation error, which is
# probably a race condition. To avoid the window forgets its size in
# these cases, we add this extra hook
def callback(window):
loadPosition(window)
onceWhenReady(window, callback)
# Some properties can only be set, once the window is sufficiently initialized,
# This function lets you queue your request until that has happened.
def onceWhenReady(window, func, *args, **kwargs):
def cb(window, alloc, func, *args, **kwargs):
func(window, *args, **kwargs)
window.disconnect(handler_id)
handler_id = window.connect_after("size-allocate", cb, func, *args, **
kwargs)
def getMonitorBounds():
screen = Gdk.Screen.get_default()
root_window = screen.get_root_window()
# Just to make sphinx happy...
try:
ptr_window, mouse_x, mouse_y, mouse_mods = root_window.get_pointer()
current_monitor_number = screen.get_monitor_at_point(mouse_x, mouse_y)
monitor_geometry = screen.get_monitor_geometry(current_monitor_number)
return monitor_geometry.x, monitor_geometry.y, monitor_geometry.width, monitor_geometry.height
except TypeError:
return (0, 0, 0, 0)
def makeYellow(box):
def on_box_expose_event(box, context):
# box.style.paint_flat_box (box.window,
# Gtk.StateType.NORMAL, Gtk.ShadowType.NONE, None, box, "tooltip",
# box.allocation.x, box.allocation.y,
# box.allocation.width, box.allocation.height)
pass
def cb(box):
tooltip = Gtk.Window(Gtk.WindowType.POPUP)
tooltip.set_name('gtk-tooltip')
tooltip.ensure_style()
tooltipStyle = tooltip.get_style()
box.set_style(tooltipStyle)
box.connect("draw", on_box_expose_event)
onceWhenReady(box, cb)
class GladeWidgets:
""" A simple class that wraps a the glade get_widget function
into the python __getitem__ version """
def __init__(self, filename):
# TODO: remove this when upstream fixes translations with Python3+Windows
if sys.platform == "win32" and not conf.no_gettext:
tree = ET.parse(addDataPrefix("glade/%s" % filename))
for node in tree.iter():
if 'translatable' in node.attrib:
node.text = _(node.text)
del node.attrib['translatable']
if node.get('name') in ('pixbuf', 'logo'):
node.text = addDataPrefix("glade/%s" % node.text)
xml_text = ET.tostring(tree.getroot(), encoding='unicode', method='xml')
self.builder = Gtk.Builder.new_from_string(xml_text, -1)
else:
self.builder = Gtk.Builder()
if not conf.no_gettext:
self.builder.set_translation_domain("pychess")
self.builder.add_from_file(addDataPrefix("glade/%s" % filename))
def __getitem__(self, key):
return self.builder.get_object(key)
def getGlade(self):
return self.builder
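# Hedged usage sketch (added; file and widget names invented):
#   widgets = GladeWidgets("preferences.glade")
#   dialog = widgets["preferences_dialog"]   # shorthand for builder.get_object()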
| pychess/pychess | lib/pychess/System/uistuff.py | Python | gpl-3.0 | 16,160 |
"""Tool to loop over fls_h.inc files. Based on nens/asc.py and NumPy
masked arrays. Stripped out all unnecessary flexibility.
Usage:
# Opens zipfile if path ends with zip; inside it opens the only file,
    # or raises ValueError if there are several. Currently we need a
    # no_data_value passed in because we don't get it from the file; you may
    # need to get one from an accompanying .asc file.
flsh = flshinc.Flsh(path, no_data_value=-999.0)
geo_transform = flsh.geo_transform() # Format same as GDAL's, in
# Rijksdriehoek probably
cellsize_in_m2 = geo_transform[1]*geo_transform[1]
for timestamp, grid in flsh:
print("Total inundated area at timestamp {0}: {1} m2".format(
timestamp, numpy.greater(grid, 0).sum() * cellsize_in_m2))
Extra boolean options to Flsh:
one_per_hour: only yield the first grid of each hour (assumes
timestamp is in hours)
mutate: constantly yield the same grid object. Means that previously
yielded grids change. Faster because no copies are made, but
only use when you understand the risk.
If anything unexpected is encountered in a file, a possibly cryptic
ValueError is raised.
"""
# Python 3 is coming to town
from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import logging
import math
import numpy
import numpy.ma
import zipfile
from flooding_lib.util import files
logger = logging.getLogger(__name__)
def splitline(f):
return f.readline().decode('utf8').strip().split()
def ints(f):
return [int(i) for i in splitline(f)]
def floats(f):
return [float(fl) for fl in splitline(f)]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 +
(p1[1] - p2[1]) ** 2)
def check(line, expected):
if line[:len(expected)] != expected:
raise ValueError("line {0} was expected to start with {1}".
format(line, expected))
def y0_is_south(header, helper_geotransform):
if helper_geotransform:
helper_y0 = helper_geotransform[3]
# In old FLS files, header['y0'] is the y value of the
# southwest corner, in newer ones it's the y of the northwest
# corner. We have no way to distinguish them based on the FLS
# file alone.
# The helper geotransform's y0 is always at the north of the
# region. If it is sufficiently northwards of the FLS' y0,
# the y0 must be to the south. "Sufficient" is defined as at
# least 10% of the FLS height -- I'm afraid that without that
# margin, we're going to find maxwaterdepth grids that are a
# tiny bit to the north of the FLS, that would cause false
# souths.
north_of_fls_y0 = (
header['y0'] + 0.1 * (header['nrows'] * header['dx']))
if helper_y0 > north_of_fls_y0:
return True
return False
class Flsh(object):
def __init__(
self, path, no_data_value=-999.0, one_per_hour=False,
mutate=False, helper_geotransform=None):
self.path = path
self.no_data_value = no_data_value
self.one_per_hour = one_per_hour
self.mutate = mutate
self.helper_geotransform = helper_geotransform
def geo_transform(self):
header = self._parse_header()
# y0 can be north or south, dy is positive or negative depending
if y0_is_south(header, self.helper_geotransform):
y0 = header['y0'] + (header['nrows'] * header['dx'])
else:
y0 = header['y0']
return [header['x0'], header['dx'], 0.0,
y0, 0.0, -header['dx']]
def get_classes(self):
header = self._parse_header()
return header['classes']
def _open_path(self):
if self.path.endswith('.zip'):
try:
zipf = zipfile.ZipFile(self.path)
namelist = zipf.namelist()
if len(namelist) != 1:
raise ValueError(
"Can only open .zip files with 1 file inside, "
"{p} has {n}.".format(p=self.path, n=len(namelist)))
return zipf.open(namelist[0], mode='rU')
except zipfile.BadZipfile:
raise ValueError(
"{} ends in .zip but can't be opened as one."
.format(self.path))
else:
return file(self.path, 'rU')
@property
def ncols(self):
return self._parse_header()['ncols']
@property
def nrows(self):
return self._parse_header()['nrows']
def _parse_header(self):
if hasattr(self, '_header'):
return self._header
self.f = self._open_path()
# 1: dimensions
while True:
try:
check(
splitline(self.f),
['MAIN', 'DIMENSIONS', 'MMAX', 'NMAX'])
break
except ValueError:
pass
colrowline = splitline(self.f)
try:
ncols, nrows = [int(c) for c in colrowline]
except ValueError:
if colrowline[0] == '***':
nrows, ncols = self.find_max_col()
# logger.debug("nrows={0} ncols={1}".format(nrows, ncols))
# 2: grid
while True:
try:
spl = splitline(self.f)
check(spl, ['GRID'])
break
except ValueError:
pass
grid = floats(self.f)
spl = spl[1:]
dx = grid[spl.index('DX')]
x0 = grid[spl.index('X0')]
y0 = grid[spl.index('Y0')]
# logger.debug("dx={0} x0={1} y0={2}".format(dx, x0, y0))
# 3: classes
while True:
try:
check(
splitline(self.f),
['CLASSES', 'OF', 'INCREMENTAL', 'FILE'])
break
except ValueError:
pass
classes = []
line = splitline(self.f)
while line != ['ENDCLASSES']:
classes += [[float(fl) for fl in line]]
line = splitline(self.f)
# logger.debug("classes: {0}".format(classes))
self._header = {
'nrows': nrows,
'ncols': ncols,
'dx': dx,
'x0': x0,
'y0': y0,
'classes': classes,
}
return self._header
def find_max_col(self):
opened = self._open_path()
maxcol = 0
maxrow = 0
for line in opened:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
continue
try:
row, col, value = [int(elem) for elem in line]
except ValueError:
continue
maxcol = max(maxcol, col)
maxrow = max(maxrow, row)
logger.debug("Found max col: {}".format(maxcol))
logger.debug("Found max row: {}".format(maxrow))
return maxcol, maxrow
def __iter__(self):
header = self._parse_header()
the_array = numpy.zeros((header['nrows'] + 1, header['ncols'] + 1))
current_timestamp = False
yield_this_grid = False
last_yielded_hour = None
for line in self.f:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
if yield_this_grid:
if self.mutate:
yield current_timestamp, the_array
else:
yield current_timestamp, numpy.array(the_array)
last_yielded_hour = int(current_timestamp)
if not line:
# End of file
return
# Start of a new timestamp
timestamp, _, class_column = line[:3]
current_timestamp = float(timestamp)
class_column = int(class_column) - 1
yield_this_grid = (
not self.one_per_hour
or int(current_timestamp) != last_yielded_hour)
else:
row, col, classvalue = [int(l) for l in line]
if classvalue == 0:
value = 0.0
else:
value = header['classes'][classvalue - 1][class_column]
try:
the_array[-col, row - 1] = value
except IndexError:
print(the_array.shape)
print("col: {}".format(col))
print("row: {}".format(row))
raise
self.f.close() # When the file is closed, it can be deleted
# on Windows
def save_grid_to_image(grid, path, classes, colormap, geo_transform=None):
"""Save this grid as an image.
Assumes that all values in the grid are values that come from
one of the classes. Translates the values in the classes to colors
from the colormap, then finds all the places in the grid that are
equal to that class and sets all those to the right color.
Because of the above (classes) this save functions is not exactly
the same as the ColorMap.apply_to_grid() and files.save_geopng()
functions.
The type of image is decided by the path, but I only test with
PNG."""
classvalues = set()
for classline in classes:
for value in classline:
classvalues.add(value)
class_to_color = dict()
for classvalue in classvalues:
class_to_color[classvalue] = (
colormap.value_to_color(classvalue) or (0, 0, 0, 0))
n, m = grid.shape
colorgrid = numpy.zeros((4, n, m), dtype=numpy.uint8)
redgrid = numpy.zeros((n, m))
greengrid = numpy.zeros((n, m))
bluegrid = numpy.zeros((n, m))
for classvalue, color in class_to_color.items():
mask = (grid == classvalue)
redgrid += mask * color[0]
greengrid += mask * color[1]
bluegrid += mask * color[2]
colorgrid[0] = redgrid
colorgrid[1] = greengrid
colorgrid[2] = bluegrid
# Colored pixels get opacity 255, non-colored pixels opacity 0
# (transparent)
colorgrid[3] = (
((redgrid > 0) | (greengrid > 0) | (bluegrid > 0)) * 255)
files.save_geopng(path, colorgrid, geo_transform)
| lizardsystem/flooding-lib | flooding_lib/util/flshinc.py | Python | gpl-3.0 | 10,414 |
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Employee and Attendance"),
"items": [
{
"type": "doctype",
"name": "Employee",
"description": _("Employee records."),
},
{
"type": "doctype",
"name": "Employee Attendance Tool",
"label": _("Employee Attendance Tool"),
"description":_("Mark Attendance for multiple employees"),
"hide_count": True
},
{
"type": "doctype",
"name": "Attendance",
"description": _("Attendance record."),
},
{
"type": "doctype",
"name": "Upload Attendance",
"description":_("Upload attendance from a .csv file"),
"hide_count": True
},
]
},
{
"label": _("Recruitment"),
"items": [
{
"type": "doctype",
"name": "Job Applicant",
"description": _("Applicant for a Job."),
},
{
"type": "doctype",
"name": "Job Opening",
"description": _("Opening for a Job."),
},
{
"type": "doctype",
"name": "Offer Letter",
"description": _("Offer candidate a Job."),
},
]
},
{
"label": _("Leaves and Holiday"),
"items": [
{
"type": "doctype",
"name": "Leave Application",
"description": _("Applications for leave."),
},
{
"type": "doctype",
"name":"Leave Type",
"description": _("Type of leaves like casual, sick etc."),
},
{
"type": "doctype",
"name": "Holiday List",
"description": _("Holiday master.")
},
{
"type": "doctype",
"name": "Leave Allocation",
"description": _("Allocate leaves for a period.")
},
{
"type": "doctype",
"name": "Leave Control Panel",
"label": _("Leave Allocation Tool"),
"description":_("Allocate leaves for the year."),
"hide_count": True
},
{
"type": "doctype",
"name": "Leave Block List",
"description": _("Block leave applications by department.")
},
]
},
{
"label": _("Payroll"),
"items": [
{
"type": "doctype",
"name": "Salary Slip",
"description": _("Monthly salary statement."),
},
{
"type": "doctype",
"name": "Process Payroll",
"label": _("Process Payroll"),
"description":_("Generate Salary Slips"),
"hide_count": True
},
{
"type": "doctype",
"name": "Salary Structure",
"description": _("Salary template master.")
},
{
"type": "doctype",
"name": "Salary Component",
"label": _("Salary Components"),
"description": _("Earnings, Deductions and other Salary components")
},
]
},
{
"label": _("Expense Claims"),
"items": [
{
"type": "doctype",
"name": "Expense Claim",
"description": _("Claims for company expense."),
},
{
"type": "doctype",
"name": "Expense Claim Type",
"description": _("Types of Expense Claim.")
},
]
},
{
"label": _("Appraisals"),
"items": [
{
"type": "doctype",
"name": "Appraisal",
"description": _("Performance appraisal."),
},
{
"type": "doctype",
"name": "Appraisal Template",
"description": _("Template for performance appraisals.")
},
]
},
{
"label": _("Training"),
"items": [
{
"type": "doctype",
"name": "Training Event"
},
{
"type": "doctype",
"name": "Training Result"
},
{
"type": "doctype",
"name": "Training Feedback"
},
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "HR Settings",
"description": _("Settings for HR Module")
},
{
"type": "doctype",
"name": "Employment Type",
"description": _("Types of employment (permanent, contract, intern etc.).")
},
{
"type": "doctype",
"name": "Branch",
"description": _("Organization branch master.")
},
{
"type": "doctype",
"name": "Department",
"description": _("Organization unit (department) master.")
},
{
"type": "doctype",
"name": "Designation",
"description": _("Employee designation (e.g. CEO, Director etc.).")
},
{
"type": "doctype",
"name": "Daily Work Summary Settings"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Employee Leave Balance",
"doctype": "Leave Application"
},
{
"type": "report",
"is_query_report": True,
"name": "Employee Birthday",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Employees working on a holiday",
"doctype": "Employee"
},
{
"type": "report",
"name": "Employee Information",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Monthly Salary Register",
"doctype": "Salary Slip"
},
{
"type": "report",
"is_query_report": True,
"name": "Monthly Attendance Sheet",
"doctype": "Attendance"
},
]
},
]
| elba7r/system | erpnext/config/hr.py | Python | gpl-3.0 | 5,192 |
import math
import numpy as np
from Orange import data
def _get_variable(variable, dat, attr_name,
expected_type=None, expected_name=""):
failed = False
if isinstance(variable, data.Variable):
datvar = getattr(dat, "variable", None)
if datvar is not None and datvar is not variable:
raise ValueError("variable does not match the variable"
"in the data")
elif hasattr(dat, "domain"):
variable = dat.domain[variable]
elif hasattr(dat, attr_name):
variable = dat.variable
else:
failed = True
if failed or (expected_type is not None and
not isinstance(variable, expected_type)):
if not expected_type or isinstance(variable, data.Variable):
raise ValueError(
"expected %s variable not %s" % (expected_name, variable))
else:
raise ValueError("expected %s, not '%s'" %
(expected_type.__name__, type(variable).__name__))
return variable
class Discrete(np.ndarray):
def __new__(cls, dat=None, col_variable=None, row_variable=None, unknowns=None):
if isinstance(dat, data.Storage):
if unknowns is not None:
raise TypeError(
"incompatible arguments (data storage and 'unknowns'")
return cls.from_data(dat, col_variable, row_variable)
if row_variable is not None:
row_variable = _get_variable(row_variable, dat, "row_variable")
rows = len(row_variable.values)
else:
rows = dat.shape[0]
if col_variable is not None:
col_variable = _get_variable(col_variable, dat, "col_variable")
cols = len(col_variable.values)
else:
cols = dat.shape[1]
self = super().__new__(cls, (rows, cols))
self.row_variable = row_variable
self.col_variable = col_variable
if dat is None:
self[:] = 0
self.unknowns = unknowns or 0
else:
self[...] = dat
self.unknowns = (unknowns if unknowns is not None
else getattr(dat, "unknowns", 0))
return self
@classmethod
def from_data(cls, data, col_variable, row_variable=None):
if row_variable is None:
row_variable = data.domain.class_var
if row_variable is None:
raise ValueError("row_variable needs to be specified (data "
"has no class)")
row_variable = _get_variable(row_variable, data, "row_variable")
col_variable = _get_variable(col_variable, data, "col_variable")
try:
dist, unknowns = data._compute_contingency(
[col_variable], row_variable)[0]
self = super().__new__(cls, dist.shape)
self[...] = dist
self.unknowns = unknowns
except NotImplementedError:
            self = super().__new__(
                cls, (len(row_variable.values), len(col_variable.values)))
            self[:] = 0
            self.unknowns = np.zeros(len(row_variable.values))
rind = data.domain.index(row_variable)
cind = data.domain.index(col_variable)
for row in data:
rval, cval = row[rind], row[cind]
if math.isnan(rval):
continue
w = row.weight
                if math.isnan(cval):
                    self.unknowns[rval] += w
else:
self[rval, cval] += w
self.row_variable = row_variable
self.col_variable = col_variable
return self
def __eq__(self, other):
return np.array_equal(self, other) and (
not hasattr(other, "unknowns") or
np.array_equal(self.unknowns, other.unknowns))
def __getitem__(self, index):
if isinstance(index, str):
if len(self.shape) == 2: # contingency
index = self.row_variable.to_val(index)
contingency_row = super().__getitem__(index)
contingency_row.col_variable = self.col_variable
return contingency_row
else: # Contingency row
column = self.strides == self.base.strides[:1]
if column:
index = self.row_variable.to_val(index)
else:
index = self.col_variable.to_val(index)
elif isinstance(index, tuple):
if isinstance(index[0], str):
index = (self.row_variable.to_val(index[0]), index[1])
if isinstance(index[1], str):
index = (index[0], self.col_variable.to_val(index[1]))
result = super().__getitem__(index)
if result.strides:
result.col_variable = self.col_variable
result.row_variable = self.row_variable
return result
def __setitem__(self, index, value):
if isinstance(index, str):
index = self.row_variable.to_val(index)
elif isinstance(index, tuple):
if isinstance(index[0], str):
index = (self.row_variable.to_val(index[0]), index[1])
if isinstance(index[1], str):
index = (index[0], self.col_variable.to_val(index[1]))
super().__setitem__(index, value)
def normalize(self, axis=None):
t = np.sum(self, axis=axis)
if t > 1e-6:
self[:] /= t
if axis is None or axis == 1:
self.unknowns /= t
class Continuous:
def __init__(self, dat=None, col_variable=None, row_variable=None,
unknowns=None):
if isinstance(dat, data.Storage):
if unknowns is not None:
raise TypeError(
"incompatible arguments (data storage and 'unknowns'")
return self.from_data(dat, col_variable, row_variable)
if row_variable is not None:
row_variable = _get_variable(row_variable, dat, "row_variable")
if col_variable is not None:
col_variable = _get_variable(col_variable, dat, "col_variable")
self.values, self.counts = dat
self.row_variable = row_variable
self.col_variable = col_variable
if unknowns is not None:
self.unknowns = unknowns
elif row_variable:
self.unknowns = np.zeros(len(row_variable.values))
else:
self.unknowns = None
def from_data(self, data, col_variable, row_variable=None):
if row_variable is None:
row_variable = data.domain.class_var
if row_variable is None:
raise ValueError("row_variable needs to be specified (data"
"has no class)")
self.row_variable = _get_variable(row_variable, data, "row_variable")
self.col_variable = _get_variable(col_variable, data, "col_variable")
try:
(self.values, self.counts), self.unknowns = data._compute_contingency(
[col_variable], row_variable)[0]
except NotImplementedError:
raise NotImplementedError("Fallback method for computation of "
"contingencies is not implemented yet")
def __eq__(self, other):
return (np.array_equal(self.values, other.values) and
np.array_equal(self.counts, other.counts) and
(not hasattr(other, "unknowns") or
np.array_equal(self.unknowns, other.unknowns)))
def __getitem__(self, index):
""" Return contingencies for a given class value. """
if isinstance(index, (str, float)):
index = self.row_variable.to_val(index)
C = self.counts[index]
ind = C > 0
return np.vstack((self.values[ind], C[ind]))
def __len__(self):
return self.counts.shape[0]
def __setitem__(self, index, value):
raise NotImplementedError("Setting individual class contingencies is "
"not implemented yet. Set .values and .counts.")
def normalize(self, axis=None):
if axis is None:
t = sum(np.sum(x[:, 1]) for x in self)
if t > 1e-6:
for x in self:
x[:, 1] /= t
elif axis != 1:
raise ValueError("contingencies can be normalized only with axis=1"
" or without axis")
else:
for i, x in enumerate(self):
t = np.sum(x[:, 1])
if t > 1e-6:
x[:, 1] /= t
self.unknowns[i] /= t
else:
if self.unknowns[i] > 1e-6:
self.unknowns[i] = 1
def get_contingency(dat, col_variable, row_variable=None, unknowns=None):
variable = _get_variable(col_variable, dat, "col_variable")
if isinstance(variable, data.DiscreteVariable):
return Discrete(dat, col_variable, row_variable, unknowns)
elif isinstance(variable, data.ContinuousVariable):
return Continuous(dat, col_variable, row_variable, unknowns)
else:
raise TypeError("cannot compute distribution of '%s'" %
type(variable).__name__)
def get_contingencies(dat, skipDiscrete=False, skipContinuous=False):
vars = dat.domain.attributes
row_var = dat.domain.class_var
if row_var is None:
raise ValueError("data has no target variable")
if skipDiscrete:
if skipContinuous:
return []
columns = [i for i, var in enumerate(vars)
if isinstance(var, data.ContinuousVariable)]
elif skipContinuous:
columns = [i for i, var in enumerate(vars)
if isinstance(var, data.DiscreteVariable)]
else:
columns = None
try:
dist_unks = dat._compute_contingency(columns)
if columns is None:
columns = np.arange(len(vars))
contigs = []
for col, (cont, unks) in zip(columns, dist_unks):
contigs.append(get_contingency(cont, vars[col], row_var, unks))
except NotImplementedError:
if columns is None:
columns = range(len(vars))
contigs = [get_contingency(dat, i) for i in columns]
return contigs
| jzbontar/orange-tree | Orange/statistics/contingency.py | Python | gpl-3.0 | 10,334 |
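Since Discrete above subclasses numpy.ndarray and falls back to dat.shape when no Orange variables are supplied, it can be exercised with a plain array. A minimal sketch with made-up counts — no Orange Table or Domain involved:

import numpy as np

counts = np.array([[10., 5., 0.],    # 2 row values x 3 column values
                   [2., 8., 4.]])
cont = Discrete(counts)              # row_variable / col_variable stay None
cont.normalize()                     # whole table now sums to 1
print(np.asarray(cont))
print(cont.unknowns)                 # 0 unless the source carried missing values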
#!/usr/bin/python
###
# Copyright (C) 2012 Shrinidhi Rao shrinidhi@clickbeetle.in
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###
import os
import sys
import time
import socket
import subprocess
import multiprocessing
import tempfile
taskId = os.environ['rbhus_taskId']
frameId = os.environ['rbhus_frameId']
user = os.environ['rbhus_user']
fileName = os.environ['rbhus_fileName']
minRam = os.environ['rbhus_minRam']
maxRam = os.environ['rbhus_maxRam']
logBase = os.environ['rbhus_logBase']
framePad = os.environ['rbhus_pad']
rThreads = os.environ['rbhus_threads']
pad = os.environ['rbhus_pad']
outDir = os.environ['rbhus_outDir']
outFile = os.environ['rbhus_outName']
afterFrameCmd = os.environ['rbhus_afCmd']
os.system("del /q \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\"")
#os.system("mklink \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\" \"X:\\standard\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\"")
os.system("copy \"X:\\standard\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\" \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\\" /y") | shrinidhi666/rbhus | etc/3dsmax2013/beforeFrame.py | Python | gpl-3.0 | 1,807 |
"""CactusBot."""
from .cactus import run, __version__
__all__ = ["__version__", "run"]
| CactusBot/CactusBot | cactusbot/__init__.py | Python | gpl-3.0 | 89 |
# -*- coding: utf-8 -*-
gettext("""La “Biblioteca Multimedia” es el lugar desde donde se clasifican nuestros contenidos multimedia, es decir, todo lo que es imágenes, audios, videos, documentos, etc. que luego podremos relacionar entre sí y con artículos, y tendrán vistas HTML en nuestro sitio web.""")
| MauHernandez/cyclope | demo/cyclope_project/locale/dbgettext/articles/article/como-funciona-la-biblioteca-biblioteca-multimedia/summary.py | Python | gpl-3.0 | 312 |
__author__ = 'Viktor Kerkez <alefnula@gmail.com>'
__date__ = '18 February 2010'
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
| alefnula/perart | src/tea/qt/__init__.py | Python | gpl-3.0 | 139 |
../../../../../share/pyshared/twisted/names/hosts.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/twisted/names/hosts.py | Python | gpl-3.0 | 52 |
""" The Racing Kings Variation"""
from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \
A8, B8, C8, D8, E8, F8, G8, H8
from pychess.Utils.Board import Board
RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1"
RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8)
class RacingKingsBoard(Board):
""" :Description: The Racing Kings variation is where the object of the game
is to bring your king to the eight row.
"""
variant = RACINGKINGSCHESS
__desc__ = _(
"In this game, check is entirely forbidden: not only is it forbidden\n" +
"to move ones king into check, but it is also forbidden to check the opponents king.\n" +
"The purpose of the game is to be the first player that moves his king to the eight row.\n" +
"When white moves their king to the eight row, and black moves directly after that also\n" +
"their king to the last row, the game is a draw\n" +
"(this rule is to compensate for the advantage of white that they may move first.)\n" +
"Apart from the above, pieces move and capture precisely as in normal chess."
)
name = _("Racing Kings")
cecp_name = "racingkings"
need_initial_board = True
standard_rules = False
variant_group = VARIANTS_OTHER_NONSTANDARD
def __init__(self, setup=False, lboard=None):
if setup is True:
Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard)
else:
Board.__init__(self, setup=setup, lboard=lboard)
def testKingInEightRow(board):
""" Test for a winning position """
return board.kings[board.color - 1] in RANK8
def test2KingInEightRow(board):
""" Test for a winning position """
return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
| pychess/pychess | lib/pychess/Variants/racingkings.py | Python | gpl-3.0 | 1,813 |
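A small usage sketch, assuming it runs inside pychess where gettext's `_` and the Board machinery are available; outside that environment the `_()` calls in the class body would need to be stubbed:

board = RacingKingsBoard(setup=True)       # initial Racing Kings position
print(board.variant == RACINGKINGSCHESS)   # -> True
# The win test is just a rank check: a side wins once its king reaches rank 8.
print(E8 in RANK8)                         # -> True
# testKingInEightRow() / test2KingInEightRow() apply that check to the
# low-level board object the engine passes in during play.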
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Clouder Template Docker',
'version': '1.0',
'category': 'Clouder',
'depends': ['clouder'],
'author': 'Yannick Buron (Clouder)',
'license': 'Other OSI approved licence',
'website': 'https://github.com/clouder-community/clouder',
'description': """
Clouder Template Docker
""",
'demo': [],
'data': ['clouder_template_docker_data.xml'],
'installable': True,
'application': True,
}
| microcom/clouder | clouder_template_docker/__openerp__.py | Python | gpl-3.0 | 1,398 |
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup, UnicodeDammit
import urllib2
import re
import helpers
import logger
import datetime
import sys
from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
import mylar
def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
NOWyr = datetime.date.today().year
if datetime.date.today().month == 12:
NOWyr = NOWyr + 1
logger.fdebug("We're in December, incremented search Year to increase search results: " + str(NOWyr))
comicnm = ComicName.encode('utf-8').strip()
comicyr = ComicYear
comicis = Total
comicid = ComicID
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicid: " + str(comicid) )
comicnm_1 = re.sub('\+', '%2B', comicnm)
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
#print (str(cnt) + " results")
resultName = []
resultID = []
resultYear = []
resultIssues = []
resultURL = None
n_odd = -1
n_even = -1
n = 0
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
#print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
#print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
#print ( "Year: " + str(resultYear[n]) )
#print ( "Issues: " + str(resultIssues[n]) )
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub(' ', '', CleanResultName).lower()
#print ("CleanComicName: " + str(CleanComicName))
#print ("CleanResultName: " + str(CleanResultName))
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
#this has been seen in a few instances already, so trying to adjust.
#when the series year is 2011, in gcd it might be 2012 due to publication
#dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
#1 year grace space, and then pull in the first issue to see the actual pub
# date and if coincides with the other date..match it.
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
#print ("n:" + str(n) + "...matched by year to Mylar!")
#print ( "Year: " + str(resultYear[n]) )
#Occasionally there are discrepancies in comic count between
#GCD and CV. 99% it's CV not updating to the newest issue as fast
#as GCD does. Therefore, let's increase the CV count by 1 to get it
#to match, any more variation could cause incorrect matching.
#ie. witchblade on GCD says 159 issues, CV states 161.
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total) +1 or (int(resultIssues[n]) +1) == int(Total):
#print ("initial issue match..continuing.")
if int(resultIssues[n]) == int(Total) +1:
issvariation = "cv"
elif int(resultIssues[n]) +1 == int(Total):
issvariation = "gcd"
else:
issvariation = "no"
#print ("n:" + str(n) + "...matched by issues to Mylar!")
#print ("complete match!...proceeding")
TotalIssues = resultIssues[n]
resultURL = str(resultID[n])
rptxt = resultp('td')[6]
resultPublished = rptxt.findNext(text=True)
#print ("Series Published: " + str(resultPublished))
break
n+=1
# it's possible that comicvine would return a comic name incorrectly, or gcd
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
    # this section is to account for variations in spelling, punctuation, etc.
basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
#print ("numbs:" + str(numbs))
if numbs in ComicName.lower():
numconv = basnumbs[numbs]
#print ("numconv: " + str(numconv))
ComicNm = re.sub(str(numbs), str(numconv), ComicName.lower())
#print ("comicname-reVISED:" + str(ComicNm))
return GCDScraper(ComicNm, ComicYear, Total, ComicID)
break
if ComicName.lower().startswith('the '):
ComicName = ComicName[4:]
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if ':' in ComicName:
ComicName = re.sub(':', '', ComicName)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if '-' in ComicName:
ComicName = re.sub('-', ' ', ComicName)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if 'and' in ComicName.lower():
ComicName = ComicName.replace('and', '&')
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if not quickmatch: return 'No Match'
#vari_loop = 0
if quickmatch == "yes":
if resultURL is None: return 'No Match'
else: return 'Match'
return GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=ComicID, TotalIssues=TotalIssues, issvariation=issvariation, resultPublished=resultPublished)
def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariation, resultPublished):
gcdinfo = {}
gcdchoice = []
gcount = 0
i = 0
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to numeric
# for numbs in basnumbs:
# #print ("numbs:" + str(numbs))
# if numbs in ComicName.lower():
# numconv = basnumbs[numbs]
# #print ("numconv: " + str(numconv))
if vari_loop > 1:
resultPublished = "Unknown"
if vari_loop == 99: vari_loop = 1
while (i <= vari_loop):
if vari_loop > 0:
try:
boong = comseries['comseries'][i]
except IndexError:
break
resultURL = boong['comseriesID']
ComicID = boong['comicid']
TotalIssues+= int(boong['comseriesIssues'])
else:
resultURL = resultURL
# if we're here - it means it's a mismatched name.
# let's pull down the publication date as it'll be blank otherwise
inputMIS = 'http://www.comics.org' + str(resultURL)
resp = urllib2.urlopen (inputMIS)
# soup = BeautifulSoup ( resp )
try:
soup = BeautifulSoup(urllib2.urlopen(inputMIS))
except UnicodeDecodeError:
logger.info("I've detected your system is using: " + sys.stdout.encoding)
logger.info("unable to parse properly due to utf-8 problem, ignoring wrong symbols")
try:
soup = BeautifulSoup(urllib2.urlopen(inputMIS)).decode('utf-8', 'ignore')
except UnicodeDecodeError:
logger.info("not working...aborting. Tell Evilhero.")
return
#If CV doesn't have the Series Year (Stupid)...Let's store the Comics.org stated year just in case.
pyearit = soup.find("div", {"class": "item_data"})
pyeartxt = pyearit.find(text=re.compile(r"Series"))
pyearst = pyeartxt.index('Series')
ParseYear = pyeartxt[int(pyearst) -5:int(pyearst)]
parsed = soup.find("div", {"id": "series_data"})
#recent structure changes - need to adjust now
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
resultPublished = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(resultPublished))
parsfind = parsed.findAll("dt", {"class": "long"})
seriesloop = len(parsfind)
resultFormat = ''
for pf in parsfind:
if 'Publishing Format:' in pf.findNext(text=True):
subtxt9 = pf.find("dd", {"id": "series_format"})
resultFormat = subtxt9.findNext(text=True).rstrip()
continue
# the caveat - if a series is ongoing but only has 1 issue published at a particular point in time,
# resultPublished will return just the date and not the word 'Present' which dictates on the main
# page if a series is Continuing / Ended .
if resultFormat != '':
if 'ongoing series' in resultFormat.lower() and 'was' not in resultFormat.lower() and 'present' not in resultPublished.lower():
resultPublished = resultPublished + " - Present"
if 'limited series' in resultFormat.lower() and '?' in resultPublished:
resultPublished = resultPublished + " (Limited Series)"
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
else:
subcoverst = coverst('img', src=True)[0]
gcdcover = subcoverst['src']
#print ("resultURL:" + str(resultURL))
#print ("comicID:" + str(ComicID))
input2 = 'http://www.comics.org' + str(resultURL) + 'details/'
resp = urllib2.urlopen(input2)
soup = BeautifulSoup(resp)
#for newer comics, on-sale date has complete date...
#for older comics, pub.date is to be used
# type = soup.find(text=' On-sale date ')
type = soup.find(text=' Pub. Date ')
if type:
#print ("on-sale date detected....adjusting")
datetype = "pub"
else:
#print ("pub date defaulting")
datetype = "on-sale"
cnt1 = len(soup.findAll("tr", {"class": "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class": "row_even_True"}))
cnt = int(cnt1 + cnt2)
#print (str(cnt) + " Issues in Total (this may be wrong due to alternate prints, etc")
n_odd = -1
n_even = -1
n = 0
PI = "1.00"
altcount = 0
PrevYRMO = "0000-00"
while (n < cnt):
if n%2==0:
n_odd+=1
parsed = soup.findAll("tr", {"class": "row_even_False"})[n_odd]
ntype = "odd"
else:
n_even+=1
ntype = "even"
parsed = soup.findAll("tr", {"class": "row_even_True"})[n_even]
subtxt3 = parsed.find("a")
ParseIssue = subtxt3.findNext(text=True)
fid = parsed('a', href=True)[0]
resultGID = fid['href']
resultID = resultGID[7:-1]
if ',' in ParseIssue: ParseIssue = re.sub("\,", "", ParseIssue)
variant="no"
if 'Vol' in ParseIssue or '[' in ParseIssue or 'a' in ParseIssue or 'b' in ParseIssue or 'c' in ParseIssue:
m = re.findall('[^\[\]]+', ParseIssue)
# ^^ takes care of []
# if it's a decimal - variant ...whoo-boy is messed.
if '.' in m[0]:
dec_chk = m[0]
#if it's a digit before and after decimal, assume decimal issue
dec_st = dec_chk.find('.')
dec_b4 = dec_chk[:dec_st]
dec_ad = dec_chk[dec_st +1:]
dec_ad = re.sub("\s", "", dec_ad)
if dec_b4.isdigit() and dec_ad.isdigit():
#logger.fdebug("Alternate decimal issue...*Whew* glad I caught that")
ParseIssue = dec_b4 + "." + dec_ad
else:
#logger.fdebug("it's a decimal, but there's no digits before or after decimal")
#not a decimal issue, drop it down to the regex below.
ParseIssue = re.sub("[^0-9]", " ", dec_chk)
else:
ParseIssue = re.sub("[^0-9]", " ", m[0])
# ^^ removes everything but the digits from the remaining non-brackets
logger.fdebug("variant cover detected : " + str(ParseIssue))
variant="yes"
altcount = 1
isslen = ParseIssue.find(' ')
if isslen < 0:
#logger.fdebug("just digits left..using " + str(ParseIssue))
isslen == 0
isschk = ParseIssue
#logger.fdebug("setting ParseIssue to isschk: " + str(isschk))
else:
#logger.fdebug("parse issue is " + str(ParseIssue))
#logger.fdebug("more than digits left - first space detected at position : " + str(isslen))
#if 'isslen' exists, it means that it's an alternative cover.
#however, if ONLY alternate covers exist of an issue it won't work.
#let's use the FIRST record, and ignore all other covers for the given issue.
isschk = ParseIssue[:isslen]
#logger.fdebug("Parsed Issue#: " + str(isschk))
ParseIssue = re.sub("\s", "", ParseIssue)
#check if decimal or '1/2' exists or not, and store decimal results
halfchk = "no"
if '.' in isschk:
isschk_find = isschk.find('.')
isschk_b4dec = isschk[:isschk_find]
isschk_decval = isschk[isschk_find +1:]
#logger.fdebug("decimal detected for " + str(isschk))
#logger.fdebug("isschk_decval is " + str(isschk_decval))
if len(isschk_decval) == 1:
ParseIssue = isschk_b4dec + "." + str(int(isschk_decval) * 10)
elif '/' in isschk:
ParseIssue = "0.50"
isslen = 0
halfchk = "yes"
else:
isschk_decval = ".00"
ParseIssue = ParseIssue + isschk_decval
if variant == "yes":
#logger.fdebug("alternate cover detected - skipping/ignoring.")
altcount = 1
                # in order to get the compare right, let's decimalize the string to '.00'.
# if halfchk == "yes": pass
# else:
# ParseIssue = ParseIssue + isschk_decval
datematch="false"
if not any(d.get('GCDIssue', None) == str(ParseIssue) for d in gcdchoice):
#logger.fdebug("preparing to add issue to db : " + str(ParseIssue))
pass
else:
#logger.fdebug("2 identical issue #'s have been found...determining if it's intentional")
#get current issue & publication date.
#logger.fdebug("Issue #:" + str(ParseIssue))
#logger.fdebug("IssueDate: " + str(gcdinfo['ComicDate']))
#get conflicting issue from tuple
for d in gcdchoice:
if str(d['GCDIssue']) == str(ParseIssue):
#logger.fdebug("Issue # already in tuple - checking IssueDate:" + str(d['GCDDate']) )
if str(d['GCDDate']) == str(gcdinfo['ComicDate']):
#logger.fdebug("Issue #'s and dates match...skipping.")
datematch="true"
else:
#logger.fdebug("Issue#'s match but different publication dates, not skipping.")
datematch="false"
if datematch == "false":
gcdinfo['ComicIssue'] = ParseIssue
#--- let's use pubdate.
            #try publication date first
ParseDate = GettheDate(parsed, PrevYRMO)
ParseDate = ParseDate.replace(' ', '')
PrevYRMO = ParseDate
gcdinfo['ComicDate'] = ParseDate
#^^ will retrieve date #
#logger.fdebug("adding: " + str(gcdinfo['ComicIssue']) + " - date: " + str(ParseDate))
if ComicID[:1] == "G":
gcdchoice.append({
'GCDid': ComicID,
'IssueID': resultID,
'GCDIssue': gcdinfo['ComicIssue'],
'GCDDate': gcdinfo['ComicDate']
})
gcount+=1
else:
gcdchoice.append({
'GCDid': ComicID,
'GCDIssue': gcdinfo['ComicIssue'],
'GCDDate': gcdinfo['ComicDate']
})
gcdinfo['gcdchoice'] = gcdchoice
altcount = 0
n+=1
i+=1
gcdinfo['gcdvariation'] = issvariation
if ComicID[:1] == "G":
gcdinfo['totalissues'] = gcount
else:
gcdinfo['totalissues'] = TotalIssues
gcdinfo['ComicImage'] = gcdcover
gcdinfo['resultPublished'] = resultPublished
gcdinfo['SeriesYear'] = ParseYear
gcdinfo['GCDComicID'] = resultURL.split('/')[0]
return gcdinfo
## -- end (GCD) -- ##
def GettheDate(parsed, PrevYRMO):
#--- let's use pubdate.
    #try publication date first
#logger.fdebug("parsed:" + str(parsed))
subtxt1 = parsed('td')[1]
ParseDate = subtxt1.findNext(text=True).rstrip()
pformat = 'pub'
if ParseDate is None or ParseDate == '':
subtxt1 = parsed('td')[2]
ParseDate = subtxt1.findNext(text=True)
pformat = 'on-sale'
if len(ParseDate) < 7: ParseDate = '0000-00' #invalid on-sale date format , drop it 0000-00 to avoid errors
basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07', 'august': '08', 'september': '09', 'october': '10', 'november': '11', 'december': '12'}
pdlen = len(ParseDate)
pdfind = ParseDate.find(' ', 2)
#logger.fdebug("length: " + str(pdlen) + "....first space @ pos " + str(pdfind))
#logger.fdebug("this should be the year: " + str(ParseDate[pdfind+1:pdlen-1]))
if pformat == 'on-sale': pass # date is in correct format...
else:
if ParseDate[pdfind +1:pdlen -1].isdigit():
#assume valid date.
#search for number as text, and change to numeric
for numbs in basmonths:
if numbs in ParseDate.lower():
pconv = basmonths[numbs]
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(pconv)
#logger.fdebug("!success - Publication date: " + str(ParseDate))
break
# some comics are messed with pub.dates and have Spring/Summer/Fall/Winter
else:
baseseasons = {'spring': '03', 'summer': '06', 'fall': '09', 'winter': '12'}
for seas in baseseasons:
if seas in ParseDate.lower():
sconv = baseseasons[seas]
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(sconv)
break
# #try key date
# subtxt1 = parsed('td')[2]
# ParseDate = subtxt1.findNext(text=True)
# #logger.fdebug("no pub.date detected, attempting to use on-sale date: " + str(ParseDate))
# if (ParseDate) < 7:
# #logger.fdebug("Invalid on-sale date - less than 7 characters. Trying Key date")
# subtxt3 = parsed('td')[0]
# ParseDate = subtxt3.findNext(text=True)
# if ParseDate == ' ':
#increment previous month by one and throw it in until it's populated properly.
if PrevYRMO == '0000-00':
ParseDate = '0000-00'
else:
PrevYR = str(PrevYRMO)[:4]
PrevMO = str(PrevYRMO)[5:]
#let's increment the month now (if it's 12th month, up the year and hit Jan.)
if int(PrevMO) == 12:
PrevYR = int(PrevYR) + 1
PrevMO = 1
else:
PrevMO = int(PrevMO) + 1
if int(PrevMO) < 10:
PrevMO = "0" + str(PrevMO)
ParseDate = str(PrevYR) + "-" + str(PrevMO)
#logger.fdebug("parseDAte:" + str(ParseDate))
return ParseDate
def GCDAdd(gcdcomicid):
serieschoice = []
series = {}
logger.fdebug("I'm trying to find these GCD comicid's:" + str(gcdcomicid))
for gcdid in gcdcomicid:
logger.fdebug("looking at gcdid:" + str(gcdid))
input2 = 'http://www.comics.org/series/' + str(gcdid)
logger.fdebug("---url: " + str(input2))
resp = urllib2.urlopen (input2)
soup = BeautifulSoup (resp)
logger.fdebug("SeriesName section...")
parsen = soup.find("span", {"id": "series_name"})
#logger.fdebug("series name (UNPARSED): " + str(parsen))
subpar = parsen('a')[0]
resultName = subpar.findNext(text=True)
logger.fdebug("ComicName: " + str(resultName))
#covers-start
logger.fdebug("Covers section...")
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
logger.fdebug("unable to find any covers - setting to None")
else:
subcoverst = coverst('img', src=True)[0]
#logger.fdebug("cover (UNPARSED) : " + str(subcoverst))
gcdcover = subcoverst['src']
logger.fdebug("Cover: " + str(gcdcover))
#covers end
#publisher start
logger.fdebug("Publisher section...")
try:
pubst = soup.find("div", {"class": "item_data"})
catchit = pubst('a')[0]
except (IndexError, TypeError):
pubst = soup.findAll("div", {"class": "left"})[1]
catchit = pubst.find("a")
publisher = catchit.findNext(text=True)
logger.fdebug("Publisher: " + str(publisher))
#publisher end
parsed = soup.find("div", {"id": "series_data"})
#logger.fdebug("series_data: " + str(parsed))
#print ("parse:" + str(parsed))
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
#logger.fdebug("publication_dates: " + str(subtxt3))
pubdate = subtxt3.findNext(text=True).rstrip()
logger.fdebug("pubdate:" + str(pubdate))
subtxt4 = parsed.find("dd", {"id": "issues_published"})
noiss = subtxt4.findNext(text=True)
lenwho = len(noiss)
lent = noiss.find(' ', 2)
lenf = noiss.find('(')
stringit = noiss[lenf:lenwho]
stringout = noiss[:lent]
noissues = stringout.rstrip(' \t\r\n\0')
numbering = stringit.rstrip(' \t\r\n\0')
logger.fdebug("noissues:" + str(noissues))
logger.fdebug("numbering:" + str(numbering))
serieschoice.append({
"ComicID": gcdid,
"ComicName": resultName,
"ComicYear": pubdate,
"ComicIssues": noissues,
"ComicPublisher": publisher,
"ComicCover": gcdcover
})
series['serieschoice'] = serieschoice
return series
def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
comchkchoice = []
comchoice = {}
NOWyr = datetime.date.today().year
if datetime.date.today().month == 12:
NOWyr = NOWyr + 1
logger.fdebug("We're in December, incremented search Year to increase search results: " + str(NOWyr))
comicnm = ComicName.encode('utf-8').strip()
comicyr = ComicYear
comicis = Total
comicid = ComicID
comicpub = ComicPublisher.encode('utf-8').strip()
#print ("...comchk parser initialization...")
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicpub: " + str(comicpub) )
#print ( "comicid: " + str(comicid) )
# do 3 runs at the comics.org search to get the best results
comicrun = []
# &pub_name=DC
# have to remove the spaces from Publisher or else will not work (ie. DC Comics vs DC will not match)
# take the 1st word ;)
#comicpub = comicpub.split()[0]
# if it's not one of the BIG publisher's it might fail - so let's increase the odds.
pubbiggies = ['DC',
'Marvel',
'Image',
'IDW']
uhuh = "no"
for pb in pubbiggies:
if pb in comicpub:
#keep publisher in url if a biggie.
uhuh = "yes"
#print (" publisher match : " + str(comicpub))
conv_pub = comicpub.split()[0]
#print (" converted publisher to : " + str(conv_pub))
#1st run setup - leave it all as it is.
comicrun.append(comicnm)
cruncnt = 0
#2nd run setup - remove the last character and do a broad search (keep year or else will blow up)
if len(str(comicnm).split()) > 2:
comicrun.append(' '.join(comicnm.split(' ')[:-1]))
cruncnt+=1
    # to increase the likelihood of matches and to get a broader scope...
    # let's remove extra characters
if re.sub('[\.\,\:]', '', comicnm) != comicnm:
comicrun.append(re.sub('[\.\,\:]', '', comicnm))
cruncnt+=1
# one more addition - if the title contains a 'the', remove it ;)
if comicnm.lower().startswith('the'):
comicrun.append(comicnm[4:].strip())
cruncnt+=1
totalcount = 0
cr = 0
#print ("cruncnt is " + str(cruncnt))
while (cr <= cruncnt):
#print ("cr is " + str(cr))
comicnm = comicrun[cr]
#leaving spaces in will screw up the search...let's take care of it
comicnm = re.sub(' ', '+', comicnm)
#print ("comicnm: " + str(comicnm))
if uhuh == "yes":
publink = "&pub_name=" + str(conv_pub)
if uhuh == "no":
publink = "&pub_name="
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&keywords=&order1=series&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31' + '&title=&feature=&job_number=&pages=&script=&pencils=&inks=&colors=&letters=&story_editing=&genre=&characters=&synopsis=&reprint_notes=&story_reprinted=None¬es=' + str(publink) + '&pub_notes=&brand=&brand_notes=&indicia_publisher=&is_surrogate=None&ind_pub_notes=&series=' + str(comicnm) + '&series_year_began=&series_notes=&tracking_notes=&issue_count=&is_comics=None&format=&color=&dimensions=&paper_stock=&binding=&publishing_format=&issues=&volume=&issue_title=&variant_name=&issue_date=&indicia_frequency=&price=&issue_pages=&issue_editing=&isbn=&barcode=&issue_notes=&issue_reprinted=None&is_indexed=None'
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
# print ("cnt1: " + str(cnt1))
# print ("cnt2: " + str(cnt2))
# print (str(cnt) + " results")
resultName = []
resultID = []
resultYear = []
resultIssues = []
resultPublisher = []
resultURL = None
n_odd = -1
n_even = -1
n = 0
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
rtpit = rtp.findNext(text=True)
rtpthis = rtpit.encode('utf-8').strip()
resultName.append(helpers.cleanName(rtpthis))
# print ( "Comic Name: " + str(resultName[n]) )
pub = resultp('a')[0]
pubit = pub.findNext(text=True)
# pubthis = u' '.join(pubit).encode('utf-8').strip()
pubthis = pubit.encode('utf-8').strip()
resultPublisher.append(pubthis)
# print ( "Publisher: " + str(resultPublisher[n]) )
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
# print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
# print ( "Year: " + str(resultYear[n]) )
# print ( "Issues: " + str(resultIssues[n]) )
# print ("comchkchoice: " + str(comchkchoice))
if not any(d.get('GCDID', None) == str(resultID[n]) for d in comchkchoice):
#print ( str(resultID[n]) + " not in DB...adding.")
comchkchoice.append({
"ComicID": str(comicid),
"ComicName": resultName[n],
"GCDID": str(resultID[n]).split('/')[2],
"ComicYear": str(resultYear[n]),
"ComicPublisher": resultPublisher[n],
"ComicURL": "http://www.comics.org" + str(resultID[n]),
"ComicIssues": str(resultIssues[n])
})
#else:
#print ( str(resultID[n]) + " already in DB...skipping" )
n+=1
cr+=1
totalcount= totalcount + cnt
comchoice['comchkchoice'] = comchkchoice
return comchoice, totalcount
def decode_html(html_string):
converted = UnicodeDammit(html_string)
if not converted.unicode:
raise UnicodeDecodeError(
"Failed to detect encoding, tried [%s]",
', '.join(converted.triedEncodings))
# print converted.originalEncoding
return converted.unicode
def annualCheck(gcomicid, comicid, comicname, comicyear):
# will only work if we already matched for gcd.
# search for <comicname> annual
# grab annual listing that hits on comicyear (seriesyear)
# grab results :)
print ("GcomicID: " + str(gcomicid))
print ("comicID: " + str(comicid))
print ("comicname: " + comicname)
print ("comicyear: " + str(comicyear))
comicnm = comicname.encode('utf-8').strip()
comicnm_1 = re.sub('\+', '%2B', comicnm + " annual")
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyear) + '-01-01&end_date=' + str(comicyear) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
print (str(cnt) + " results")
resultName = []
resultID = []
resultYear = []
resultIssues = []
resultURL = None
n_odd = -1
n_even = -1
n = 0
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
        rtp1 = re.sub('Annual', '', rtp.findNext(text=True))
        resultName.append(helpers.cleanName(rtp1))
print ("Comic Name: " + str(resultName[n]))
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
print ("ID: " + str(resultID[n]))
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
print ("Year: " + str(resultYear[n]))
print ("Issues: " + str(resultIssues[n]))
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub(' ', '', CleanResultName).lower()
print ("CleanComicName: " + str(CleanComicName))
print ("CleanResultName: " + str(CleanResultName))
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
            if resultYear[n] == comicyear or resultYear[n] == str(int(comicyear) +1):
print ("n:" + str(n) + "...matched by year to Mylar!")
print ("Year: " + str(resultYear[n]))
TotalIssues = resultIssues[n]
resultURL = str(resultID[n])
rptxt = resultp('td')[6]
resultPublished = rptxt.findNext(text=True)
#print ("Series Published: " + str(resultPublished))
break
n+=1
return
| evilhero/mylar | mylar/parseit.py | Python | gpl-3.0 | 36,412 |
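A hedged sketch of how the scraper above is typically driven; the series details below are invented, and every call performs live HTTP requests against comics.org:

# Quick existence check first, then the full issue/date parse.
if GCDScraper('Invincible', '2003', '144', '12345', quickmatch='yes') == 'Match':
    gcdinfo = GCDScraper('Invincible', '2003', '144', '12345')
    print(gcdinfo['totalissues'], gcdinfo['resultPublished'])
    for issue in gcdinfo['gcdchoice']:
        print(issue['GCDIssue'], issue['GCDDate'])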
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
import openerp.addons.product.product
class res_users(osv.osv):
_inherit = 'res.users'
_columns = {
'target_sales_invoiced': fields.integer('Invoiced in Sale Orders Target'),
}
| zbqf109/goodo | openerp/addons/sale_crm/res_users.py | Python | gpl-3.0 | 261 |
#!/usr/bin/python
from gi.repository import Gtk, GObject
import time
import unittest
from testutils import setup_test_env
setup_test_env()
from softwarecenter.enums import XapianValues, ActionButtons
TIMEOUT=300
class TestCustomLists(unittest.TestCase):
def _debug(self, index, model, needle):
print ("Expected '%s' at index '%s', " +
"and custom list contained: '%s'") % (
needle, index, model[index][0].get_value(XapianValues.PKGNAME))
def assertPkgInListAtIndex(self, index, model, needle):
doc = model[index][0]
self.assertEqual(doc.get_value(XapianValues.PKGNAME),
needle, self._debug(index, model, needle))
def test_custom_lists(self):
from softwarecenter.ui.gtk3.panes.availablepane import get_test_window
win = get_test_window()
pane = win.get_data("pane")
self._p()
pane.on_search_terms_changed(None, "ark,artha,software-center")
self._p()
model = pane.app_view.tree_view.get_model()
# custom list should return three items
self.assertTrue(len(model) == 3)
# check package names, ordering is default "by relevance"
self.assertPkgInListAtIndex(0, model, "ark")
self.assertPkgInListAtIndex(1, model, "software-center")
self.assertPkgInListAtIndex(2, model, "artha")
# check that the status bar offers to install the packages
install_button = pane.action_bar.get_button(ActionButtons.INSTALL)
self.assertNotEqual(install_button, None)
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def _p(self):
for i in range(10):
time.sleep(0.1)
while Gtk.events_pending():
Gtk.main_iteration()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
unittest.main()
| vanhonit/xmario_center | test/gtk3/test_custom_lists.py | Python | gpl-3.0 | 1,958 |
#! /usr/bin/python
# Module:
# Author: Maxim Borisyak, 2014
import functools
partial = functools.partial
from pattern import MatchError
from pattern import case
from pattern import to_pattern
# Type patterns
from pattern import a_class
from pattern import a_str
from pattern import a_float
from pattern import an_int
# General patterns
from pattern import some
from pattern import otherwise
from pattern import constant
from match import match
from match import match_f
from match import case_f
from match import match_method
from match import case_method
from match import merge_matches
from match import to_match | ZloVechno/dummy-agent | functial/__init__.py | Python | gpl-3.0 | 621 |
#!/usr/bin/env python3
from shutil import rmtree
from os import remove, path
from crawler.swiftea_bot.data import BASE_LINKS
URL = "http://aetfiws.ovh"
SUGGESTIONS = ['http://suggestions.ovh/page1.html', 'http://suggestions.ovh/page2.html']
CODE1 = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="Description" content="Moteur de recherche">
<title>Swiftea</title>
<link rel="stylesheet" href="public/css/reset.css">
<link rel="icon" href="public/favicon.ico" type="image/x-icon">
</head>
<body>
<p>une <a href="demo">CSS Demo</a> ici!</p>
<h1>Gros titre🤣 </h1>
<h2>Moyen titre</h2>
<h3>petit titre</h3>
<p><strong>strong </strong><em>em</em></p>
<a href="index">
<img src="public/themes/default/img/logo.png" alt="Swiftea">
</a>
du texte au milieu
<a href="about/ninf.php" rel="noindex, nofollow">Why use Swiftea ?1</a>
<a href="about/ni.php" rel="noindex">Why use Swiftea ?2</a>
<a href="about/nf.php" rel="nofollow">Why use Swiftea ?3</a>
<img src="public/themes/default/img/github.png" alt="Github Swiftea">
<img src="public/themes/default/img/twitter.png" alt="Twitter Swiftea">
<p>©</p>
<p>></p>
</body>
</html>
"""
CODE2 = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-language" content="en">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-16 LE" />
<link rel="shortcut icon" href="public/favicon2.ico" type="image/x-icon">
</head>
<body>
</body>
</html>
"""
CODE3 = """<!DOCTYPE html>
<html>
<head>
<meta name="language" content="fr">
</head>
<body>
</body>
</html>
"""
INVERTED_INDEX = {'EN': {
'A': {'ab': {'above': {1: .3, 2: .1}, 'abort': {1: .3, 2: .1}}},
'W': {'wo': {'word': {1: .3, 30: .4}}}}, 'FR': {
'B': {'ba': {'bateau': {1: .5}}, 'bo': {'boule': {1: .25, 2: .8}}}}}
CLEANED_KEYWORDS = [
('le', 1),
('2015', 1),
('bureau', 1),
('word', 1),
('example', 1),
('oiseau', 1),
('quoi', 1),
('epee', 1),
('clock', 1),
('çochon', 1),
('12h', 1)
]
def reset(DIR_DATA):
if path.exists(DIR_DATA):
rmtree(DIR_DATA)
else:
rmtree('badwords')
rmtree('stopwords')
rmtree('inverted_index')
rmtree('links')
rmtree('config')
rmtree('stats')
# for global tests:
if path.exists('test_redirect_output.ext'):
remove('test_redirect_output.ext')
| Swiftea/Crawler | crawler/tests/test_data.py | Python | gpl-3.0 | 2,567 |
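INVERTED_INDEX above mirrors the crawler's index layout: language → first letter → two-letter prefix → word → {doc_id: weight}. A tiny lookup sketch against that fixture:

def lookup(index, language, word):
    """Return the {doc_id: weight} postings for a word, or {} if absent."""
    try:
        return index[language][word[0].upper()][word[:2]][word]
    except KeyError:
        return {}

print(lookup(INVERTED_INDEX, 'EN', 'word'))    # {1: 0.3, 30: 0.4}
print(lookup(INVERTED_INDEX, 'FR', 'bateau'))  # {1: 0.5}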
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from .protocoldata import *
# Pulse types
class Pulse:
INVALID, START, ZERO, ONE = range(4)
# Protocol stats
class Stat:
WAIT_START, GET_BITS, WAIT_EOM, WAIT_ACK = range(4)
# Pulse times in milliseconds
timing = {
Pulse.START: {
'low': { 'min': 3.5, 'max': 3.9 },
'total': { 'min': 4.3, 'max': 4.7 }
},
Pulse.ZERO: {
'low': { 'min': 1.3, 'max': 1.7 },
'total': { 'min': 2.05, 'max': 2.75 }
},
Pulse.ONE: {
'low': { 'min': 0.4, 'max': 0.8 },
'total': { 'min': 2.05, 'max': 2.75 }
}
}
class ChannelError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'cec'
name = 'CEC'
longname = 'HDMI-CEC'
desc = 'HDMI Consumer Electronics Control (CEC) protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['Display', 'PC']
channels = (
{'id': 'cec', 'name': 'CEC', 'desc': 'CEC bus data'},
)
annotations = (
('st', 'Start'),
('eom-0', 'End of message'),
('eom-1', 'Message continued'),
('nack', 'ACK not set'),
('ack', 'ACK set'),
('bits', 'Bits'),
('bytes', 'Bytes'),
('frames', 'Frames'),
('sections', 'Sections'),
('warnings', 'Warnings')
)
annotation_rows = (
('bits', 'Bits', (0, 1, 2, 3, 4, 5)),
('bytes', 'Bytes', (6,)),
('frames', 'Frames', (7,)),
('sections', 'Sections', (8,)),
('warnings', 'Warnings', (9,))
)
def __init__(self):
self.reset()
def precalculate(self):
# Restrict max length of ACK/NACK labels to 2 BIT pulses.
bit_time = timing[Pulse.ZERO]['total']['min'] * 2
self.max_ack_len_samples = round((bit_time / 1000) * self.samplerate)
def reset(self):
self.stat = Stat.WAIT_START
self.samplerate = None
self.fall_start = None
self.fall_end = None
self.rise = None
self.reset_frame_vars()
def reset_frame_vars(self):
self.eom = None
self.bit_count = 0
self.byte_count = 0
self.byte = 0
self.byte_start = None
self.frame_start = None
self.frame_end = None
self.is_nack = 0
self.cmd_bytes = []
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
self.precalculate()
def handle_frame(self, is_nack):
if self.fall_start is None or self.fall_end is None:
return
i = 0
string = ''
while i < len(self.cmd_bytes):
string += '{:02x}'.format(self.cmd_bytes[i]['val'])
if i != (len(self.cmd_bytes) - 1):
string += ':'
i += 1
self.put(self.frame_start, self.frame_end, self.out_ann, [7, [string]])
i = 0
operands = 0
string = ''
while i < len(self.cmd_bytes):
if i == 0: # Parse header
(src, dst) = decode_header(self.cmd_bytes[i]['val'])
string = 'HDR: ' + src + ', ' + dst
elif i == 1: # Parse opcode
string += ' | OPC: ' + opcodes.get(self.cmd_bytes[i]['val'], 'Invalid')
else: # Parse operands
if operands == 0:
string += ' | OPS: '
operands += 1
string += '0x{:02x}'.format(self.cmd_bytes[i]['val'])
if i != len(self.cmd_bytes) - 1:
string += ', '
i += 1
# Header only commands are PINGS
if i == 1:
string += ' | OPC: PING' if self.eom else ' | OPC: NONE. Aborted cmd'
# Add extra information (ack of the command from the destination)
string += ' | R: NACK' if is_nack else ' | R: ACK'
self.put(self.frame_start, self.frame_end, self.out_ann, [8, [string]])
def process(self):
zero_time = ((self.rise - self.fall_start) / self.samplerate) * 1000.0
total_time = ((self.fall_end - self.fall_start) / self.samplerate) * 1000.0
pulse = Pulse.INVALID
# VALIDATION: Identify pulse based on length of the low period
for key in timing:
if zero_time >= timing[key]['low']['min'] and zero_time <= timing[key]['low']['max']:
pulse = key
break
# VALIDATION: Invalid pulse
if pulse == Pulse.INVALID:
self.stat = Stat.WAIT_START
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Invalid pulse: Wrong timing']])
return
# VALIDATION: If waiting for start, discard everything else
if self.stat == Stat.WAIT_START and pulse != Pulse.START:
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected START: BIT found']])
return
# VALIDATION: If waiting for ACK or EOM, only BIT pulses (0/1) are expected
if (self.stat == Stat.WAIT_ACK or self.stat == Stat.WAIT_EOM) and pulse == Pulse.START:
            self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected BIT: START received']])
self.stat = Stat.WAIT_START
# VALIDATION: ACK bit pulse remains high till the next frame (if any): Validate only min time of the low period
if self.stat == Stat.WAIT_ACK and pulse != Pulse.START:
if total_time < timing[pulse]['total']['min']:
pulse = Pulse.INVALID
                self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['ACK pulse below minimum time']])
self.stat = Stat.WAIT_START
return
        # VALIDATION / PING FRAME DETECTION: Initiator doesn't set the EOM = 1 but stops sending when the ACK doesn't arrive
if self.stat == Stat.GET_BITS and pulse == Pulse.START:
# Make sure we received a complete byte to consider it a valid ping
if self.bit_count == 0:
self.handle_frame(self.is_nack)
else:
self.put(self.frame_start, self.samplenum, self.out_ann, [9, ['ERROR: Incomplete byte received']])
# Set wait start so we receive next frame
self.stat = Stat.WAIT_START
# VALIDATION: Check timing of the BIT (0/1) pulse in any other case (not waiting for ACK)
if self.stat != Stat.WAIT_ACK and pulse != Pulse.START:
if total_time < timing[pulse]['total']['min'] or total_time > timing[pulse]['total']['max']:
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Bit pulse exceeds total pulse timespan']])
pulse = Pulse.INVALID
self.stat = Stat.WAIT_START
return
if pulse == Pulse.ZERO:
bit = 0
elif pulse == Pulse.ONE:
bit = 1
# STATE: WAIT START
if self.stat == Stat.WAIT_START:
self.stat = Stat.GET_BITS
self.reset_frame_vars()
self.put(self.fall_start, self.fall_end, self.out_ann, [0, ['ST']])
# STATE: GET BITS
elif self.stat == Stat.GET_BITS:
# Reset stats on first bit
if self.bit_count == 0:
self.byte_start = self.fall_start
self.byte = 0
# If 1st byte of the datagram save its sample num
if len(self.cmd_bytes) == 0:
self.frame_start = self.fall_start
self.byte += (bit << (7 - self.bit_count))
self.bit_count += 1
self.put(self.fall_start, self.fall_end, self.out_ann, [5, [str(bit)]])
if self.bit_count == 8:
self.bit_count = 0
self.byte_count += 1
self.stat = Stat.WAIT_EOM
self.put(self.byte_start, self.samplenum, self.out_ann, [6, ['0x{:02x}'.format(self.byte)]])
self.cmd_bytes.append({'st': self.byte_start, 'ed': self.samplenum, 'val': self.byte})
# STATE: WAIT EOM
elif self.stat == Stat.WAIT_EOM:
self.eom = bit
self.frame_end = self.fall_end
a = [2, ['EOM=Y']] if self.eom else [1, ['EOM=N']]
self.put(self.fall_start, self.fall_end, self.out_ann, a)
self.stat = Stat.WAIT_ACK
# STATE: WAIT ACK
elif self.stat == Stat.WAIT_ACK:
# If a frame with broadcast destination is being sent, the ACK is
# inverted: a 0 is considered a NACK, therefore we invert the value
# of the bit here, so we match the real meaning of it.
if (self.cmd_bytes[0]['val'] & 0x0F) == 0x0F:
bit = ~bit & 0x01
if (self.fall_end - self.fall_start) > self.max_ack_len_samples:
ann_end = self.fall_start + self.max_ack_len_samples
else:
ann_end = self.fall_end
if bit:
# Any NACK detected in the frame is enough to consider the
# whole frame NACK'd.
self.is_nack = 1
self.put(self.fall_start, ann_end, self.out_ann, [3, ['NACK']])
else:
self.put(self.fall_start, ann_end, self.out_ann, [4, ['ACK']])
# After ACK bit, wait for new datagram or continue reading current
# one based on EOM value.
if self.eom or self.is_nack:
self.stat = Stat.WAIT_START
self.handle_frame(self.is_nack)
else:
self.stat = Stat.GET_BITS
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
# Wait for first falling edge.
self.wait({0: 'f'})
self.fall_end = self.samplenum
while True:
self.wait({0: 'r'})
self.rise = self.samplenum
if self.stat == Stat.WAIT_ACK:
self.wait([{0: 'f'}, {'skip': self.max_ack_len_samples}])
else:
self.wait([{0: 'f'}])
self.fall_start = self.fall_end
self.fall_end = self.samplenum
self.process()
# If there was a timeout while waiting for ACK: RESYNC.
# Note: This is an expected situation as no new falling edge will
# happen until next frame is transmitted.
if self.matched == (False, True):
self.wait({0: 'f'})
self.fall_end = self.samplenum
| StefanBruens/libsigrokdecode | decoders/cec/pd.py | Python | gpl-3.0 | 11,367 |
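The timing table is the heart of the decoder: a pulse is identified by how long the line stayed low and then sanity-checked against its total period. A standalone sketch of that classification step (times in milliseconds, mirroring the dict above; the real decoder skips the total-time check for ACK bits):

def classify_pulse(low_ms, total_ms):
    """Return the Pulse type for a measured low time / total period."""
    for key, spec in timing.items():
        if spec['low']['min'] <= low_ms <= spec['low']['max']:
            if spec['total']['min'] <= total_ms <= spec['total']['max']:
                return key
            return Pulse.INVALID
    return Pulse.INVALID

print(classify_pulse(3.7, 4.5) == Pulse.START)  # nominal start pulse
print(classify_pulse(0.6, 2.4) == Pulse.ONE)    # logical one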
r"""
Description: Generates 2-D data maps from OpenFoam data saved by paraview
as a CSV file. The data has to be saved as point data and the following fields
are expected: p, points:0->2, u:0->2. An aperture map is the second main input
and is used to generate the interpolation coordinates as well as convert
the flow velocities into volumetric flow rates. This script assumes the OpenFoam
simulation was performed on a geometry symmetric about the X-Z plane.
For usage information run: ``apm_process_paraview_data -h``
| Written By: Matthew stadelman
| Date Written: 2016/09/29
| Last Modified: 2017/04/23
|
"""
import argparse
from argparse import RawDescriptionHelpFormatter as RawDesc
import os
import scipy as sp
from scipy.interpolate import griddata
from apmapflow import _get_logger, set_main_logger_level, DataField
# setting up logger
set_main_logger_level('info')
logger = _get_logger('apmapflow.scripts')
# setting a few convenience globals
avg_fact = None
voxel_size = None
base_name = None
# creating arg parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawDesc)
# adding arguments
parser.add_argument('-v', '--verbose', action='store_true',
help='debug messages are printed to the screen')
parser.add_argument('-o', '--output-dir',
type=os.path.realpath, default=os.getcwd(),
help='''outputs file to the specified
directory, sub-directories are created as needed''')
parser.add_argument('--rho', type=float, default=1000,
help='fluid density for kinematic pressure conversion')
parser.add_argument('data_file', type=os.path.realpath,
help='paraview CSV data file')
parser.add_argument('map_file', type=os.path.realpath,
help='matching aperture map used for OpenFoam simulation')
parser.add_argument('voxel_size', type=float,
help='voxel to meter conversion factor of aperture map')
parser.add_argument('avg_fact', type=float,
help='''horizontal averaging factor of aperture map''')
parser.add_argument('base_name', nargs='?', default=None,
help='''base name to save fields as, i.e. base_name + "-p-map.txt",
defaults to the name of the CSV file''')
def main():
r"""
Processes commandline args and runs script
"""
global avg_fact, voxel_size, base_name
#
args = parser.parse_args()
if args.verbose:
set_main_logger_level('debug')
#
# these will be command-line args
para_infile = args.data_file
aper_infile = args.map_file
avg_fact = args.avg_fact
voxel_size = args.voxel_size
#
base_name = args.base_name
if base_name is None:
base_name = os.path.basename(para_infile).split('.')[0]
base_name = os.path.join(args.output_dir, base_name)
#
aper_map, data_dict = read_data_files(para_infile, aper_infile)
map_coords, data_coords = generate_coordinate_arrays(aper_map, data_dict)
save_data_maps(map_coords, data_coords, aper_map, data_dict, args.rho)
def read_data_files(para_file, map_file):
r"""
Reads in the paraview data file and aperture map file.
"""
#
# reading aperture map
logger.info('reading aperture map...')
aper_map = DataField(map_file)
#
# reading first line of paraview file to get column names
logger.info('reading paraview data file')
with open(para_file, 'r') as file:
cols = file.readline()
cols = cols.strip().replace('"', '').lower()
cols = cols.split(',')
#
# reading entire dataset and splitting into column vectors
data = sp.loadtxt(para_file, delimiter=',', dtype=float, skiprows=1)
data_dict = {}
for i, col in enumerate(cols):
data_dict[col] = data[:, i]
#
return aper_map, data_dict
def generate_coordinate_arrays(aper_map, para_data_dict):
r"""
    Generates the coordinate arrays to use in data interpolation for converting
paraview point data into a 2-D data map.
"""
#
# generating XYZ coordinates from map to interpolate to
logger.info('calculating aperture map cell center coordinates...')
temp = sp.arange(aper_map.data_map.size, dtype=int)
temp = sp.unravel_index(temp, aper_map.data_map.shape[::-1])
map_coords = sp.zeros((aper_map.data_map.size, 3), dtype=float)
#
# half voxel added to make map points be cell centers
map_coords[:, 0] = temp[0] * avg_fact * voxel_size + voxel_size/2.0
map_coords[:, 2] = temp[1] * avg_fact * voxel_size + voxel_size/2.0
#
# pulling XYZ coordinates from the data file
logger.info('processing data file data for coordinates...')
data_coords = sp.zeros((para_data_dict['points:0'].shape[0], 3))
data_coords[:, 0] = para_data_dict['points:0']
data_coords[:, 1] = para_data_dict['points:1']
data_coords[:, 2] = para_data_dict['points:2']
#
return map_coords, data_coords
def save_data_maps(map_coords, data_coords, aper_map, data_dict, density):
r"""
Converts the raw paraview point data into a 2-D data distribution and
saves the file by appending to the base_name.
"""
#
# generating p field
logger.info('generating and saving pressure field...')
field = data_dict['p'] * density # openFoam outputs kinematic pressure
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
sp.savetxt(base_name+'-p-map.txt', field.T, delimiter='\t')
#
# generating Ux -> Qx field
logger.info('generating and saving Qx field...')
field = data_dict['u:0']
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
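    # convert velocity to a volumetric flow rate by scaling with the local
    # aperture (stored in voxels) and voxel_size**2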
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qx-map.txt', field.T, delimiter='\t')
#
# generating Uz -> Qz field
logger.info('generating and saving Qz field...')
field = data_dict['u:2']
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qz-map.txt', field.T, delimiter='\t')
#
# generating Um -> Qm field
logger.info('generating and saving Q magnitude field...')
field = sp.sqrt(data_dict['u:0'] ** 2 + data_dict['u:2'] ** 2)
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qm-map.txt', field.T, delimiter='\t')
| stadelmanma/netl-AP_MAP_FLOW | apmapflow/scripts/apm_process_paraview_data.py | Python | gpl-3.0 | 6,758 |
# -*- encoding: utf -*-
from abjad import *
def test_indicatortools_Tempo__to_markup_01():
tempo = Tempo(Duration(1, 4), 60)
markup = tempo._to_markup()
assert systemtools.TestManager.compare(
markup,
r'''
\markup {
\fontsize
#-6
\general-align
#Y
#DOWN
\note-by-number
#2
#0
#1
\upright
{
=
60
}
}
'''
), format(markup)
def test_indicatortools_Tempo__to_markup_02():
tempo = Tempo(Duration(3, 8), 60)
markup = tempo._to_markup()
assert systemtools.TestManager.compare(
markup,
r'''
\markup {
\fontsize
#-6
\general-align
#Y
#DOWN
\note-by-number
#3
#1
#1
\upright
{
=
60
}
}
'''
), format(markup) | mscuthbert/abjad | abjad/tools/indicatortools/test/test_indicatortools_Tempo__to_markup.py | Python | gpl-3.0 | 1,253 |
# -*- coding: utf-8 -*-
## @package color_histogram.core.hist_2d
#
# Implementation of 2D color histograms.
# @author tody
# @date 2015/08/28
import numpy as np
from color_histogram.core.color_pixels import ColorPixels
from color_histogram.core.hist_common import colorCoordinates, colorDensities, rgbColors, clipLowDensity, range2ticks,\
densitySizes, range2lims
## Implementation of 2D color histograms.
class Hist2D:
## Constructor
# @param image input image.
# @param num_bins target number of histogram bins.
# @param alpha low density clip.
# @param color_space target color space. 'rgb' or 'Lab' or 'hsv'.
# @param channels target color channels. [0, 1] with 'hsv' means (h, s) channels.
def __init__(self, image, num_bins=16, alpha=0.1, color_space='hsv', channels=[0, 1]):
self._computeTargetPixels(image, color_space, channels)
self._num_bins = num_bins
self._alpha = alpha
self._color_space = color_space
self._channels = channels
self._computeColorRange()
self._computeHistogram()
self._plotter = Hist2DPlot(self)
## Plot histogram with the given density size range.
def plot(self, ax, density_size_range=[10, 100]):
self._plotter.plot(ax, density_size_range)
def colorSpace(self):
return self._color_space
def channels(self):
return self._channels
def colorIDs(self):
color_ids = np.where(self._histPositive())
return color_ids
def colorCoordinates(self):
color_ids = self.colorIDs()
num_bins = self._num_bins
color_range = self._color_range
return colorCoordinates(color_ids, num_bins, color_range)
def colorDensities(self):
return colorDensities(self._hist_bins)
def rgbColors(self):
return rgbColors(self._hist_bins, self._color_bins)
def colorRange(self):
return self._color_range
def _computeTargetPixels(self, image, color_space, channels):
color_pixels = ColorPixels(image)
self._pixels = color_pixels.pixels(color_space)[:, channels]
self._rgb_pixels = color_pixels.rgb()
def _computeColorRange(self):
pixels = self._pixels
cs = pixels.shape[1]
c_min = np.zeros(cs)
c_max = np.zeros(cs)
for ci in xrange(cs):
c_min[ci] = np.min(pixels[:, ci])
c_max[ci] = np.max(pixels[:, ci])
self._color_range = [c_min, c_max]
def _computeHistogram(self):
pixels = self._pixels
num_bins = self._num_bins
c_min, c_max = self._color_range
hist_bins = np.zeros((num_bins, num_bins), dtype=np.float32)
color_bins = np.zeros((num_bins, num_bins, 3), dtype=np.float32)
color_ids = (num_bins - 1) * (pixels - c_min) / (c_max - c_min)
color_ids = np.int32(color_ids)
for pi, color_id in enumerate(color_ids):
hist_bins[color_id[0], color_id[1]] += 1
color_bins[color_id[0], color_id[1]] += self._rgb_pixels[pi]
self._hist_bins = hist_bins
hist_positive = self._hist_bins > 0.0
for ci in xrange(3):
color_bins[hist_positive, ci] /= self._hist_bins[hist_positive]
self._color_bins = color_bins
self._clipLowDensity()
def _clipLowDensity(self):
clipLowDensity(self._hist_bins, self._color_bins, self._alpha)
def _histPositive(self):
return self._hist_bins > 0.0
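## Hypothetical usage sketch (the image array and matplotlib axes are assumed to
## come from the caller; parameter values are illustrative):
#
# hist = Hist2D(image, num_bins=32, color_space='Lab', channels=[1, 2])
# hist.plot(ax, density_size_range=[10, 100])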
## 2D color histogram plotter.
class Hist2DPlot:
## Constructor.
# @param hist2D histogram for plotting.
def __init__(self, hist2D):
self._hist2D = hist2D
## Plot histogram with the given density size range.
def plot(self, ax, density_size_range=[10, 100]):
color_samples = self._hist2D.colorCoordinates()
density_sizes = self._densitySizes(density_size_range)
colors = self._hist2D.rgbColors()
ax.scatter(color_samples[:, 0], color_samples[:, 1], color=colors, s=density_sizes)
self._axisSetting(ax)
def _densitySizes(self, density_size_range):
color_densities = self._hist2D.colorDensities()
return densitySizes(color_densities, density_size_range)
def _axisSetting(self, ax):
color_space = self._hist2D.colorSpace()
channels = self._hist2D.channels()
ax.set_xlabel(color_space[channels[0]])
ax.set_ylabel(color_space[channels[1]], rotation='horizontal')
color_range = self._hist2D.colorRange()
tick_range = np.array(color_range).T
xticks, yticks = range2ticks(tick_range)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
xlim, ylim = range2lims(tick_range)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
| EliotBryant/ShadDetector | shadDetector_testing/Colour Based Methods/ColorHistogram-master/color_histogram/core/hist_2d.py | Python | gpl-3.0 | 4,830 |
HOST = "wfSciwoncWiki:enw1989@172.31.29.101:27001,172.31.29.102:27001,172.31.29.103:27001,172.31.29.104:27001,172.31.29.105:27001,172.31.29.106:27001,172.31.29.107:27001,172.31.29.108:27001,172.31.29.109:27001/?authSource=admin"
PORT = ""
USER = ""
PASSWORD = ""
DATABASE = "wiki"
READ_PREFERENCE = "primary"
COLLECTION_INPUT = "user_sessions"
COLLECTION_OUTPUT = "top_sessions"
PREFIX_COLUMN = "w_"
ATTRIBUTES = ["duration", "start time", "end time", "contributor_username", "edition_counts"]
SORT = ["duration", "end time"]
OPERATION_TYPE = "GROUP_BY_FIXED_WINDOW"
COLUMN = "end time"
VALUE = [(1236381526, 1238973525),(1238973526, 1241565525),(1241565526, 1244157525),(1244157526, 1246749525),(1246749526, 1249341525),(1249341526, 1251933525),(1251933526, 1254525525),(1254525526, 1257113925),(1257113926, 1259705925),(1259705926, 1262297925),(1262297926, 1264889925),(1264889926, 1265098299)]
INPUT_FILE = "user_info.csv"
OUTPUT_FILE = "top_sessions_8.csv"
| elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 2/instances/11_0_wikiflow_1sh_1s_annot/longestsession_8/ConfigDB_Longest_8.py | Python | gpl-3.0 | 983 |
#! /usr/bin/env python2
# coding: utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>'
# Copyright (C) 2011 Anthon van der Neut, Ruamel bvba
# Adapted from Leon Bottou's djvulibre C++ code,
# ( ZPCodec.{cpp,h} and BSByteStream.{cpp,h} )
# that code was first converted to C removing any dependencies on the DJVU libre
# framework for ByteStream, making it into a ctypes callable shared object
# then to python, and remade into a class
original_copyright_notice = '''
//C- -------------------------------------------------------------------
//C- DjVuLibre-3.5
//C- Copyright (c) 2002 Leon Bottou and Yann Le Cun.
//C- Copyright (c) 2001 AT&T
//C-
//C- This software is subject to, and may be distributed under, the
//C- GNU General Public License, either Version 2 of the license,
//C- or (at your option) any later version. The license should have
//C- accompanied the software or you may obtain a copy of the license
//C- from the Free Software Foundation at http://www.fsf.org .
//C-
//C- This program is distributed in the hope that it will be useful,
//C- but WITHOUT ANY WARRANTY; without even the implied warranty of
//C- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//C- GNU General Public License for more details.
//C-
//C- DjVuLibre-3.5 is derived from the DjVu(r) Reference Library from
//C- Lizardtech Software. Lizardtech Software has authorized us to
//C- replace the original DjVu(r) Reference Library notice by the following
//C- text (see doc/lizard2002.djvu and doc/lizardtech2007.djvu):
//C-
//C- ------------------------------------------------------------------
//C- | DjVu (r) Reference Library (v. 3.5)
//C- | Copyright (c) 1999-2001 LizardTech, Inc. All Rights Reserved.
//C- | The DjVu Reference Library is protected by U.S. Pat. No.
//C- | 6,058,214 and patents pending.
//C- |
//C- | This software is subject to, and may be distributed under, the
//C- | GNU General Public License, either Version 2 of the license,
//C- | or (at your option) any later version. The license should have
//C- | accompanied the software or you may obtain a copy of the license
//C- | from the Free Software Foundation at http://www.fsf.org .
//C- |
//C- | The computer code originally released by LizardTech under this
//C- | license and unmodified by other parties is deemed "the LIZARDTECH
//C- | ORIGINAL CODE." Subject to any third party intellectual property
//C- | claims, LizardTech grants recipient a worldwide, royalty-free,
//C- | non-exclusive license to make, use, sell, or otherwise dispose of
//C- | the LIZARDTECH ORIGINAL CODE or of programs derived from the
//C- | LIZARDTECH ORIGINAL CODE in compliance with the terms of the GNU
//C- | General Public License. This grant only confers the right to
//C- | infringe patent claims underlying the LIZARDTECH ORIGINAL CODE to
//C- | the extent such infringement is reasonably necessary to enable
//C- | recipient to make, have made, practice, sell, or otherwise dispose
//C- | of the LIZARDTECH ORIGINAL CODE (or portions thereof) and not to
//C- | any greater extent that may be necessary to utilize further
//C- | modifications or combinations.
//C- |
//C- | The LIZARDTECH ORIGINAL CODE is provided "AS IS" WITHOUT WARRANTY
//C- | OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
//C- | TO ANY WARRANTY OF NON-INFRINGEMENT, OR ANY IMPLIED WARRANTY OF
//C- | MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
//C- +------------------------------------------------------------------
//
// $Id: BSByteStream.cpp,v 1.9 2007/03/25 20:48:29 leonb Exp $
// $Name: release_3_5_23 $
'''
MAXBLOCK = 4096
FREQMAX = 4
CTXIDS = 3
MAXLEN = 1024 ** 2
# Exception classes used by this module.
class BZZDecoderError(Exception):
"""This exception is raised when BZZDecode runs into trouble
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "BZZDecoderError: %s" % (self.msg)
# This table has been designed for the ZPCoder
# * by running the following command in file 'zptable.sn':
# * (fast-crude (steady-mat 0.0035 0.0002) 260)))
default_ztable = [ # {{{
(0x8000, 0x0000, 84, 145), # 000: p=0.500000 ( 0, 0)
(0x8000, 0x0000, 3, 4), # 001: p=0.500000 ( 0, 0)
(0x8000, 0x0000, 4, 3), # 002: p=0.500000 ( 0, 0)
(0x6bbd, 0x10a5, 5, 1), # 003: p=0.465226 ( 0, 0)
(0x6bbd, 0x10a5, 6, 2), # 004: p=0.465226 ( 0, 0)
(0x5d45, 0x1f28, 7, 3), # 005: p=0.430708 ( 0, 0)
(0x5d45, 0x1f28, 8, 4), # 006: p=0.430708 ( 0, 0)
(0x51b9, 0x2bd3, 9, 5), # 007: p=0.396718 ( 0, 0)
(0x51b9, 0x2bd3, 10, 6), # 008: p=0.396718 ( 0, 0)
(0x4813, 0x36e3, 11, 7), # 009: p=0.363535 ( 0, 0)
(0x4813, 0x36e3, 12, 8), # 010: p=0.363535 ( 0, 0)
(0x3fd5, 0x408c, 13, 9), # 011: p=0.331418 ( 0, 0)
(0x3fd5, 0x408c, 14, 10), # 012: p=0.331418 ( 0, 0)
(0x38b1, 0x48fd, 15, 11), # 013: p=0.300585 ( 0, 0)
(0x38b1, 0x48fd, 16, 12), # 014: p=0.300585 ( 0, 0)
(0x3275, 0x505d, 17, 13), # 015: p=0.271213 ( 0, 0)
(0x3275, 0x505d, 18, 14), # 016: p=0.271213 ( 0, 0)
(0x2cfd, 0x56d0, 19, 15), # 017: p=0.243438 ( 0, 0)
(0x2cfd, 0x56d0, 20, 16), # 018: p=0.243438 ( 0, 0)
(0x2825, 0x5c71, 21, 17), # 019: p=0.217391 ( 0, 0)
(0x2825, 0x5c71, 22, 18), # 020: p=0.217391 ( 0, 0)
(0x23ab, 0x615b, 23, 19), # 021: p=0.193150 ( 0, 0)
(0x23ab, 0x615b, 24, 20), # 022: p=0.193150 ( 0, 0)
(0x1f87, 0x65a5, 25, 21), # 023: p=0.170728 ( 0, 0)
(0x1f87, 0x65a5, 26, 22), # 024: p=0.170728 ( 0, 0)
(0x1bbb, 0x6962, 27, 23), # 025: p=0.150158 ( 0, 0)
(0x1bbb, 0x6962, 28, 24), # 026: p=0.150158 ( 0, 0)
(0x1845, 0x6ca2, 29, 25), # 027: p=0.131418 ( 0, 0)
(0x1845, 0x6ca2, 30, 26), # 028: p=0.131418 ( 0, 0)
(0x1523, 0x6f74, 31, 27), # 029: p=0.114460 ( 0, 0)
(0x1523, 0x6f74, 32, 28), # 030: p=0.114460 ( 0, 0)
(0x1253, 0x71e6, 33, 29), # 031: p=0.099230 ( 0, 0)
(0x1253, 0x71e6, 34, 30), # 032: p=0.099230 ( 0, 0)
(0x0fcf, 0x7404, 35, 31), # 033: p=0.085611 ( 0, 0)
(0x0fcf, 0x7404, 36, 32), # 034: p=0.085611 ( 0, 0)
(0x0d95, 0x75d6, 37, 33), # 035: p=0.073550 ( 0, 0)
(0x0d95, 0x75d6, 38, 34), # 036: p=0.073550 ( 0, 0)
(0x0b9d, 0x7768, 39, 35), # 037: p=0.062888 ( 0, 0)
(0x0b9d, 0x7768, 40, 36), # 038: p=0.062888 ( 0, 0)
(0x09e3, 0x78c2, 41, 37), # 039: p=0.053539 ( 0, 0)
(0x09e3, 0x78c2, 42, 38), # 040: p=0.053539 ( 0, 0)
(0x0861, 0x79ea, 43, 39), # 041: p=0.045365 ( 0, 0)
(0x0861, 0x79ea, 44, 40), # 042: p=0.045365 ( 0, 0)
(0x0711, 0x7ae7, 45, 41), # 043: p=0.038272 ( 0, 0)
(0x0711, 0x7ae7, 46, 42), # 044: p=0.038272 ( 0, 0)
(0x05f1, 0x7bbe, 47, 43), # 045: p=0.032174 ( 0, 0)
(0x05f1, 0x7bbe, 48, 44), # 046: p=0.032174 ( 0, 0)
(0x04f9, 0x7c75, 49, 45), # 047: p=0.026928 ( 0, 0)
(0x04f9, 0x7c75, 50, 46), # 048: p=0.026928 ( 0, 0)
(0x0425, 0x7d0f, 51, 47), # 049: p=0.022444 ( 0, 0)
(0x0425, 0x7d0f, 52, 48), # 050: p=0.022444 ( 0, 0)
(0x0371, 0x7d91, 53, 49), # 051: p=0.018636 ( 0, 0)
(0x0371, 0x7d91, 54, 50), # 052: p=0.018636 ( 0, 0)
(0x02d9, 0x7dfe, 55, 51), # 053: p=0.015421 ( 0, 0)
(0x02d9, 0x7dfe, 56, 52), # 054: p=0.015421 ( 0, 0)
(0x0259, 0x7e5a, 57, 53), # 055: p=0.012713 ( 0, 0)
(0x0259, 0x7e5a, 58, 54), # 056: p=0.012713 ( 0, 0)
(0x01ed, 0x7ea6, 59, 55), # 057: p=0.010419 ( 0, 0)
(0x01ed, 0x7ea6, 60, 56), # 058: p=0.010419 ( 0, 0)
(0x0193, 0x7ee6, 61, 57), # 059: p=0.008525 ( 0, 0)
(0x0193, 0x7ee6, 62, 58), # 060: p=0.008525 ( 0, 0)
(0x0149, 0x7f1a, 63, 59), # 061: p=0.006959 ( 0, 0)
(0x0149, 0x7f1a, 64, 60), # 062: p=0.006959 ( 0, 0)
(0x010b, 0x7f45, 65, 61), # 063: p=0.005648 ( 0, 0)
(0x010b, 0x7f45, 66, 62), # 064: p=0.005648 ( 0, 0)
(0x00d5, 0x7f6b, 67, 63), # 065: p=0.004506 ( 0, 0)
(0x00d5, 0x7f6b, 68, 64), # 066: p=0.004506 ( 0, 0)
(0x00a5, 0x7f8d, 69, 65), # 067: p=0.003480 ( 0, 0)
(0x00a5, 0x7f8d, 70, 66), # 068: p=0.003480 ( 0, 0)
(0x007b, 0x7faa, 71, 67), # 069: p=0.002602 ( 0, 0)
(0x007b, 0x7faa, 72, 68), # 070: p=0.002602 ( 0, 0)
(0x0057, 0x7fc3, 73, 69), # 071: p=0.001843 ( 0, 0)
(0x0057, 0x7fc3, 74, 70), # 072: p=0.001843 ( 0, 0)
(0x003b, 0x7fd7, 75, 71), # 073: p=0.001248 ( 0, 0)
(0x003b, 0x7fd7, 76, 72), # 074: p=0.001248 ( 0, 0)
(0x0023, 0x7fe7, 77, 73), # 075: p=0.000749 ( 0, 0)
(0x0023, 0x7fe7, 78, 74), # 076: p=0.000749 ( 0, 0)
(0x0013, 0x7ff2, 79, 75), # 077: p=0.000402 ( 0, 0)
(0x0013, 0x7ff2, 80, 76), # 078: p=0.000402 ( 0, 0)
(0x0007, 0x7ffa, 81, 77), # 079: p=0.000153 ( 0, 0)
(0x0007, 0x7ffa, 82, 78), # 080: p=0.000153 ( 0, 0)
(0x0001, 0x7fff, 81, 79), # 081: p=0.000027 ( 0, 0)
(0x0001, 0x7fff, 82, 80), # 082: p=0.000027 ( 0, 0)
(0x5695, 0x0000, 9, 85), # 083: p=0.411764 ( 2, 3)
(0x24ee, 0x0000, 86, 226), # 084: p=0.199988 ( 1, 0)
(0x8000, 0x0000, 5, 6), # 085: p=0.500000 ( 3, 3)
(0x0d30, 0x0000, 88, 176), # 086: p=0.071422 ( 4, 0)
(0x481a, 0x0000, 89, 143), # 087: p=0.363634 ( 1, 2)
(0x0481, 0x0000, 90, 138), # 088: p=0.024388 ( 13, 0)
(0x3579, 0x0000, 91, 141), # 089: p=0.285711 ( 1, 3)
(0x017a, 0x0000, 92, 112), # 090: p=0.007999 ( 41, 0)
(0x24ef, 0x0000, 93, 135), # 091: p=0.199997 ( 1, 5)
(0x007b, 0x0000, 94, 104), # 092: p=0.002611 ( 127, 0)
(0x1978, 0x0000, 95, 133), # 093: p=0.137929 ( 1, 8)
(0x0028, 0x0000, 96, 100), # 094: p=0.000849 ( 392, 0)
(0x10ca, 0x0000, 97, 129), # 095: p=0.090907 ( 1, 13)
(0x000d, 0x0000, 82, 98), # 096: p=0.000276 ( 1208, 0)
(0x0b5d, 0x0000, 99, 127), # 097: p=0.061537 ( 1, 20)
(0x0034, 0x0000, 76, 72), # 098: p=0.001102 ( 1208, 1)
(0x078a, 0x0000, 101, 125), # 099: p=0.040815 ( 1, 31)
(0x00a0, 0x0000, 70, 102), # 100: p=0.003387 ( 392, 1)
(0x050f, 0x0000, 103, 123), # 101: p=0.027397 ( 1, 47)
(0x0117, 0x0000, 66, 60), # 102: p=0.005912 ( 392, 2)
(0x0358, 0x0000, 105, 121), # 103: p=0.018099 ( 1, 72)
(0x01ea, 0x0000, 106, 110), # 104: p=0.010362 ( 127, 1)
(0x0234, 0x0000, 107, 119), # 105: p=0.011940 ( 1, 110)
(0x0144, 0x0000, 66, 108), # 106: p=0.006849 ( 193, 1)
(0x0173, 0x0000, 109, 117), # 107: p=0.007858 ( 1, 168)
(0x0234, 0x0000, 60, 54), # 108: p=0.011925 ( 193, 2)
(0x00f5, 0x0000, 111, 115), # 109: p=0.005175 ( 1, 256)
(0x0353, 0x0000, 56, 48), # 110: p=0.017995 ( 127, 2)
(0x00a1, 0x0000, 69, 113), # 111: p=0.003413 ( 1, 389)
(0x05c5, 0x0000, 114, 134), # 112: p=0.031249 ( 41, 1)
(0x011a, 0x0000, 65, 59), # 113: p=0.005957 ( 2, 389)
(0x03cf, 0x0000, 116, 132), # 114: p=0.020618 ( 63, 1)
(0x01aa, 0x0000, 61, 55), # 115: p=0.009020 ( 2, 256)
(0x0285, 0x0000, 118, 130), # 116: p=0.013652 ( 96, 1)
(0x0286, 0x0000, 57, 51), # 117: p=0.013672 ( 2, 168)
(0x01ab, 0x0000, 120, 128), # 118: p=0.009029 ( 146, 1)
(0x03d3, 0x0000, 53, 47), # 119: p=0.020710 ( 2, 110)
(0x011a, 0x0000, 122, 126), # 120: p=0.005961 ( 222, 1)
(0x05c5, 0x0000, 49, 41), # 121: p=0.031250 ( 2, 72)
(0x00ba, 0x0000, 124, 62), # 122: p=0.003925 ( 338, 1)
(0x08ad, 0x0000, 43, 37), # 123: p=0.046979 ( 2, 47)
(0x007a, 0x0000, 72, 66), # 124: p=0.002586 ( 514, 1)
(0x0ccc, 0x0000, 39, 31), # 125: p=0.069306 ( 2, 31)
(0x01eb, 0x0000, 60, 54), # 126: p=0.010386 ( 222, 2)
(0x1302, 0x0000, 33, 25), # 127: p=0.102940 ( 2, 20)
(0x02e6, 0x0000, 56, 50), # 128: p=0.015695 ( 146, 2)
(0x1b81, 0x0000, 29, 131), # 129: p=0.148935 ( 2, 13)
(0x045e, 0x0000, 52, 46), # 130: p=0.023648 ( 96, 2)
(0x24ef, 0x0000, 23, 17), # 131: p=0.199999 ( 3, 13)
(0x0690, 0x0000, 48, 40), # 132: p=0.035533 ( 63, 2)
(0x2865, 0x0000, 23, 15), # 133: p=0.218748 ( 2, 8)
(0x09de, 0x0000, 42, 136), # 134: p=0.053434 ( 41, 2)
(0x3987, 0x0000, 137, 7), # 135: p=0.304346 ( 2, 5)
(0x0dc8, 0x0000, 38, 32), # 136: p=0.074626 ( 41, 3)
(0x2c99, 0x0000, 21, 139), # 137: p=0.241378 ( 2, 7)
(0x10ca, 0x0000, 140, 172), # 138: p=0.090907 ( 13, 1)
(0x3b5f, 0x0000, 15, 9), # 139: p=0.312499 ( 3, 7)
(0x0b5d, 0x0000, 142, 170), # 140: p=0.061537 ( 20, 1)
(0x5695, 0x0000, 9, 85), # 141: p=0.411764 ( 2, 3)
(0x078a, 0x0000, 144, 168), # 142: p=0.040815 ( 31, 1)
(0x8000, 0x0000, 141, 248), # 143: p=0.500000 ( 2, 2)
(0x050f, 0x0000, 146, 166), # 144: p=0.027397 ( 47, 1)
(0x24ee, 0x0000, 147, 247), # 145: p=0.199988 ( 0, 1)
(0x0358, 0x0000, 148, 164), # 146: p=0.018099 ( 72, 1)
(0x0d30, 0x0000, 149, 197), # 147: p=0.071422 ( 0, 4)
(0x0234, 0x0000, 150, 162), # 148: p=0.011940 ( 110, 1)
(0x0481, 0x0000, 151, 95), # 149: p=0.024388 ( 0, 13)
(0x0173, 0x0000, 152, 160), # 150: p=0.007858 ( 168, 1)
(0x017a, 0x0000, 153, 173), # 151: p=0.007999 ( 0, 41)
(0x00f5, 0x0000, 154, 158), # 152: p=0.005175 ( 256, 1)
(0x007b, 0x0000, 155, 165), # 153: p=0.002611 ( 0, 127)
(0x00a1, 0x0000, 70, 156), # 154: p=0.003413 ( 389, 1)
(0x0028, 0x0000, 157, 161), # 155: p=0.000849 ( 0, 392)
(0x011a, 0x0000, 66, 60), # 156: p=0.005957 ( 389, 2)
(0x000d, 0x0000, 81, 159), # 157: p=0.000276 ( 0, 1208)
(0x01aa, 0x0000, 62, 56), # 158: p=0.009020 ( 256, 2)
(0x0034, 0x0000, 75, 71), # 159: p=0.001102 ( 1, 1208)
(0x0286, 0x0000, 58, 52), # 160: p=0.013672 ( 168, 2)
(0x00a0, 0x0000, 69, 163), # 161: p=0.003387 ( 1, 392)
(0x03d3, 0x0000, 54, 48), # 162: p=0.020710 ( 110, 2)
(0x0117, 0x0000, 65, 59), # 163: p=0.005912 ( 2, 392)
(0x05c5, 0x0000, 50, 42), # 164: p=0.031250 ( 72, 2)
(0x01ea, 0x0000, 167, 171), # 165: p=0.010362 ( 1, 127)
(0x08ad, 0x0000, 44, 38), # 166: p=0.046979 ( 47, 2)
(0x0144, 0x0000, 65, 169), # 167: p=0.006849 ( 1, 193)
(0x0ccc, 0x0000, 40, 32), # 168: p=0.069306 ( 31, 2)
(0x0234, 0x0000, 59, 53), # 169: p=0.011925 ( 2, 193)
(0x1302, 0x0000, 34, 26), # 170: p=0.102940 ( 20, 2)
(0x0353, 0x0000, 55, 47), # 171: p=0.017995 ( 2, 127)
(0x1b81, 0x0000, 30, 174), # 172: p=0.148935 ( 13, 2)
(0x05c5, 0x0000, 175, 193), # 173: p=0.031249 ( 1, 41)
(0x24ef, 0x0000, 24, 18), # 174: p=0.199999 ( 13, 3)
(0x03cf, 0x0000, 177, 191), # 175: p=0.020618 ( 1, 63)
(0x2b74, 0x0000, 178, 222), # 176: p=0.235291 ( 4, 1)
(0x0285, 0x0000, 179, 189), # 177: p=0.013652 ( 1, 96)
(0x201d, 0x0000, 180, 218), # 178: p=0.173910 ( 6, 1)
(0x01ab, 0x0000, 181, 187), # 179: p=0.009029 ( 1, 146)
(0x1715, 0x0000, 182, 216), # 180: p=0.124998 ( 9, 1)
(0x011a, 0x0000, 183, 185), # 181: p=0.005961 ( 1, 222)
(0x0fb7, 0x0000, 184, 214), # 182: p=0.085105 ( 14, 1)
(0x00ba, 0x0000, 69, 61), # 183: p=0.003925 ( 1, 338)
(0x0a67, 0x0000, 186, 212), # 184: p=0.056337 ( 22, 1)
(0x01eb, 0x0000, 59, 53), # 185: p=0.010386 ( 2, 222)
(0x06e7, 0x0000, 188, 210), # 186: p=0.037382 ( 34, 1)
(0x02e6, 0x0000, 55, 49), # 187: p=0.015695 ( 2, 146)
(0x0496, 0x0000, 190, 208), # 188: p=0.024844 ( 52, 1)
(0x045e, 0x0000, 51, 45), # 189: p=0.023648 ( 2, 96)
(0x030d, 0x0000, 192, 206), # 190: p=0.016529 ( 79, 1)
(0x0690, 0x0000, 47, 39), # 191: p=0.035533 ( 2, 63)
(0x0206, 0x0000, 194, 204), # 192: p=0.010959 ( 120, 1)
(0x09de, 0x0000, 41, 195), # 193: p=0.053434 ( 2, 41)
(0x0155, 0x0000, 196, 202), # 194: p=0.007220 ( 183, 1)
(0x0dc8, 0x0000, 37, 31), # 195: p=0.074626 ( 3, 41)
(0x00e1, 0x0000, 198, 200), # 196: p=0.004750 ( 279, 1)
(0x2b74, 0x0000, 199, 243), # 197: p=0.235291 ( 1, 4)
(0x0094, 0x0000, 72, 64), # 198: p=0.003132 ( 424, 1)
(0x201d, 0x0000, 201, 239), # 199: p=0.173910 ( 1, 6)
(0x0188, 0x0000, 62, 56), # 200: p=0.008284 ( 279, 2)
(0x1715, 0x0000, 203, 237), # 201: p=0.124998 ( 1, 9)
(0x0252, 0x0000, 58, 52), # 202: p=0.012567 ( 183, 2)
(0x0fb7, 0x0000, 205, 235), # 203: p=0.085105 ( 1, 14)
(0x0383, 0x0000, 54, 48), # 204: p=0.019021 ( 120, 2)
(0x0a67, 0x0000, 207, 233), # 205: p=0.056337 ( 1, 22)
(0x0547, 0x0000, 50, 44), # 206: p=0.028571 ( 79, 2)
(0x06e7, 0x0000, 209, 231), # 207: p=0.037382 ( 1, 34)
(0x07e2, 0x0000, 46, 38), # 208: p=0.042682 ( 52, 2)
(0x0496, 0x0000, 211, 229), # 209: p=0.024844 ( 1, 52)
(0x0bc0, 0x0000, 40, 34), # 210: p=0.063636 ( 34, 2)
(0x030d, 0x0000, 213, 227), # 211: p=0.016529 ( 1, 79)
(0x1178, 0x0000, 36, 28), # 212: p=0.094593 ( 22, 2)
(0x0206, 0x0000, 215, 225), # 213: p=0.010959 ( 1, 120)
(0x19da, 0x0000, 30, 22), # 214: p=0.139999 ( 14, 2)
(0x0155, 0x0000, 217, 223), # 215: p=0.007220 ( 1, 183)
(0x24ef, 0x0000, 26, 16), # 216: p=0.199998 ( 9, 2)
(0x00e1, 0x0000, 219, 221), # 217: p=0.004750 ( 1, 279)
(0x320e, 0x0000, 20, 220), # 218: p=0.269229 ( 6, 2)
(0x0094, 0x0000, 71, 63), # 219: p=0.003132 ( 1, 424)
(0x432a, 0x0000, 14, 8), # 220: p=0.344827 ( 6, 3)
(0x0188, 0x0000, 61, 55), # 221: p=0.008284 ( 2, 279)
(0x447d, 0x0000, 14, 224), # 222: p=0.349998 ( 4, 2)
(0x0252, 0x0000, 57, 51), # 223: p=0.012567 ( 2, 183)
(0x5ece, 0x0000, 8, 2), # 224: p=0.434782 ( 4, 3)
(0x0383, 0x0000, 53, 47), # 225: p=0.019021 ( 2, 120)
(0x8000, 0x0000, 228, 87), # 226: p=0.500000 ( 1, 1)
(0x0547, 0x0000, 49, 43), # 227: p=0.028571 ( 2, 79)
(0x481a, 0x0000, 230, 246), # 228: p=0.363634 ( 2, 1)
(0x07e2, 0x0000, 45, 37), # 229: p=0.042682 ( 2, 52)
(0x3579, 0x0000, 232, 244), # 230: p=0.285711 ( 3, 1)
(0x0bc0, 0x0000, 39, 33), # 231: p=0.063636 ( 2, 34)
(0x24ef, 0x0000, 234, 238), # 232: p=0.199997 ( 5, 1)
(0x1178, 0x0000, 35, 27), # 233: p=0.094593 ( 2, 22)
(0x1978, 0x0000, 138, 236), # 234: p=0.137929 ( 8, 1)
(0x19da, 0x0000, 29, 21), # 235: p=0.139999 ( 2, 14)
(0x2865, 0x0000, 24, 16), # 236: p=0.218748 ( 8, 2)
(0x24ef, 0x0000, 25, 15), # 237: p=0.199998 ( 2, 9)
(0x3987, 0x0000, 240, 8), # 238: p=0.304346 ( 5, 2)
(0x320e, 0x0000, 19, 241), # 239: p=0.269229 ( 2, 6)
(0x2c99, 0x0000, 22, 242), # 240: p=0.241378 ( 7, 2)
(0x432a, 0x0000, 13, 7), # 241: p=0.344827 ( 3, 6)
(0x3b5f, 0x0000, 16, 10), # 242: p=0.312499 ( 7, 3)
(0x447d, 0x0000, 13, 245), # 243: p=0.349998 ( 2, 4)
(0x5695, 0x0000, 10, 2), # 244: p=0.411764 ( 3, 2)
(0x5ece, 0x0000, 7, 1), # 245: p=0.434782 ( 3, 4)
(0x8000, 0x0000, 244, 83), # 246: p=0.500000 ( 2, 2)
(0x8000, 0x0000, 249, 250), # 247: p=0.500000 ( 1, 1)
(0x5695, 0x0000, 10, 2), # 248: p=0.411764 ( 3, 2)
(0x481a, 0x0000, 89, 143), # 249: p=0.363634 ( 1, 2)
(0x481a, 0x0000, 230, 246), # 250: p=0.363634 ( 2, 1)
(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 0, 0),
]
xmtf = (
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
)
# }}}
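# BZZDecoder couples ZP arithmetic decoding with an adaptive move-to-front model
# and an inverse blocksort (Burrows-Wheeler style) transform to inflate
# BZZ-compressed DjVu streams. Minimal usage sketch (variable names are
# illustrative; `compressed` is assumed to hold the raw BZZ byte stream):
#
# out = bytearray()
# BZZDecoder(bytearray(compressed), out).convert(MAXLEN)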
class BZZDecoder():
def __init__(self, infile, outfile):
self.instream = infile
self.inptr = 0
self.outf = outfile
self.ieof = False
self.bptr = None
self.xsize = None
self.outbuf = [0] * (MAXBLOCK * 1024)
self.byte = None
self.scount = 0
self.delay = 25
self.a = 0
self.code = 0
self.bufint = 0
self.ctx = [0] * 300
# table
self.p = [0] * 256
self.m = [0] * 256
self.up = [0] * 256
self.dn = [0] * 256
# machine independent ffz
self.ffzt = [0] * 256
# Create machine independent ffz table
for i in range(256):
j = i
while(j & 0x80):
self.ffzt[i] += 1
j <<= 1
# Initialize table
self.newtable(default_ztable)
# Codebit counter
# Read first 16 bits of code
if not self.read_byte():
self.byte = 0xff
self.code = (self.byte << 8)
if not self.read_byte():
self.byte = 0xff
self.code = self.code | self.byte
# Preload buffer
self.preload()
# Compute initial fence
self.fence = self.code
if self.code >= 0x8000:
self.fence = 0x7fff
def convert(self, sz):
if self.ieof:
return 0
copied = 0
while sz > 0 and not self.ieof:
# Decode if needed
if not self.xsize:
self.bptr = 0
if not self.decode(): # input block size set in decode
self.xsize = 1
self.ieof = True
self.xsize -= 1
# Compute remaining
remaining = min(sz, self.xsize)
# Transfer
if remaining > 0:
self.outf.extend(self.outbuf[self.bptr:self.bptr + remaining])
self.xsize -= remaining
self.bptr += remaining
sz -= remaining
copied += remaining
# offset += bytes; // for tell()
return copied
def preload(self):
while self.scount <= 24:
if not self.read_byte():
self.byte = 0xff
self.delay -= 1
if self.delay < 1:
raise BZZDecoderError("BiteStream EOF")
self.bufint = (self.bufint << 8) | self.byte
self.scount += 8
def newtable(self, table):
for i in range(256):
self.p[i] = table[i][0]
self.m[i] = table[i][1]
self.up[i] = table[i][2]
self.dn[i] = table[i][3]
def decode(self):
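        # One BZZ block: read the 24-bit block size, ZP-decode every symbol
        # through the quasi move-to-front model into outbuf, then undo the
        # blocksort transform in place using the recorded marker position.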
outbuf = self.outbuf
# Decode block size
self.xsize = self.decode_raw(24)
if not self.xsize:
return 0
if self.xsize > MAXBLOCK * 1024: # 4MB (4096 * 1024) is max block
raise BZZDecoderError("BiteStream.corrupt")
        # Decode Estimation Speed
fshift = 0
if self.zpcodec_decoder():
fshift += 1
if self.zpcodec_decoder():
fshift += 1
# Prepare Quasi MTF
mtf = list(xmtf) # unsigned chars
freq = [0] * FREQMAX
fadd = 4
# Decode
mtfno = 3
markerpos = -1
zc = lambda i: self.zpcodec_decode(self.ctx, i)
dc = lambda i, bits: self.decode_binary(self.ctx, i, bits)
for i in xrange(self.xsize):
ctxid = CTXIDS - 1
if ctxid > mtfno:
ctxid = mtfno
if zc(ctxid):
mtfno = 0
outbuf[i] = mtf[mtfno]
elif zc(ctxid + CTXIDS):
mtfno = 1
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS):
mtfno = 2 + dc(2*CTXIDS + 1, 1)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS+2):
mtfno = 4 + dc(2*CTXIDS+2 + 1, 2)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS + 6):
mtfno = 8 + dc(2*CTXIDS + 6 + 1, 3)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS + 14):
mtfno = 16 + dc(2*CTXIDS + 14 + 1, 4)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS + 30):
mtfno = 32 + dc(2*CTXIDS + 30 + 1, 5)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS + 62):
mtfno = 64 + dc(2*CTXIDS + 62 + 1, 6)
outbuf[i] = mtf[mtfno]
elif zc(2*CTXIDS + 126):
mtfno = 128 + dc(2*CTXIDS + 126 + 1, 7)
outbuf[i] = mtf[mtfno]
else:
mtfno = 256 # EOB
outbuf[i] = 0
markerpos = i
continue
# Rotate mtf according to empirical frequencies (new!)
# :rotate label
# Adjust frequencies for overflow
fadd = fadd + (fadd >> fshift)
if fadd > 0x10000000:
fadd >>= 24
freq[0] >>= 24
freq[1] >>= 24
freq[2] >>= 24
freq[3] >>= 24
for k in range(4, FREQMAX):
freq[k] = freq[k] >> 24
# Relocate new char according to new freq
fc = fadd
if mtfno < FREQMAX:
fc += freq[mtfno]
k = mtfno
while (k >= FREQMAX):
mtf[k] = mtf[k - 1]
k -= 1
while (k > 0 and fc >= freq[k - 1]):
mtf[k] = mtf[k - 1]
freq[k] = freq[k - 1]
k -= 1
mtf[k] = outbuf[i]
freq[k] = fc
# ///////////////////////////////
# //////// Reconstruct the string
if markerpos < 1 or markerpos >= self.xsize:
raise BZZDecoderError("BiteStream.corrupt")
# Allocate pointers
posn = [0] * self.xsize
# Prepare count buffer
count = [0] * 256
# Fill count buffer
for i in range(markerpos):
c = outbuf[i]
posn[i] = (c << 24) | (count[c] & 0xffffff)
count[c] += 1
for i in range(markerpos + 1, self.xsize):
c = outbuf[i]
posn[i] = (c << 24) | (count[c] & 0xffffff)
count[c] += 1
# Compute sorted char positions
last = 1
for i in range(256):
tmp = count[i]
count[i] = last
last += tmp
# Undo the sort transform
i = 0
last = self.xsize - 1
while last > 0:
n = posn[i]
c = (posn[i] >> 24)
last -= 1
outbuf[last] = c
i = count[c] + (n & 0xffffff)
# Free and check
if i != markerpos:
raise BZZDecoderError("BiteStream.corrupt")
return self.xsize
def decode_raw(self, bits):
n = 1
m = (1 << bits)
while n < m:
b = self.zpcodec_decoder()
n = (n << 1) | b
return n - m
def decode_binary(self, ctx, index, bits):
n = 1
m = (1 << bits)
while n < m:
b = self.zpcodec_decode(ctx, index + n - 1)
n = (n << 1) | b
return n - m
def zpcodec_decoder(self):
return self.decode_sub_simple(0, 0x8000 + (self.a >> 1))
def decode_sub_simple(self, mps, z):
# Test MPS/LPS
if z > self.code:
# LPS branch
z = 0x10000 - z
self.a += +z
self.code = self.code + z
# LPS renormalization
shift = self.ffz()
self.scount -= shift
self.a = self.a << shift
self.a &= 0xffff
self.code = (self.code << shift) | ((self.bufint >> self.scount) & ((1 << shift) - 1))
self.code &= 0xffff
if self.scount < 16:
self.preload()
# Adjust fence
self.fence = self.code
if self.code >= 0x8000:
self.fence = 0x7fff
result = mps ^ 1
else:
# MPS renormalization
self.scount -= 1
self.a = (z << 1) & 0xffff
self.code = ((self.code << 1) | ((self.bufint >> self.scount) & 1))
self.code &= 0xffff
if self.scount < 16:
self.preload()
# Adjust fence
self.fence = self.code
if self.code >= 0x8000:
self.fence = 0x7fff
result = mps
return result
def decode_sub(self, ctx, index, z):
# Save bit
bit = (ctx[index] & 1)
# Avoid interval reversion
d = 0x6000 + ((z + self.a) >> 2)
if z > d:
z = d
# Test MPS/LPS
if z > self.code:
# LPS branch
z = 0x10000 - z
self.a += +z
self.code = self.code + z
# LPS adaptation
ctx[index] = self.dn[ctx[index]]
# LPS renormalization
shift = self.ffz()
self.scount -= shift
self.a = (self.a << shift) & 0xffff
self.code = ((self.code << shift) | ((self.bufint >> self.scount) & ((1 << shift) - 1))) & 0xffff
if self.scount < 16:
self.preload()
# Adjust fence
self.fence = self.code
if self.code >= 0x8000:
self.fence = 0x7fff
return bit ^ 1
else:
# MPS adaptation
if self.a >= self.m[ctx[index]]:
ctx[index] = self.up[ctx[index]]
# MPS renormalization
self.scount -= 1
self.a = z << 1 & 0xffff
self.code = ((self.code << 1) | ((self.bufint >> self.scount) & 1)) & 0xffff
if self.scount < 16:
self.preload()
# Adjust fence
self.fence = self.code
if self.code >= 0x8000:
self.fence = 0x7fff
return bit
def zpcodec_decode(self, ctx, index):
z = self.a + self.p[ctx[index]]
if z <= self.fence:
self.a = z
res = (ctx[index] & 1)
else:
res = self.decode_sub(ctx, index, z)
return res
def read_byte(self):
try:
self.byte = self.instream[self.inptr]
self.inptr += 1
return True
except IndexError:
return False
def ffz(self):
x = self.a
if (x >= 0xff00):
return (self.ffzt[x & 0xff] + 8)
else:
return (self.ffzt[(x >> 8) & 0xff])
# for testing
def main():
import sys
from calibre.constants import plugins
raw = file(sys.argv[1], "rb").read()
d = plugins['bzzdec'][0]
print (d.decompress(raw))
if __name__ == "__main__":
main()
| jelly/calibre | src/calibre/ebooks/djvu/djvubzzdec.py | Python | gpl-3.0 | 32,959 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Deepin, Inc.
# 2011 Wang Yong
#
# Author: Wang Yong <lazycat.manatee@gmail.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from appItem import *
from constant import *
from draw import *
from lang import __, getDefaultLanguage
import appView
import gtk
import pango
import utils
class SearchItem(DownloadItem):
'''Application item.'''
MAX_CHARS = 50
VERSION_MAX_CHARS = 30
APP_LEFT_PADDING_X = 5
STAR_PADDING_X = 2
NORMAL_PADDING_X = 2
VOTE_PADDING_X = 3
VOTE_PADDING_Y = 1
LIKE_PADDING_X = 10
RATE_PADDING_X = 3
SIZE_LABEL_WIDTH = 60
def __init__(self, appInfo, switchStatus, downloadQueue,
entryDetailCallback, sendVoteCallback, index, getSelectIndex, setSelectIndex,
launchApplicationCallback):
'''Init for application item.'''
DownloadItem.__init__(self, appInfo, switchStatus, downloadQueue)
self.appInfo = appInfo
self.entryDetailCallback = entryDetailCallback
self.sendVoteCallback = sendVoteCallback
self.index = index
self.setSelectIndex = setSelectIndex
self.launchApplicationCallback = launchApplicationCallback
# Init.
self.itemBox = gtk.HBox()
self.itemEventBox = gtk.EventBox()
self.itemEventBox.connect("button-press-event", self.clickItem)
drawListItem(self.itemEventBox, index, getSelectIndex)
self.itemFrame = gtk.Alignment()
self.itemFrame.set(0.0, 0.5, 1.0, 1.0)
self.appBasicView = AppBasicView(self.appInfo, 200 + APP_BASIC_WIDTH_ADJUST, self.itemBox, self.entryDetailView)
# Widget that status will change.
self.installingProgressbar = None
self.installingFeedbackLabel = None
self.upgradingProgressbar = None
self.upgradingFeedbackLabel = None
# Connect components.
self.itemBox.pack_start(self.appBasicView.align, True, True, self.APP_LEFT_PADDING_X)
self.appAdditionBox = gtk.HBox()
self.appAdditionAlign = gtk.Alignment()
self.appAdditionAlign.set(1.0, 0.5, 0.0, 0.0)
self.appAdditionAlign.add(self.appAdditionBox)
self.itemBox.pack_start(self.appAdditionAlign, False, False)
self.initAdditionStatus()
self.itemEventBox.add(self.itemBox)
self.itemFrame.add(self.itemEventBox)
self.itemFrame.show_all()
def entryDetailView(self):
'''Entry detail view.'''
self.entryDetailCallback(PAGE_REPO, self.appInfo)
def clickItem(self, widget, event):
'''Click item.'''
if utils.isDoubleClick(event):
self.entryDetailView()
else:
self.setSelectIndex(self.index)
def initAdditionStatus(self):
'''Add addition status.'''
status = self.appInfo.status
if status in [APP_STATE_NORMAL, APP_STATE_UPGRADE, APP_STATE_INSTALLED]:
self.initNormalStatus()
elif status == APP_STATE_DOWNLOADING:
self.initDownloadingStatus(self.appAdditionBox)
elif status == APP_STATE_DOWNLOAD_PAUSE:
self.initDownloadPauseStatus(self.appAdditionBox)
elif status == APP_STATE_INSTALLING:
self.initInstallingStatus()
elif status == APP_STATE_UPGRADING:
self.initUpgradingStatus()
self.itemFrame.show_all()
def initNormalStatus(self):
'''Init normal status.'''
pkg = self.appInfo.pkg
# Clean right box first.
utils.containerRemoveAll(self.appAdditionBox)
# Add application vote information.
self.appVoteView = VoteView(
self.appInfo, PAGE_REPO,
self.sendVoteCallback)
self.appAdditionBox.pack_start(self.appVoteView.eventbox, False, False)
# Add application size.
size = utils.getPkgSize(pkg)
appSizeLabel = DynamicSimpleLabel(
self.appAdditionBox,
utils.formatFileSize(size),
appTheme.getDynamicColor("appSize"),
LABEL_FONT_SIZE,
)
appSize = appSizeLabel.getLabel()
appSize.set_size_request(self.SIZE_LABEL_WIDTH, -1)
appSize.set_alignment(1.0, 0.5)
self.appAdditionBox.pack_start(appSize, False, False, self.LIKE_PADDING_X)
# Add action button.
(actionButtonBox, actionButtonAlign) = createActionButton()
self.appAdditionBox.pack_start(actionButtonAlign, False, False)
if self.appInfo.status == APP_STATE_NORMAL:
(appButton, appButtonAlign) = newActionButton(
"install", 0.5, 0.5,
"cell", False, __("Action Install"), BUTTON_FONT_SIZE_SMALL, "buttonFont"
)
appButton.connect("button-release-event", lambda widget, event: self.switchToDownloading())
actionButtonBox.pack_start(appButtonAlign)
elif self.appInfo.status == APP_STATE_UPGRADE:
(appButton, appButtonAlign) = newActionButton(
"update", 0.5, 0.5,
"cell", False, __("Action Update"), BUTTON_FONT_SIZE_SMALL, "buttonFont"
)
appButton.connect("button-release-event", lambda widget, event: self.switchToDownloading())
actionButtonBox.pack_start(appButtonAlign)
else:
execPath = self.appInfo.execPath
if execPath:
(appButton, appButtonAlign) = newActionButton(
"update", 0.5, 0.5,
"cell", False, __("Action Startup"), BUTTON_FONT_SIZE_SMALL, "buttonFont"
)
appButton.connect("button-release-event", lambda widget, event: self.launchApplicationCallback(execPath))
actionButtonBox.pack_start(appButtonAlign)
else:
appInstalledDynamicLabel = DynamicSimpleLabel(
actionButtonBox,
__("Action Installed"),
appTheme.getDynamicColor("installed"),
LABEL_FONT_SIZE,
)
appInstalledLabel = appInstalledDynamicLabel.getLabel()
buttonImage = appTheme.getDynamicPixbuf("cell/update_hover.png").getPixbuf()
appInstalledLabel.set_size_request(buttonImage.get_width(), buttonImage.get_height())
actionButtonBox.pack_start(appInstalledLabel)
def updateVoteView(self, starLevel, commentNum):
'''Update vote view.'''
if self.appInfo.status in [APP_STATE_NORMAL, APP_STATE_UPGRADE, APP_STATE_INSTALLED] and self.appVoteView != None:
self.appVoteView.updateVote(starLevel, commentNum)
self.appBasicView.updateCommentNum(commentNum)
class SearchView(appView.AppView):
'''Search view.'''
def __init__(self, appNum, getListFunc, switchStatus, downloadQueue,
entryDetailCallback, sendVoteCallback, fetchVoteCallback,
launchApplicationCallback):
'''Init for search view.'''
appView.AppView.__init__(self, appNum, PAGE_REPO, True)
# Init.
self.getListFunc = getListFunc
self.switchStatus = switchStatus
self.downloadQueue = downloadQueue
self.entryDetailCallback = entryDetailCallback
self.sendVoteCallback = sendVoteCallback
self.fetchVoteCallback = fetchVoteCallback
self.launchApplicationCallback = launchApplicationCallback
self.itemDict = {}
self.show()
def updateSearch(self, appNum):
'''Update view.'''
self.appNum = appNum
self.calculateMaxPageIndex()
self.pageIndex = 1
self.show()
def createAppList(self, appList):
'''Create application list.'''
# Init.
itemPaddingY = 5
box = gtk.VBox()
for (index, appInfo) in enumerate(appList):
appItem = SearchItem(appInfo, self.switchStatus, self.downloadQueue,
self.entryDetailCallback,
self.sendVoteCallback,
index, self.getSelectItemIndex, self.setSelectItemIndex,
self.launchApplicationCallback)
box.pack_start(appItem.itemFrame, False, False)
self.itemDict[utils.getPkgName(appItem.appInfo.pkg)] = appItem
return box
| manateelazycat/deepin-software-center | src/searchView.py | Python | gpl-3.0 | 9,264 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import io
import re
init_py = io.open('pigshare/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_py))
metadata['doc'] = re.findall('"""(.+)"""', init_py)[0]
requirements = [
"argparse",
"setuptools",
"restkit",
"booby",
"simplejson",
"parinx",
"pyclist",
"argcomplete"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pigshare',
version=metadata['version'],
description=metadata['doc'],
author=metadata['author'],
author_email=metadata['email'],
url=metadata['url'],
packages=[
'pigshare',
],
package_dir={'pigshare':
'pigshare'},
include_package_data=True,
install_requires=requirements,
license="GPLv3",
zip_safe=False,
keywords='pigshare figshare client rest api',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements,
entry_points={
'console_scripts': [
'pigshare = pigshare.pigshare:run'
],
}
)
| makkus/pigshare | setup.py | Python | gpl-3.0 | 1,523 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"klahnakoski@mozilla.com"}
# }}
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
import mo_json
from pyLibrary import convert
from mo_logs.exceptions import Except
from mo_logs import Log
from mo_dots import Data, coalesce, wrap, set_default, unwrap
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
from mo_math import Math
from jx_python import jx
from mo_threads import Thread, Lock
from mo_threads import Till
from mo_times.durations import Duration
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024
MIN_READ_SIZE = 8 * 1024
ZIP_REQUEST = False
default_headers = Data() # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600
_warning_sent = False
def request(method, url, zip=None, retry=None, **kwargs):
"""
JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
DEMANDS data IS ONE OF:
* A JSON-SERIALIZABLE STRUCTURE, OR
* LIST OF JSON-SERIALIZABLE STRUCTURES, OR
* None
Parameters
* zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
* json - JSON-SERIALIZABLE STRUCTURE
* retry - {"times": x, "sleep": y} STRUCTURE
THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
INCLUDES url AND headers
"""
global _warning_sent
if not default_headers and not _warning_sent:
_warning_sent = True
Log.warning(
"The pyLibrary.env.http module was meant to add extra "
"default headers to all requests, specifically the 'Referer' "
"header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
"function to set `pyLibrary.env.http.default_headers`"
)
if isinstance(url, list):
# TRY MANY URLS
failures = []
for remaining, u in jx.countdown(url):
try:
response = request(method, u, zip=zip, retry=retry, **kwargs)
if Math.round(response.status_code, decimal=-2) not in [400, 500]:
return response
if not remaining:
return response
except Exception as e:
e = Except.wrap(e)
failures.append(e)
Log.error("Tried {{num}} urls", num=len(url), cause=failures)
if b"session" in kwargs:
session = kwargs[b"session"]
del kwargs[b"session"]
else:
session = sessions.Session()
session.headers.update(default_headers)
if zip is None:
zip = ZIP_REQUEST
if isinstance(url, unicode):
# httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
url = url.encode("ascii")
_to_ascii_dict(kwargs)
timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
if retry == None:
retry = Data(times=1, sleep=0)
elif isinstance(retry, Number):
retry = Data(times=retry, sleep=1)
else:
retry = wrap(retry)
if isinstance(retry.sleep, Duration):
retry.sleep = retry.sleep.seconds
set_default(retry, {"times": 1, "sleep": 0})
if b'json' in kwargs:
kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
del kwargs[b'json']
try:
headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
set_default(headers, {b"accept-encoding": b"compress, gzip"})
if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
compressed = convert.bytes2zip(kwargs[b"data"])
headers[b'content-encoding'] = b'gzip'
kwargs[b"data"] = compressed
_to_ascii_dict(headers)
else:
_to_ascii_dict(headers)
except Exception as e:
Log.error("Request setup failure on {{url}}", url=url, cause=e)
errors = []
for r in range(retry.times):
if r:
Till(seconds=retry.sleep).wait()
try:
if DEBUG:
Log.note("http {{method}} to {{url}}", method=method, url=url)
return session.request(method=method, url=url, **kwargs)
except Exception as e:
errors.append(Except.wrap(e))
if " Read timed out." in errors[0]:
Log.error("Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
else:
Log.error("Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
def _to_ascii_dict(headers):
if headers is None:
return
for k, v in copy(headers).items():
if isinstance(k, unicode):
del headers[k]
if isinstance(v, unicode):
headers[k.encode("ascii")] = v.encode("ascii")
else:
headers[k.encode("ascii")] = v
elif isinstance(v, unicode):
headers[k] = v.encode("ascii")
def get(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'get', url, **kwargs))
def get_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
response = get(url, **kwargs)
c = response.all_content
return mo_json.json2value(convert.utf82unicode(c))
def options(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'options', url, **kwargs))
def head(url, **kwargs):
kwargs.setdefault(b'allow_redirects', False)
kwargs[b"stream"] = True
return HttpResponse(request(b'head', url, **kwargs))
def post(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'post', url, **kwargs))
def post_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
if b"json" in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"json"]))
elif b'data' in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"data"]))
else:
Log.error("Expecting `json` parameter")
response = post(url, **kwargs)
c = response.content
try:
details = mo_json.json2value(convert.utf82unicode(c))
except Exception as e:
Log.error("Unexpected return value {{content}}", content=c, cause=e)
if response.status_code not in [200, 201]:
Log.error("Bad response", cause=Except.wrap(details))
return details
def put(url, **kwargs):
return HttpResponse(request(b'put', url, **kwargs))
def patch(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'patch', url, **kwargs))
def delete(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'delete', url, **kwargs))
class HttpResponse(Response):
def __new__(cls, resp):
resp.__class__ = HttpResponse
return resp
def __init__(self, resp):
pass
self._cached_content = None
@property
def all_content(self):
# response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
# THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
if self._content is not False:
self._cached_content = self._content
elif self._cached_content is None:
def read(size):
if self.raw._fp.fp is not None:
return self.raw.read(amt=size, decode_content=True)
else:
self.close()
return None
self._cached_content = safe_size(Data(read=read))
if hasattr(self._cached_content, "read"):
self._cached_content.seek(0)
return self._cached_content
@property
def all_lines(self):
return self.get_all_lines()
def get_all_lines(self, encoding="utf8", flexible=False):
try:
iterator = self.raw.stream(4096, decode_content=False)
if self.headers.get('content-encoding') == 'gzip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.headers.get('content-type') == 'application/zip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.url.endswith(".gz"):
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
else:
return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
except Exception as e:
Log.error("Can not read content", cause=e)
class Generator_usingStream(object):
"""
A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY
"""
def __init__(self, stream, length, _shared=None):
"""
:param stream: THE STREAM WE WILL GET THE BYTES FROM
:param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
:param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
:return:
"""
self.position = 0
file_ = TemporaryFile()
if not _shared:
self.shared = Data(
length=length,
locker=Lock(),
stream=stream,
done_read=0,
file=file_,
buffer=mmap(file_.fileno(), length)
)
else:
self.shared = _shared
self.shared.ref_count += 1
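    # Hedged usage sketch (the stream and its length are assumed to come from
    # the caller):
    #
    #   chunks = Generator_usingStream(raw_stream, content_length)
    #   for chunk in chunks:
    #       process(chunk)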
def __iter__(self):
return Generator_usingStream(None, self.shared.length, self.shared)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def next(self):
if self.position >= self.shared.length:
raise StopIteration
end = min(self.position + MIN_READ_SIZE, self.shared.length)
s = self.shared
with s.locker:
while end > s.done_read:
data = s.stream.read(MIN_READ_SIZE)
s.buffer.write(data)
s.done_read += MIN_READ_SIZE
if s.done_read >= s.length:
s.done_read = s.length
s.stream.close()
try:
return s.buffer[self.position:end]
finally:
self.position = end
def close(self):
with self.shared.locker:
if self.shared:
s, self.shared = self.shared, None
s.ref_count -= 1
if s.ref_count==0:
try:
s.stream.close()
except Exception:
pass
try:
s.buffer.close()
except Exception:
pass
try:
s.file.close()
except Exception:
pass
def __del__(self):
self.close()
| maggienj/ActiveData | pyLibrary/env/http.py | Python | mpl-2.0 | 11,843 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
import hashlib
import json
import time
import urlparse
from contextlib import contextmanager
import boto.exception
import mock
import moto
import pytz
from nose.tools import eq_
from relengapi.blueprints import tooltool
from relengapi.blueprints.tooltool import tables
from relengapi.blueprints.tooltool import util
from relengapi.lib import auth
from relengapi.lib import time as relengapi_time
from relengapi.lib.permissions import p
from relengapi.lib.testing.context import TestContext
def userperms(perms, email='me'):
u = auth.HumanUser(email)
u._permissions = set(perms)
return u
cfg = {
'AWS': {
'access_key_id': 'aa',
'secret_access_key': 'ss',
},
'TOOLTOOL_REGIONS': {
'us-east-1': 'tt-use1',
'us-west-2': 'tt-usw2',
}
}
test_context = TestContext(config=cfg, databases=['relengapi'],
user=userperms([p.tooltool.download.public,
p.tooltool.upload.public]))
allow_anon_cfg = cfg.copy()
allow_anon_cfg['TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD'] = True
ONE = '1\n'
ONE_DIGEST = hashlib.sha512(ONE).hexdigest()
TWO = '22\n'
TWO_DIGEST = hashlib.sha512(TWO).hexdigest()
NOW = 1425592922
class NoEmailUser(auth.BaseUser):
type = 'no-email'
def get_id(self):
return 'no-email:sorry'
def get_permissions(self):
return [p.tooltool.upload.public]
def mkbatch(message="a batch"):
return {
'message': message,
'files': {
'one': {
'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST,
'visibility': 'public',
},
},
}
def upload_batch(client, batch, region=None):
region_arg = '?region={}'.format(region) if region else ''
return client.post_json('/tooltool/upload' + region_arg, data=batch)
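# Typical flow in the tests below (sketch): build a batch description with
# mkbatch(), POST it through upload_batch(client, batch), then check the JSON
# response and database rows with the assert_* helpers defined further down.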
def add_file_to_db(app, content, regions=['us-east-1'],
pending_regions=[], visibility='public'):
with app.app_context():
session = app.db.session('relengapi')
file_row = tables.File(size=len(content),
visibility=visibility,
sha512=hashlib.sha512(content).hexdigest())
session.add(file_row)
session.commit()
for region in regions:
session.add(tables.FileInstance(
file_id=file_row.id, region=region))
for region in pending_regions:
session.add(tables.PendingUpload(
file=file_row, region=region,
expires=relengapi_time.now() + datetime.timedelta(seconds=60)))
session.commit()
return file_row
def add_batch_to_db(app, author, message, files):
with app.app_context():
session = app.db.session('relengapi')
batch = tables.Batch(author=author, message=message,
uploaded=relengapi_time.now())
session.add(batch)
for filename, file in files.iteritems():
session.add(tables.BatchFile(filename=filename, batch=batch, file=file))
session.commit()
return batch
def add_file_to_s3(app, content, region='us-east-1'):
with app.app_context():
conn = app.aws.connect_to('s3', region)
bucket_name = cfg['TOOLTOOL_REGIONS'][region]
try:
conn.head_bucket(bucket_name)
except boto.exception.S3ResponseError:
conn.create_bucket(bucket_name)
bucket = conn.get_bucket(bucket_name)
key_name = util.keyname(hashlib.sha512(content).hexdigest())
key = bucket.new_key(key_name)
key.set_contents_from_string(content)
@contextmanager
def set_time(now=NOW):
with mock.patch('time.time') as fake_time, \
mock.patch('relengapi.lib.time.now') as fake_now:
fake_time.return_value = now
fake_now.return_value = datetime.datetime.fromtimestamp(now, pytz.UTC)
yield
@contextmanager
def not_so_random_choice():
with mock.patch('random.choice') as choice:
choice.side_effect = lambda seq: sorted(seq)[0]
yield
def assert_signed_302(resp, digest, method='GET', region=None,
expires_in=60, bucket=None):
eq_(resp.status_code, 302)
url = resp.headers['Location']
assert_signed_url(url, digest, method=method, region=region,
expires_in=expires_in, bucket=bucket)
def assert_signed_url(url, digest, method='GET', region=None,
expires_in=60, bucket=None):
region = region or 'us-east-1'
bucket = bucket or cfg['TOOLTOOL_REGIONS'][region]
if region == 'us-east-1':
host = '{}.s3.amazonaws.com'.format(bucket)
else:
host = '{}.s3-{}.amazonaws.com'.format(bucket, region)
url = urlparse.urlparse(url)
eq_(url.scheme, 'https')
eq_(url.netloc, host)
eq_(url.path, '/' + util.keyname(digest))
query = urlparse.parse_qs(url.query)
assert 'Signature' in query
# sadly, headers are not represented in the URL
eq_(query['AWSAccessKeyId'][0], 'aa')
eq_(int(query['Expires'][0]), time.time() + expires_in)
def assert_batch_response(resp, author='me', message='a batch',
files={}):
eq_(resp.status_code, 200, resp.data)
result = json.loads(resp.data)['result']
eq_(result['author'], author)
# TODO: eq_(result[
eq_(result['message'], message)
eq_(set(result['files']), set(files))
for name, file in files.iteritems():
for k, v in file.iteritems():
eq_(result['files'][name][k], v,
"result['files'][{}][{}] {} != {}".format(
name, k, result['files'][name][k], v))
return result
def assert_batch_row(app, id, author='me', message='a batch', files=[]):
with app.app_context():
tbl = tables.Batch
batch_row = tbl.query.filter(tbl.id == id).first()
eq_(batch_row.author, author)
eq_(batch_row.message, message)
got_files = [(n, f.size, f.sha512, sorted(i.region for i in f.instances))
for n, f in batch_row.files.iteritems()]
eq_(sorted(got_files), sorted(files))
def assert_pending_upload(app, digest, region, expires=None):
with app.app_context():
tbl = tables.File
file = tbl.query.filter(tbl.sha512 == digest).first()
        pending_by_region = {pu.region: pu for pu in file.pending_uploads}
        assert region in pending_by_region, pending_by_region.keys()
        if expires:
            # compare against the pending upload for this region, not a leaked loop variable
            eq_(pending_by_region[region].expires, expires)
def assert_no_upload_rows(app):
with app.app_context():
eq_(tables.Batch.query.all(), [])
eq_(tables.PendingUpload.query.all(), [])
def assert_file_response(resp, content, visibility='public', instances=['us-east-1']):
eq_(resp.status_code, 200)
exp = {
"algorithm": "sha512",
"digest": hashlib.sha512(content).hexdigest(),
"size": len(content),
"visibility": visibility,
'instances': instances,
"has_instances": any(instances),
}
eq_(json.loads(resp.data)['result'], exp, resp.data)
def do_patch(client, algo, digest, ops):
return client.open(method='PATCH',
path='/tooltool/file/sha512/{}'.format(digest),
headers=[('Content-Type', 'application/json')],
data=json.dumps(ops))
# tests
def test_is_valid_sha512():
"""is_valid_sha512 recgnizes valid digests and rejects others"""
assert tooltool.is_valid_sha512(ONE_DIGEST)
assert tooltool.is_valid_sha512(TWO_DIGEST)
    assert not tooltool.is_valid_sha512(ONE_DIGEST[:-1])
assert not tooltool.is_valid_sha512(ONE_DIGEST + 'a')
assert not tooltool.is_valid_sha512('a' + ONE_DIGEST)
assert not tooltool.is_valid_sha512('j' * 128)
@test_context
def test_ui(client):
"""The root of the blueprint renders an angular HTML page"""
assert 'angular' in client.get('/tooltool/').data
@moto.mock_s3
@test_context
def test_upload_batch_empty_message(app, client):
"""A POST to /upload with an empty message is rejected."""
batch = mkbatch()
batch['message'] = ''
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_author(app, client):
"""A POST to /upload with an author is rejected."""
batch = mkbatch()
batch['author'] = 'me' # matches authentication
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context.specialize(user=NoEmailUser())
def test_upload_batch_no_user(app, client):
"""A POST to /upload with non-user-associated authentication succeeds,
using the string form of the token as author"""
batch = mkbatch()
resp = upload_batch(client, batch)
eq_(resp.status_code, 200)
assert_batch_response(resp, author='no-email:sorry', files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
@moto.mock_s3
@test_context
def test_upload_batch_empty_files(app, client):
"""A POST to /upload with no files is rejected."""
batch = mkbatch()
batch['files'] = {}
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_algo(app, client):
"""A POST to /upload with an algorithm that is not sha512 is rejected."""
batch = mkbatch()
batch['files']['one']['algorithm'] = 'md4'
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_digest(app, client):
"""A POST to /upload with a bad sha512 digest is rejected."""
batch = mkbatch()
batch['files']['one']['digest'] = 'x' * 128
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_size(app, client):
"""A POST to /upload with a file with the same digest and a different length
is rejected"""
batch = mkbatch()
batch['files']['one']['size'] *= 2 # that ain't right!
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context.specialize(user=userperms([]))
def test_upload_batch_no_permissions(app, client):
"""A POST to /upload of a public file without permission to upload fails
with 403."""
batch = mkbatch()
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 403, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_mixed_visibility_no_permissions(app, client):
"""A POST to /upload of public and internal files fails with 403 if the
user only has permission to upload public files."""
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'internal',
}
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 403, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_no_visibility(app, client):
"""If no visibility is supplied for a file in a batch, the request is
invalid (400)"""
# note that it's WSME that enforces this validity
batch = mkbatch()
del batch['files']['one']['visibility']
resp = upload_batch(client, batch)
eq_(resp.status_code, 400, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_success_fresh(client, app):
"""A POST to /upload with a good batch succeeds, returns signed URLs expiring
in one hour, and inserts the new batch into the DB with links to files, but
no instances, and inserts a pending upload row."""
batch = mkbatch()
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
assert_pending_upload(app, ONE_DIGEST, 'us-east-1')
@moto.mock_s3
@test_context
def test_upload_batch_success_existing_pending_upload(client, app):
"""A successful POST to /upload updates the 'expires' column of any relevant
pending uploads."""
with set_time(NOW - 30):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
batch = mkbatch()
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_pending_upload(
app, ONE_DIGEST, 'us-east-1',
expires=relengapi_time.now() + datetime.timedelta(seconds=60))
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
@moto.mock_s3
@test_context
def test_upload_batch_success_no_instances(client, app):
"""A POST to /upload with a batch containing a file that already exists, but
has no instances, succeeds, returns signed URLs expiring in one hour,
inserts the new batch into the DB with links to files, but no instances,
and inserts a pending upload row. This could occur when, for example,
re-trying a failed upload."""
batch = mkbatch()
add_file_to_db(app, ONE, regions=[])
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
assert_pending_upload(app, ONE_DIGEST, 'us-east-1')
@moto.mock_s3
@test_context
def test_upload_batch_success_some_existing_files(client, app):
"""A POST to /upload with a good batch containing some files already present
succeeds, returns signed URLs expiring in one hour, and inserts the new
batch into the DB with links to files, but no instances. Also, the
``region`` query parameter selects a preferred region."""
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'public',
}
# make sure ONE is already in the DB with at least once instance
add_file_to_db(app, ONE, regions=['us-east-1'])
with set_time():
resp = upload_batch(client, batch, region='us-west-2')
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST},
'two': {'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST},
})
# no put_url for the existing file
assert 'put_url' not in result['files']['one']
assert_signed_url(result['files']['two']['put_url'], TWO_DIGEST,
method='PUT', expires_in=60, region='us-west-2')
assert_batch_row(app, result['id'],
files=[
('one', len(ONE), ONE_DIGEST, ['us-east-1']),
('two', len(TWO), TWO_DIGEST, []),
])
assert_pending_upload(app, TWO_DIGEST, 'us-west-2')
@test_context
def test_upload_change_visibility(client, app):
"""Uploading a file that already exists with a different visibility level
fails with 400, even if there are no instances."""
batch = mkbatch()
batch['files']['one']['visibility'] = 'public'
add_file_to_db(app, ONE, regions=[], visibility='internal')
with set_time():
resp = upload_batch(client, batch, region='us-west-2')
eq_(resp.status_code, 400, resp.data)
assert_no_upload_rows(app)
@test_context
def test_upload_complete(client, app):
"""GET /upload/complete/<digest> when the pending upload has expired causes
a delayed call to check_file_pending_uploads and returns 202"""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
with set_time(NOW - tooltool.UPLOAD_EXPIRES_IN - 1):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
with set_time(NOW):
resp = client.get('/tooltool/upload/complete/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 202, resp.data)
cfpu.delay.assert_called_with(ONE_DIGEST)
@test_context
def test_upload_complete_not_expired(client, app):
"""GET /upload/complete/<digest> when the pending upload has not expired returns
409 with a header giving the time until expiration."""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
with set_time(NOW - tooltool.UPLOAD_EXPIRES_IN + 5):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
with set_time(NOW):
resp = client.get('/tooltool/upload/complete/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 409, resp.data)
eq_(resp.headers.get('x-retry-after'), '6') # 5 seconds + 1
eq_(cfpu.delay.mock_calls, [])
@test_context
def test_upload_complete_bad_digest(client, app):
"""GET /upload/complete/<digest> with a bad digest returns 400"""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
resp = client.get('/tooltool/upload/complete/sha512/xyz')
eq_(resp.status_code, 400, resp.data)
cfpu.delay.assert_has_calls([])
@moto.mock_s3
@test_context
def test_download_file_no_such(app, client):
"""Getting /sha512/<digest> for a file that does not exist returns 404"""
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context
def test_download_file_invalid_digest(app, client):
"""Getting /sha512/<digest> for an invalid digest returns 400"""
resp = client.get('/tooltool/sha512/abcd')
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context
def test_download_file_no_instances(app, client):
"""Getting /sha512/<digest> for a file that exists but has no instances
returns 404"""
add_file_to_db(app, ONE, regions=[])
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context
def test_download_file_no_permission(app, client):
"""Getting /sha512/<digest> for a file with a visibility the user doesn't
have permission for returns 404."""
add_file_to_db(app, ONE, visibility='internal')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context
def test_download_file_exists(app, client):
"""Getting /sha512/<digest> for an exisitng file returns a 302 redirect to
a signed URL in a region where it exists."""
add_file_to_db(app, ONE, regions=['us-west-2', 'us-east-1'])
with set_time():
with not_so_random_choice():
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-east-1')
@moto.mock_s3
@test_context.specialize(user=None)
def test_download_file_anonymous_forbidden(app, client):
"""Anonymously downloading a public file is forbidden if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is not set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='public')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=None, config=allow_anon_cfg)
def test_download_file_anonymous_nonpublic_forbidden(app, client):
"""Anonymously downloading an i nternal file is forbidden even if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='internal')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=None, config=allow_anon_cfg)
def test_download_file_anonymous_allowed(app, client):
"""Anonymously downloading a public file is allowed if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='public')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 302)
@moto.mock_s3
@test_context
def test_download_file_exists_not_in_preferred_region(app, client):
"""Getting /sha512/<digest>?region=.. for an exisitng file that does not
exist in the requested region returns a signed URL for a region where the
file does exist."""
add_file_to_db(app, ONE, regions=['us-west-2'])
with set_time():
resp = client.get(
'/tooltool/sha512/{}?region=us-east-1'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-west-2')
@moto.mock_s3
@test_context
def test_download_file_exists_region_choice(app, client):
"""Getting /sha512/<digest> for an exisitng file returns a 302 redirect to
a signed URL in the region where it exists."""
add_file_to_db(app, ONE, regions=['us-west-2', 'us-east-1'])
with set_time():
resp = client.get(
'/tooltool/sha512/{}?region=us-west-2'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-west-2')
@moto.mock_s3
@test_context
def test_search_batches(app, client):
with set_time():
f1 = add_file_to_db(app, ONE)
f1j = {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": True,
}
f2 = add_file_to_db(app, TWO)
f2j = {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": True,
}
add_batch_to_db(
app, 'me@me.com', 'first batch', {'one': f1})
b1j = {
"author": "me@me.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"one": f1j},
"id": 1,
"message": "first batch"
}
add_batch_to_db(
app, 'me@me.com', 'second batch', {'two': f2})
b2j = {
"author": "me@me.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"two": f2j},
"id": 2,
"message": "second batch"
}
add_batch_to_db(
app, 'you@you.com', 'third batch', {'1': f1, '2': f2})
b3j = {
"author": "you@you.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"1": f1j, "2": f2j},
"id": 3,
"message": "third batch"
}
for q, exp_batches in [
('me', [b1j, b2j]),
('ou@y', [b3j]),
('econd batc', [b2j]),
('', [b1j, b2j, b3j]),
]:
resp = client.get('/tooltool/upload?q=' + q)
eq_(resp.status_code, 200, resp.data)
eq_(sorted(json.loads(resp.data)['result']), sorted(exp_batches),
"got: {}\nexp: {}".format(resp.data, exp_batches))
@moto.mock_s3
@test_context
def test_get_batch_not_found(client):
resp = client.get('/tooltool/upload/99')
eq_(resp.status_code, 404, resp.data)
@moto.mock_s3
@test_context
def test_get_batch_found(client):
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'public',
}
with set_time():
resp = upload_batch(client, batch)
eq_(resp.status_code, 200, resp.data)
resp = client.get('/tooltool/upload/1')
eq_(resp.status_code, 200, resp.data)
eq_(json.loads(resp.data)['result'], {
"author": "me",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {
"one": {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": False,
},
"two": {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": False,
}
},
"id": 1,
"message": "a batch"
}, resp.data)
@test_context
def test_get_files(app, client):
"""GETs to /file?q=.. return appropriately filtered files."""
f1 = add_file_to_db(app, ONE)
f1j = {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": True,
}
f2 = add_file_to_db(app, TWO)
f2j = {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": True,
}
add_batch_to_db(
app, 'me@me.com', 'first batch', {'one': f1})
add_batch_to_db(
app, 'me@me.com', 'second batch', {'two': f2})
add_batch_to_db(
app, 'you@you.com', 'third batch', {'1': f1, '2': f2})
for q, exp_files in [
('one', [f1j]),
('2', [f2j]),
(ONE_DIGEST[:8], [f1j]),
(ONE_DIGEST[10:20], []), # digests are prefix-only
('', [f1j, f2j]),
]:
resp = client.get('/tooltool/file?q=' + q)
eq_(resp.status_code, 200)
eq_(sorted(json.loads(resp.data)['result']), sorted(exp_files))
@test_context
def test_get_file_bad_algo(client):
"""A GET to /file/<algo>/<digest> with an unknown algorithm fails with 404"""
eq_(client.get('/tooltool/file/md4/abcd').status_code, 404)
@test_context
def test_get_file_not_found(client):
"""A GET to /file/sha512/<digest> with an unknown digest fails with 404"""
eq_(client.get(
'/tooltool/file/sha512/{}'.format(ONE_DIGEST)).status_code, 404)
@test_context
def test_get_file_success(app, client):
"""A GET to /file/sha512/<digest> with an known digest returns the file"""
add_file_to_db(app, ONE)
resp = client.get('/tooltool/file/sha512/{}'.format(ONE_DIGEST))
assert_file_response(resp, ONE)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_no_such(app, client):
"""A PATCH to /file/<a>/<d> that doesn't exist returns 404."""
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_bad_algo(app, client):
"""A PATCH to /file/<a>/<d> with a bad algorithm returns 404."""
resp = do_patch(client, 'md3', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_no_op(app, client):
"""A PATCH to /file/<a>/<d> with change containing no 'op' returns 400."""
add_file_to_db(app, ONE)
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'pop': 'snap'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_bad_op(app, client):
"""A PATCH to /file/<a>/<d> with change containing a bad 'op' returns 400."""
add_file_to_db(app, ONE)
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'hop'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context
def test_patch_no_perms(app, client):
"""A PATCH to /file/<a>/<d> without tooltool.manage fails with 403"""
add_file_to_db(app, ONE, regions=['us-east-1'])
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_delete_instances_success_no_instances(app, client):
"""A PATCH with op=delete_instances succeeds when there are no instances."""
add_file_to_db(app, ONE, regions=[])
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
assert_file_response(resp, ONE, instances=[])
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_delete_instances_success(app, client):
"""A PATCH with op=delete_instances deletes its instances."""
add_file_to_db(app, ONE, regions=['us-east-1'])
add_file_to_s3(app, ONE, region='us-east-1')
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
assert_file_response(resp, ONE, instances=[])
with app.app_context():
# ensure instances are gone from the DB
f = tables.File.query.first()
eq_(f.instances, [])
# and from S3
conn = app.aws.connect_to('s3', 'us-east-1')
key = conn.get_bucket(
'tt-use1').get_key(util.keyname(ONE_DIGEST))
assert not key, "key still exists"
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_invalid_vis(app, client):
"""A PATCH with op=set_visibility and an invalid visibility fails."""
add_file_to_db(app, ONE, regions=[])
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': '5-eyes'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_success(app, client):
"""A PATCH with op=set_visibility updates the file's visibility."""
add_file_to_db(app, ONE, visibility='public')
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': 'internal'}])
assert_file_response(resp, ONE, visibility='internal')
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'internal')
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_success_no_change(app, client):
"""A PATCH with op=set_visibility with the existing visibility succeeds."""
add_file_to_db(app, ONE, visibility='internal')
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': 'internal'}])
assert_file_response(resp, ONE, visibility='internal')
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'internal')
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_multi_op_patch(app, client):
"""A PATCH with multiple ops performs all of them."""
add_file_to_db(
app, ONE, visibility='internal', regions=['us-east-1', 'us-west-2'])
add_file_to_s3(app, ONE, region='us-east-1')
add_file_to_s3(app, ONE, region='us-west-2')
resp = do_patch(client, 'sha512', ONE_DIGEST, [
{'op': 'set_visibility', 'visibility': 'public'},
{'op': 'delete_instances'},
])
assert_file_response(resp, ONE, visibility='public', instances=[])
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'public')
eq_(f.instances, [])
| hwine/build-relengapi | relengapi/blueprints/tooltool/test_tooltool.py | Python | mpl-2.0 | 32,335 |
from . import test_get_weight
| akretion/delivery-carrier | base_delivery_carrier_label/tests/__init__.py | Python | agpl-3.0 | 30 |
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from __future__ import division, print_function, unicode_literals
TIPO_COBRANCA = (
('0', u'Carteira'),
('1', u'Cheque'),
('2', u'CNAB'),
)
TIPO_COBRANCA_SPED = (
('0', u'Duplicata'),
('1', u'Cheque'),
('2', u'Promissória'),
('3', u'Recibo'),
)
| odoo-brazil/l10n-brazil-wip | l10n_br_financial/constantes.py | Python | agpl-3.0 | 388 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import operator
import string
import random
import factory.fuzzy
from base.models.enums.quadrimesters import DerogationQuadrimester
from base.tests.factories.utils.fuzzy import FuzzyBoolean
from program_management.tests.factories.element import ElementGroupYearFactory, ElementLearningUnitYearFactory
def _generate_block_value():
"""Generate a random string composed of digit between 1 and 6 included.
Each digit can be represented at most once in the string and they are sorted from smallest to greatest.
Ex: "", "156", "2", "456" and so on
"""
population = list(range(1, 7))
k = random.randint(0, len(population))
sample = random.sample(population, k)
sample.sort()
return int("".join([str(element) for element in sample])) if sample else None
class GroupElementYearFactory(factory.django.DjangoModelFactory):
class Meta:
model = "base.GroupElementYear"
django_get_or_create = ('parent_element', 'child_element')
external_id = factory.fuzzy.FuzzyText(length=10, chars=string.digits)
changed = factory.fuzzy.FuzzyNaiveDateTime(datetime.datetime(2016, 1, 1), datetime.datetime(2017, 3, 1))
parent_element = factory.SubFactory(ElementGroupYearFactory)
child_element = factory.SubFactory(ElementGroupYearFactory)
relative_credits = factory.fuzzy.FuzzyInteger(0, 10)
is_mandatory = FuzzyBoolean()
link_type = None
order = None
block = factory.LazyFunction(_generate_block_value)
class GroupElementYearChildLeafFactory(GroupElementYearFactory):
child_element = factory.SubFactory(ElementLearningUnitYearFactory)
| uclouvain/OSIS-Louvain | base/tests/factories/group_element_year.py | Python | agpl-3.0 | 2,912 |
"""
Defines the URL routes for this app.
"""
from django.conf import settings
from django.conf.urls import patterns, url
from ..profile_images.views import ProfileImageView
from .accounts.views import AccountDeactivationView, AccountViewSet
from .preferences.views import PreferencesView, PreferencesDetailView
from .verification_api.views import PhotoVerificationStatusView
ME = AccountViewSet.as_view({
'get': 'get',
})
ACCOUNT_LIST = AccountViewSet.as_view({
'get': 'list',
})
ACCOUNT_DETAIL = AccountViewSet.as_view({
'get': 'retrieve',
'patch': 'partial_update',
})
urlpatterns = patterns(
'',
url(r'^v1/me$', ME, name='own_username_api'),
url(r'^v1/accounts/{}$'.format(settings.USERNAME_PATTERN), ACCOUNT_DETAIL, name='accounts_api'),
url(r'^v1/accounts$', ACCOUNT_LIST, name='accounts_detail_api'),
url(
r'^v1/accounts/{}/image$'.format(settings.USERNAME_PATTERN),
ProfileImageView.as_view(),
name='accounts_profile_image_api'
),
url(
r'^v1/accounts/{}/deactivate/$'.format(settings.USERNAME_PATTERN),
AccountDeactivationView.as_view(),
name='accounts_deactivation'
),
url(
r'^v1/accounts/{}/verification_status/$'.format(settings.USERNAME_PATTERN),
PhotoVerificationStatusView.as_view(),
name='verification_status'
),
url(
r'^v1/preferences/{}$'.format(settings.USERNAME_PATTERN),
PreferencesView.as_view(),
name='preferences_api'
),
url(
r'^v1/preferences/{}/(?P<preference_key>[a-zA-Z0-9_]+)$'.format(settings.USERNAME_PATTERN),
PreferencesDetailView.as_view(),
name='preferences_detail_api'
),
)
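# Illustrative requests matched by the patterns above (a sketch; "staff" and
# "pref-lang" are assumed placeholder values, and the prefix under which this
# urls module is mounted is not shown here):
#
#     GET   v1/me
#     GET   v1/accounts/staff
#     PATCH v1/accounts/staff
#     GET   v1/accounts/staff/verification_status/
#     GET   v1/preferences/staff/pref-lang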
| prarthitm/edxplatform | openedx/core/djangoapps/user_api/urls.py | Python | agpl-3.0 | 1,706 |
# -*- coding: utf-8 -*-
from __future__ import division
from openfisca_core import reforms
from openfisca_france.model.base import FloatCol, Individus, Variable
# Build function
def build_reform(tax_benefit_system):
Reform = reforms.make_reform(
key = 'revenu_de_base_cotisations',
name = u"Réforme des cotisations pour un Revenu de base",
reference = tax_benefit_system,
)
class cotisations_contributives(Variable):
column = FloatCol
entity_class = Individus
label = u"Nouvelles cotisations contributives"
def function(self, simulation, period):
ags = simulation.calculate('ags', period)
agff_tranche_a_employeur = simulation.calculate('agff_tranche_a_employeur', period)
apec_employeur = simulation.calculate('apec_employeur', period)
arrco_tranche_a_employeur = simulation.calculate('arrco_tranche_a_employeur', period)
assedic_employeur = simulation.calculate('assedic_employeur', period)
cotisation_exceptionnelle_temporaire_employeur = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employeur', period)
fonds_emploi_hospitalier = simulation.calculate('fonds_emploi_hospitalier', period)
ircantec_employeur = simulation.calculate('ircantec_employeur', period)
pension_civile_employeur = simulation.calculate('pension_civile_employeur', period)
prevoyance_obligatoire_cadre = simulation.calculate('prevoyance_obligatoire_cadre', period)
rafp_employeur = simulation.calculate('rafp_employeur', period)
vieillesse_deplafonnee_employeur = simulation.calculate('vieillesse_deplafonnee_employeur', period)
vieillesse_plafonnee_employeur = simulation.calculate('vieillesse_plafonnee_employeur', period)
allocations_temporaires_invalidite = simulation.calculate('allocations_temporaires_invalidite', period)
accident_du_travail = simulation.calculate('accident_du_travail', period)
agff_tranche_a_employe = simulation.calculate('agff_tranche_a_employe', period)
agirc_tranche_b_employe = simulation.calculate('agirc_tranche_b_employe', period)
apec_employe = simulation.calculate('apec_employe', period)
arrco_tranche_a_employe = simulation.calculate('arrco_tranche_a_employe', period)
assedic_employe = simulation.calculate('assedic_employe', period)
cotisation_exceptionnelle_temporaire_employe = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employe', period)
ircantec_employe = simulation.calculate('ircantec_employe', period)
pension_civile_employe = simulation.calculate('pension_civile_employe', period)
rafp_employe = simulation.calculate('rafp_employe', period)
vieillesse_deplafonnee_employe = simulation.calculate('vieillesse_deplafonnee_employe', period)
vieillesse_plafonnee_employe = simulation.calculate('vieillesse_plafonnee_employe', period)
cotisations_contributives = (
                # contributory employer contributions in the private sector
ags +
agff_tranche_a_employeur +
apec_employeur +
arrco_tranche_a_employeur +
assedic_employeur +
cotisation_exceptionnelle_temporaire_employeur +
                prevoyance_obligatoire_cadre +  # TODO: contributory or not
vieillesse_deplafonnee_employeur +
vieillesse_plafonnee_employeur +
                # contributory employer contributions in the public sector
fonds_emploi_hospitalier +
ircantec_employeur +
pension_civile_employeur +
rafp_employeur +
                # former non-contributory employer contributions, classified here as contributory
allocations_temporaires_invalidite +
accident_du_travail +
                # former contributory employee contributions in the private sector
agff_tranche_a_employe +
agirc_tranche_b_employe +
apec_employe +
arrco_tranche_a_employe +
assedic_employe +
cotisation_exceptionnelle_temporaire_employe +
vieillesse_deplafonnee_employe +
vieillesse_plafonnee_employe +
                # former contributory employee contributions in the public sector
ircantec_employe +
pension_civile_employe +
rafp_employe
)
return period, cotisations_contributives
class nouv_salaire_de_base(Variable):
reference = tax_benefit_system.column_by_name['salaire_de_base']
        # In the reform, the gross salary is defined as the "super-gross" salary
        # minus the contributory contributions
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
salsuperbrut = simulation.calculate('salsuperbrut', period)
cotisations_contributives = simulation.calculate('cotisations_contributives', period)
nouv_salaire_de_base = (
salsuperbrut -
cotisations_contributives
)
return period, nouv_salaire_de_base
class nouv_csg(Variable):
reference = tax_benefit_system.column_by_name['csg_imposable_salaire']
        # A single CSG rate of 22.5% is applied, funding all non-contributory benefits
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = (
-0.225 * nouv_salaire_de_base
)
return period, nouv_csg
class salaire_net(Variable):
reference = tax_benefit_system.column_by_name['salaire_net']
        # The new CSG (not the one funding the basic income) is subtracted to obtain the new net salary
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = simulation.calculate('nouv_csg', period)
salaire_net = (
nouv_salaire_de_base +
nouv_csg
)
return period, salaire_net
class salaire_imposable(Variable):
reference = tax_benefit_system.column_by_name['salaire_imposable']
        # Starting from the new net salary and, compared with the current taxable salary,
        # we removed: overtime hours and the deductibility of CSG
def function(self, simulation, period):
period = period
hsup = simulation.calculate('hsup', period)
salaire_net = simulation.calculate('salaire_net', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
rev_microsocial_declarant1 = simulation.calculate('rev_microsocial_declarant1', period)
return period, (
salaire_net +
primes_fonction_publique +
indemnite_residence +
supp_familial_traitement +
hsup +
rev_microsocial_declarant1
)
return Reform()
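# Illustrative numeric sketch of the reform's logic (round numbers assumed for
# demonstration, not calibrated values): for a "super-gross" salary of 3000 with
# 900 of contributory contributions, the new gross salary is 3000 - 900 = 2100,
# the new CSG is -0.225 * 2100 = -472.5, and the new net salary is
# 2100 - 472.5 = 1627.5; bonuses, indemnities and other items are then added to
# obtain the taxable salary.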
| openfisca/openfisca-france-extension-revenu-de-base | openfisca_france_extension_revenu_de_base/cotisations.py | Python | agpl-3.0 | 7,784 |
# -*- coding: utf-8 -*-
from django.forms import formset_factory
import pytest
from shoop.admin.modules.products.views.variation.simple_variation_forms import SimpleVariationChildForm, SimpleVariationChildFormSet
from shoop.admin.modules.products.views.variation.variable_variation_forms import VariableVariationChildrenForm
from shoop.core.models.product_variation import ProductVariationVariable, ProductVariationVariableValue
from shoop.testing.factories import create_product
from shoop_tests.utils import printable_gibberish
from shoop_tests.utils.forms import get_form_data
@pytest.mark.django_db
def test_simple_children_formset():
FormSet = formset_factory(SimpleVariationChildForm, SimpleVariationChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
child = create_product(printable_gibberish())
# No links yet
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 0 # No children yet
# Save a link
data = dict(get_form_data(formset, True), **{"form-0-child": child.pk})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert parent.variation_children.filter(pk=child.pk).exists() # Got link'd!
# Remove the link
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 1 # Got the child here
data = dict(get_form_data(formset, True), **{"form-0-DELETE": "1"})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert not parent.variation_children.exists() # Got unlinked
@pytest.mark.django_db
def test_variable_variation_form():
var1 = printable_gibberish()
var2 = printable_gibberish()
parent = create_product(printable_gibberish())
for a in range(4):
for b in range(3):
child = create_product(printable_gibberish())
child.link_to_parent(parent, variables={var1: a, var2: b})
assert parent.variation_children.count() == 4 * 3
form = VariableVariationChildrenForm(parent_product=parent)
assert len(form.fields) == 12
# TODO: Improve this test?
| arth-co/shoop | shoop_tests/admin/test_product_variation.py | Python | agpl-3.0 | 2,122 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from json import loads
from datetime import datetime
from functools import partial
from django.db.models import Q
from django.contrib.gis.measure import Distance
from django.contrib.gis.geos import Point
from opentreemap.util import dotted_split
from treemap.lib.dates import DATETIME_FORMAT
from treemap.models import Boundary, Tree, Plot, Species, TreePhoto
from treemap.udf import UDFModel, UserDefinedCollectionValue
from treemap.util import to_object_name
class ParseException(Exception):
def __init__(self, message):
super(Exception, self).__init__(message)
self.message = message
class ModelParseException(ParseException):
pass
DEFAULT_MAPPING = {'plot': '',
'bioswale': '',
'rainGarden': '',
'rainBarrel': '',
'tree': 'tree__',
'species': 'tree__species__',
'treePhoto': 'tree__treephoto__',
'mapFeaturePhoto': 'mapfeaturephoto__',
'mapFeature': ''}
TREE_MAPPING = {'plot': 'plot__',
'tree': '',
'species': 'species__',
'treePhoto': 'treephoto__',
'mapFeaturePhoto': 'treephoto__',
'mapFeature': 'plot__'}
PLOT_RELATED_MODELS = {Plot, Tree, Species, TreePhoto}
MAP_FEATURE_RELATED_NAMES = {'mapFeature', 'mapFeaturePhoto'}
class Filter(object):
def __init__(self, filterstr, displaystr, instance):
self.filterstr = filterstr
self.display_filter = loads(displaystr) if displaystr else None
self.instance = instance
def get_objects(self, ModelClass):
# Filter out invalid models
model_name = ModelClass.__name__
# This is a special case when we're doing 'tree-centric'
# searches for eco benefits. Trees essentially count
# as plots for the purposes of pruning
if model_name == 'Tree':
model_name = 'Plot'
if not _model_in_display_filters(model_name, self.display_filter):
return ModelClass.objects.none()
if ModelClass == Tree:
mapping = TREE_MAPPING
else:
mapping = DEFAULT_MAPPING
q = create_filter(self.instance, self.filterstr, mapping)
if model_name == 'Plot':
q = _apply_tree_display_filter(q, self.display_filter, mapping)
models = q.basekeys
if _is_valid_models_list_for_model(models, model_name, ModelClass,
self.instance):
queryset = ModelClass.objects.filter(q)
else:
queryset = ModelClass.objects.none()
return queryset
def get_object_count(self, ModelClass):
return self.get_objects(ModelClass).count()
def _is_valid_models_list_for_model(models, model_name, ModelClass, instance):
"""Validates everything in models are valid filters for model_name"""
def collection_udf_set_for_model(Model):
if not issubclass(ModelClass, UDFModel):
return {}
if hasattr(Model, 'instance'):
fake_model = Model(instance=instance)
else:
fake_model = Model()
return set(fake_model.collection_udfs_search_names())
# MapFeature is valid for all models
models = models - MAP_FEATURE_RELATED_NAMES
object_name = to_object_name(model_name)
models = models - {object_name}
if model_name == 'Plot':
related_models = PLOT_RELATED_MODELS
else:
related_models = {ModelClass}
for Model in related_models:
models = models - {to_object_name(Model.__name__)}
if issubclass(Model, UDFModel):
models = models - collection_udf_set_for_model(Model)
return len(models) == 0
class FilterContext(Q):
def __init__(self, *args, **kwargs):
if 'basekey' in kwargs:
self.basekeys = {kwargs['basekey']}
del kwargs['basekey']
else:
self.basekeys = set()
super(FilterContext, self).__init__(*args, **kwargs)
# TODO: Nothing uses add, is it necessary?
def add(self, thing, conn):
if thing.basekeys:
self.basekeys = self.basekeys | thing.basekeys
return super(FilterContext, self).add(thing, conn)
def create_filter(instance, filterstr, mapping):
"""
A filter is a string that must be valid json and conform to
the following grammar:
literal = json literal | GMT date string in 'YYYY-MM-DD HH:MM:SS'
model = 'plot' | 'tree' | 'species'
value-property = 'MIN'
| 'MAX'
| 'EXCLUSIVE'
| 'IN'
| 'IS'
| 'WITHIN_RADIUS'
| 'IN_BOUNDARY'
| 'LIKE'
| 'ISNULL'
combinator = 'AND' | 'OR'
predicate = { model.field: literal }
| { model.field: { (value-property: literal)* }}
filter = predicate
| [combinator, filter*, literal?]
mapping allows for the developer to search focussed on a
particular object group
Returns a Q object that can be applied to a model of your choice
"""
if filterstr is not None and filterstr != '':
query = loads(filterstr)
q = _parse_filter(query, mapping)
else:
q = FilterContext()
if instance:
q = q & FilterContext(instance=instance)
return q
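# A minimal illustrative sketch of the grammar documented above; the helper
# below is not referenced anywhere, and the field names "tree.diameter" and
# "species.genus" are assumptions chosen for demonstration only.
def _example_create_filter_usage(instance):  # pragma: no cover
    filterstr = ('["AND",'
                 ' {"tree.diameter": {"MIN": 10, "MAX": {"VALUE": 50, "EXCLUSIVE": true}}},'
                 ' {"species.genus": {"LIKE": "Quercus"}}]')
    q = create_filter(instance, filterstr, DEFAULT_MAPPING)
    return Plot.objects.filter(q)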
def _parse_filter(query, mapping):
if type(query) is dict:
return _parse_predicate(query, mapping)
elif type(query) is list:
predicates = [_parse_filter(p, mapping) for p in query[1:]]
return _apply_combinator(query[0], predicates)
def _parse_predicate(query, mapping):
qs = [_parse_predicate_pair(*kv, mapping=mapping)
for kv in query.iteritems()]
return _apply_combinator('AND', qs)
def _parse_predicate_key(key, mapping):
format_string = 'Keys must be in the form of "model.field", not "%s"'
model, field = dotted_split(key, 2,
failure_format_string=format_string,
cls=ParseException)
if _is_udf(model):
__, mapping_model, __ = model.split(':')
field = 'id'
else:
mapping_model = model
if mapping_model not in mapping:
raise ModelParseException(
'Valid models are: %s or a collection UDF, not "%s"' %
(mapping.keys(), model))
return model, mapping[mapping_model] + field
def _parse_value(value):
"""
A value can be either:
* A date
* A literal
* A list of other values
"""
if type(value) is list:
return [_parse_value(v) for v in value]
try:
return datetime.strptime(value, DATETIME_FORMAT)
except (ValueError, TypeError):
return value
def _parse_min_max_value_fn(operator):
"""
    Returns a function that produces a singleton
    dictionary of Django ORM operands for the given
    query operator.
"""
def fn(predicate_value, field=None):
# a min/max predicate can either take
# a value or a dictionary that provides
# a VALUE and EXCLUSIVE flag.
if type(predicate_value) == dict:
raw_value = predicate_value.get('VALUE')
exclusive = predicate_value.get('EXCLUSIVE')
else:
raw_value = predicate_value
exclusive = False
if exclusive:
key = operator
else:
# django use lt/lte and gt/gte
# to handle inclusive/exclusive
key = operator + 'e'
value = _parse_value(raw_value)
if field: # implies hstore
if isinstance(value, datetime):
date_value = value.date().isoformat()
inner_value = {field: date_value}
else:
raise ParseException("Cannot perform min/max comparisons on "
"non-date hstore fields at this time.")
else:
inner_value = value
return {key: inner_value}
return fn
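# Illustrative sketch of the builders returned above (values chosen for
# demonstration): _parse_min_max_value_fn('__gt')(10) yields {'__gte': 10}
# (inclusive by default), while
# _parse_min_max_value_fn('__lt')({'VALUE': 50, 'EXCLUSIVE': True}) yields
# {'__lt': 50}.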
def _parse_within_radius_value(predicate_value, field=None):
"""
    build up the geospatial value for the RHS of an
    ORM call and pair it with the LHS
"""
radius = _parse_value(predicate_value['RADIUS'])
x = _parse_value(predicate_value['POINT']['x'])
y = _parse_value(predicate_value['POINT']['y'])
point = Point(x, y, srid=3857)
return {'__dwithin': (point, Distance(m=radius))}
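# Illustrative sketch (coordinates and radius are arbitrary web-mercator values):
# _parse_within_radius_value({'RADIUS': 100, 'POINT': {'x': 0, 'y': 0}}) returns
# {'__dwithin': (Point(0, 0, srid=3857), Distance(m=100))}; the '__dwithin' key
# is later prefixed with the model's search key in _parse_predicate_pair.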
def _parse_in_boundary(boundary_id, field=None):
boundary = Boundary.objects.get(pk=boundary_id)
return {'__within': boundary.geom}
def _parse_isnull_hstore(value, field):
if value:
return {'__contains': {field: None}}
return {'__contains': [field]}
def _simple_pred(key):
return (lambda value, _: {key: value})
def _hstore_contains_predicate(val, field):
"""
django_hstore builds different sql for the __contains predicate
depending on whether the input value is a list or a single item
so this works for both 'IN' and 'IS'
"""
return {'__contains': {field: val}}
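# Illustrative sketch with hypothetical names: _hstore_contains_predicate('Watered', 'Action')
# and _hstore_contains_predicate(['Watered', 'Pruned'], 'Action') both produce
# {'__contains': {'Action': <val>}}; django_hstore then emits different SQL
# depending on whether <val> is a list (IN) or a single item (IS).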
# a predicate_builder takes a value for the
# corresponding predicate type and returns
# a singleton dictionary with a mapping of
# predicate kwargs to pass to a Q object
PREDICATE_TYPES = {
'MIN': {
'combines_with': {'MAX'},
'predicate_builder': _parse_min_max_value_fn('__gt'),
},
'MAX': {
'combines_with': {'MIN'},
'predicate_builder': _parse_min_max_value_fn('__lt'),
},
'IN': {
'combines_with': set(),
'predicate_builder': _simple_pred('__in'),
},
'IS': {
'combines_with': set(),
'predicate_builder': _simple_pred('')
},
'LIKE': {
'combines_with': set(),
'predicate_builder': _simple_pred('__icontains')
},
'ISNULL': {
'combines_with': set(),
'predicate_builder': _simple_pred('__isnull')
},
'WITHIN_RADIUS': {
'combines_with': set(),
'predicate_builder': _parse_within_radius_value,
},
'IN_BOUNDARY': {
'combines_with': set(),
'predicate_builder': _parse_in_boundary
}
}
HSTORE_PREDICATE_TYPES = {
'MIN': {
'combines_with': {'MAX'},
'predicate_builder': _parse_min_max_value_fn('__gt'),
},
'MAX': {
'combines_with': {'MIN'},
'predicate_builder': _parse_min_max_value_fn('__lt'),
},
'IN': {
'combines_with': set(),
'predicate_builder': _hstore_contains_predicate,
},
'IS': {
'combines_with': set(),
'predicate_builder': _hstore_contains_predicate,
},
'ISNULL': {
'combines_with': set(),
'predicate_builder': _parse_isnull_hstore
},
}
def _parse_dict_value_for_mapping(mapping, valuesdict, field=None):
"""
Loops over the keys provided and returns predicate pairs
if all the keys validate.
Supported keys are:
'MIN', 'MAX', 'IN', 'IS', 'WITHIN_RADIUS', 'IN_BOUNDARY'
All predicates except MIN/MAX are mutually exclusive
"""
params = {}
for value_key in valuesdict:
if value_key not in mapping:
raise ParseException(
'Invalid key: %s in %s' % (value_key, valuesdict))
else:
predicate_props = mapping[value_key]
valid_values = predicate_props['combines_with'].union({value_key})
if not valid_values.issuperset(set(valuesdict.keys())):
raise ParseException(
'Cannot use these keys together: %s in %s' %
(valuesdict.keys(), valuesdict))
else:
predicate_builder = predicate_props['predicate_builder']
param_pair = predicate_builder(valuesdict[value_key], field)
params.update(param_pair)
return params
_parse_dict_value = partial(_parse_dict_value_for_mapping, PREDICATE_TYPES)
_parse_udf_dict_value = partial(_parse_dict_value_for_mapping,
HSTORE_PREDICATE_TYPES)
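# Illustrative sketch of a combined value dictionary (values chosen for
# demonstration): _parse_dict_value({'MIN': 10, 'MAX': 50}) returns
# {'__gte': 10, '__lte': 50}, whereas mixing keys that do not combine, such as
# {'IS': 'x', 'IN': ['y']}, raises ParseException.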
def _parse_predicate_pair(key, value, mapping):
try:
model, search_key = _parse_predicate_key(key, mapping)
except ModelParseException:
# currently, the only case in which a key for another model
# may be sent to a model search is when udfs are sent to
# tree search. therefore we should only allow those to pass.
if _is_udf(key):
return FilterContext()
else:
raise
__, __, field = key.partition('.')
if _is_udf(model) and type(value) == dict:
preds = _parse_udf_dict_value(value, field)
query = {'data' + k: v for (k, v) in preds.iteritems()}
elif _is_udf(model):
query = {'data__contains': {field: value}}
elif type(value) is dict:
query = {search_key + k: v for (k, v)
in _parse_dict_value(value).iteritems()}
else:
query = {search_key: value}
# If the model being searched is a collection UDF, we do an in clause on a
# subquery because we can't easily join to UserDefinedCollectionValue
if _is_udf(model):
__, __, udf_def_pk = model.split(':')
subquery = UserDefinedCollectionValue.objects\
.filter(**query)\
.filter(field_definition=udf_def_pk)\
.distinct('model_id')\
.values_list('model_id', flat=True)
query = {search_key + '__in': subquery}
return FilterContext(basekey=model, **query)
def _apply_combinator(combinator, predicates):
"""
Apply the given combinator to the predicate list
Supported combinators are currently 'AND' and 'OR'
"""
if len(predicates) == 0:
raise ParseException(
'Empty predicate list is not allowed')
q = predicates[0]
if combinator == 'AND':
for p in predicates[1:]:
q = q & p
elif combinator == 'OR':
for p in predicates[1:]:
q = q | p
else:
raise ParseException(
'Only AND and OR combinators supported, not "%s"' %
combinator)
return q
def _model_in_display_filters(model_name, display_filters):
if display_filters is not None:
if model_name == 'Plot':
plot_models = {'Plot', 'EmptyPlot', 'Tree'}
return bool(plot_models.intersection(display_filters))
else:
return model_name in display_filters
return True
def _apply_tree_display_filter(q, display_filter, mapping):
if display_filter is not None:
if 'Plot' in display_filter:
return q
is_empty_plot = 'EmptyPlot' in display_filter
search_key = mapping['tree'] + 'pk__isnull'
q = q & FilterContext(basekey='plot', **{search_key: is_empty_plot})
return q
def _is_udf(model_name):
return model_name.startswith('udf:')
| clever-crow-consulting/otm-core | opentreemap/treemap/search.py | Python | agpl-3.0 | 15,113 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.capabilities.bank import Account, AccountNotFound
from weboob.tools.browser import BasePage
from weboob.tools.misc import to_unicode
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.tools.ordereddict import OrderedDict
__all__ = ['AccountList']
class AccountList(BasePage):
def on_loaded(self):
self.accounts = OrderedDict()
self.parse_table('comptes', Account.TYPE_CHECKING)
self.parse_table('comptesEpargne', Account.TYPE_SAVINGS)
self.parse_table('comptesTitres', Account.TYPE_MARKET)
self.parse_table('comptesVie', Account.TYPE_DEPOSIT)
self.parse_table('comptesRetraireEuros')
def get_accounts_list(self):
return self.accounts.itervalues()
def parse_table(self, what, actype=Account.TYPE_UNKNOWN):
tables = self.document.xpath("//table[@id='%s']" % what, smart_strings=False)
if len(tables) < 1:
return
lines = tables[0].xpath(".//tbody/tr")
for line in lines:
account = Account()
tmp = line.xpath("./td//a")[0]
account.label = to_unicode(tmp.text)
account.type = actype
account._link_id = tmp.get("href")
if 'BourseEnLigne' in account._link_id:
account.type = Account.TYPE_MARKET
tmp = line.xpath("./td/span/strong")
if len(tmp) >= 2:
tmp_id = tmp[0].text
tmp_balance = tmp[1].text
else:
tmp_id = line.xpath("./td//span")[1].text
tmp_balance = tmp[0].text
account.id = tmp_id
account.currency = account.get_currency(tmp_balance)
account.balance = Decimal(FrenchTransaction.clean_amount(tmp_balance))
if account.id in self.accounts:
a = self.accounts[account.id]
a._card_links.append(account._link_id)
if not a.coming:
a.coming = Decimal('0.0')
a.coming += account.balance
else:
account._card_links = []
self.accounts[account.id] = account
def get_account(self, id):
try:
return self.accounts[id]
except KeyError:
raise AccountNotFound('Unable to find account: %s' % id)
| yannrouillard/weboob | modules/bp/pages/accountlist.py | Python | agpl-3.0 | 3,140 |
"""
Tests for Blocks Views
"""
import json
import ddt
from django.test import RequestFactory, TestCase
from django.core.urlresolvers import reverse
import httpretty
from student.tests.factories import UserFactory
from third_party_auth.tests.utils import ThirdPartyOAuthTestMixin, ThirdPartyOAuthTestMixinGoogle
from .constants import DUMMY_REDIRECT_URL
from .. import adapters
from .. import views
from . import mixins
class _DispatchingViewTestCase(TestCase):
"""
Base class for tests that exercise DispatchingViews.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
view_class = None
url = None
def setUp(self):
super(_DispatchingViewTestCase, self).setUp()
self.user = UserFactory()
self.dot_app = self.dot_adapter.create_public_client(
name='test dot application',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dot-app-client-id',
)
self.dop_client = self.dop_adapter.create_public_client(
name='test dop client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dop-app-client-id',
)
def _post_request(self, user, client, token_type=None):
"""
        Call the view with a POST request object with the appropriate format,
returning the response object.
"""
return self.client.post(self.url, self._post_body(user, client, token_type))
def _post_body(self, user, client, token_type=None):
"""
Return a dictionary to be used as the body of the POST request
"""
raise NotImplementedError()
@ddt.ddt
class TestAccessTokenView(mixins.AccessTokenMixin, _DispatchingViewTestCase):
"""
Test class for AccessTokenView
"""
view_class = views.AccessTokenView
url = reverse('access_token')
def _post_body(self, user, client, token_type=None):
"""
Return a dictionary to be used as the body of the POST request
"""
body = {
'client_id': client.client_id,
'grant_type': 'password',
'username': user.username,
'password': 'test',
}
if token_type:
body['token_type'] = token_type
return body
@ddt.data('dop_client', 'dot_app')
def test_access_token_fields(self, client_attr):
client = getattr(self, client_attr)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('access_token', data)
self.assertIn('expires_in', data)
self.assertIn('scope', data)
self.assertIn('token_type', data)
@ddt.data('dop_client', 'dot_app')
def test_jwt_access_token(self, client_attr):
client = getattr(self, client_attr)
response = self._post_request(self.user, client, token_type='jwt')
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('expires_in', data)
self.assertEqual(data['token_type'], 'JWT')
self.assert_valid_jwt_access_token(data['access_token'], self.user, data['scope'].split(' '))
def test_dot_access_token_provides_refresh_token(self):
response = self._post_request(self.user, self.dot_app)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('refresh_token', data)
def test_dop_public_client_access_token(self):
response = self._post_request(self.user, self.dop_client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotIn('refresh_token', data)
@ddt.ddt
@httpretty.activate
class TestAccessTokenExchangeView(ThirdPartyOAuthTestMixinGoogle, ThirdPartyOAuthTestMixin, _DispatchingViewTestCase):
"""
Test class for AccessTokenExchangeView
"""
view_class = views.AccessTokenExchangeView
url = reverse('exchange_access_token', kwargs={'backend': 'google-oauth2'})
def _post_body(self, user, client, token_type=None):
return {
'client_id': client.client_id,
'access_token': self.access_token,
}
@ddt.data('dop_client', 'dot_app')
def test_access_token_exchange_calls_dispatched_view(self, client_attr):
client = getattr(self, client_attr)
self.oauth_client = client
self._setup_provider_response(success=True)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
@ddt.ddt
class TestAuthorizationView(TestCase):
"""
Test class for AuthorizationView
"""
dop_adapter = adapters.DOPAdapter()
def setUp(self):
super(TestAuthorizationView, self).setUp()
self.user = UserFactory()
self.dop_client = self._create_confidential_client(user=self.user, client_id='dop-app-client-id')
def _create_confidential_client(self, user, client_id):
"""
Create a confidential client suitable for testing purposes.
"""
return self.dop_adapter.create_confidential_client(
name='test_app',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL
)
def test_authorization_view(self):
self.client.login(username=self.user.username, password='test')
response = self.client.post(
'/oauth2/authorize/',
{
'client_id': self.dop_client.client_id, # TODO: DOT is not yet supported (MA-2124)
'response_type': 'code',
'state': 'random_state_string',
'redirect_uri': DUMMY_REDIRECT_URL,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
context = response.context_data # pylint: disable=no-member
self.assertIn('form', context)
self.assertIsNone(context['form']['authorize'].value())
self.assertIn('oauth_data', context)
oauth_data = context['oauth_data']
self.assertEqual(oauth_data['redirect_uri'], DUMMY_REDIRECT_URL)
self.assertEqual(oauth_data['state'], 'random_state_string')
class TestViewDispatch(TestCase):
"""
Test that the DispatchingView dispatches the right way.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
def setUp(self):
super(TestViewDispatch, self).setUp()
self.user = UserFactory()
self.view = views._DispatchingView() # pylint: disable=protected-access
self.dop_adapter.create_public_client(
name='',
user=self.user,
client_id='dop-id',
redirect_uri=DUMMY_REDIRECT_URL
)
self.dot_adapter.create_public_client(
name='',
user=self.user,
client_id='dot-id',
redirect_uri=DUMMY_REDIRECT_URL
)
def assert_is_view(self, view_candidate):
"""
Assert that a given object is a view. That is, it is callable, and
takes a request argument. Note: while technically, the request argument
could take any name, this assertion requires the argument to be named
`request`. This is good practice. You should do it anyway.
"""
_msg_base = u'{view} is not a view: {reason}'
msg_not_callable = _msg_base.format(view=view_candidate, reason=u'it is not callable')
msg_no_request = _msg_base.format(view=view_candidate, reason=u'it has no request argument')
self.assertTrue(hasattr(view_candidate, '__call__'), msg_not_callable)
args = view_candidate.func_code.co_varnames
self.assertTrue(args, msg_no_request)
self.assertEqual(args[0], 'request')
def _get_request(self, client_id):
"""
Return a request with the specified client_id in the body
"""
return RequestFactory().post('/', {'client_id': client_id})
def test_dispatching_to_dot(self):
request = self._get_request('dot-id')
self.assertEqual(self.view.select_backend(request), self.dot_adapter.backend)
def test_dispatching_to_dop(self):
request = self._get_request('dop-id')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_no_client(self):
request = self._get_request(None)
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_invalid_client(self):
request = self._get_request('abcesdfljh')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_get_view_for_dot(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dot_adapter.backend))
def test_get_view_for_dop(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dop_adapter.backend))
def test_get_view_for_no_backend(self):
view_object = views.AccessTokenView()
self.assertRaises(KeyError, view_object.get_view_for_backend, None)
| ampax/edx-platform | lms/djangoapps/oauth_dispatch/tests/test_views.py | Python | agpl-3.0 | 9,406 |
#!/usr/bin/env python2.5
"""A test provider for the stress testing."""
# change registry this often [msec]
registryChangeTimeout = 2017
from ContextKit.flexiprovider import *
import gobject
import time
import os
def update():
t = time.time()
dt = int(1000*(t - round(t)))
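    # dt is the signed offset (in ms) of t from the nearest whole second;
    # waiting (1000 - dt) ms below lands the next update close to the next
    # whole-second boundary.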
gobject.timeout_add(1000 - dt, update)
v = int(round(t))
fp.set('test.int', v)
fp.set('test.int2', v)
print t
return False
pcnt = 0
def chgRegistry():
global pcnt
pcnt += 1
if pcnt % 2:
print "1 provider"
os.system('cp 1provider.cdb tmp.cdb; mv tmp.cdb cache.cdb')
else:
print "2 providers"
os.system('cp 2providers.cdb tmp.cdb; mv tmp.cdb cache.cdb')
return True
gobject.timeout_add(1000, update)
# uncomment this to see the "Bus error" XXX
gobject.timeout_add(registryChangeTimeout, chgRegistry)
fp = Flexiprovider([INT('test.int'), INT('test.int2')], 'my.test.provider', 'session')
fp.run()
| dudochkin-victor/contextkit | sandbox/multithreading-tests/stress-test/provider.py | Python | lgpl-2.1 | 946 |
#!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2009 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped trough pythons
# repr, useful for debug purposes).
import sys, os, serial, threading, time
EXITCHARCTER = '\x1d' # GS/CTRL+]
MENUCHARACTER = '\x14' # Menu: CTRL+T
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
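# Illustrative examples: key_description('\x14') returns 'Ctrl+T' and
# key_description('a') returns "'a'".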
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys is used and not the value at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-8s Send the menu character itself to remote
--- %(exchar)-8s Send the exit character to remote
--- %(info)-8s Show info
--- %(upload)-8s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)s RTS %(echo)s local echo
--- %(dtr)s DTR %(break)s BREAK
--- %(lfm)s line feed %(repr)s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
        'version': getattr(serial, 'VERSION', 'unknown'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
# first choose a platform dependant way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console:
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while 1:
z = msvcrt.getch()
                if z == '\0' or z == '\xe0':    # function keys
msvcrt.getch()
else:
if z == '\r':
return '\n'
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console:
def __init__(self):
self.fd = sys.stdin.fileno()
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
console.setup()
sys.exitfunc = cleanup_console #terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = ('\n', '\r', '\r\n')
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm:
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1, stopbits=2)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1, stopbits=2)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def start(self):
self.alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(1)
self.receiver_thread.start()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(1)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
if not transmit_only:
self.receiver_thread.join()
# self.transmitter_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits,
))
sys.stderr.write('--- RTS %s\n' % (self.rts_state and 'active' or 'inactive'))
sys.stderr.write('--- DTR %s\n' % (self.dtr_state and 'active' or 'inactive'))
sys.stderr.write('--- BREAK %s\n' % (self.break_state and 'active' or 'inactive'))
sys.stderr.write('--- software flow control %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s\n' % (REPR_MODES[self.repr_mode],))
sys.stderr.write('--- linefeed: %s\n' % (LF_MODES[self.convert_outgoing],))
try:
sys.stderr.write('--- CTS: %s DSR: %s RI: %s CD: %s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive'),
))
except serial.SerialException:
# on RFC 2217 ports it can happen to no modem state notification was
# yet received. ignore this error.
pass
def reader(self):
"""loop and copy serial->console"""
while self.alive:
try:
data = self.serial.read(1)
# data = self.read()
# check for exit from device
if data == EXITCHARCTER:
self.stop()
break
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for character in data:
sys.stdout.write("%s " % character.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
time.sleep(0.001)
continue
except TypeError as e:
self.alive = False
                # would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""loop and copy console->serial until EXITCHARCTER character is
        found. When MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = console.getkey()
except KeyboardInterrupt:
c = '\x03'
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
#~ elif c in 'pP': # P -> change port XXX reader thread would exit
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
                    elif c == '7':                      # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\r':
pass
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
parser.add_option("-p", "--port",
dest = "port",
help = "port, a number (default 0) or a device name (deprecated option)",
default = None
)
parser.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = 9600
)
parser.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
parser.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
parser.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
parser.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
parser.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
parser.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
parser.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
        1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = None
)
parser.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = None
)
parser.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non error messages",
default = False
)
parser.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application...",
default = 0x1d
)
parser.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
if port is None: port = 0
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
miniterm.start()
miniterm.join(True)
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
if __name__ == '__main__':
main()
| jeditekunum/Cosa | build/miniterm.py | Python | lgpl-2.1 | 23,810 |
#! /usr/bin/env python
"""
Setup script to build a standalone apdumanager.exe executable on Windows
using py2exe. Run "python.exe setup.py py2exe" to build the executable file.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from distutils.core import setup
import py2exe
from smartcard.wx import ICO_SMARTCARD, ICO_READER
Mydata_files = [('images',
['images/mysmartcard.ico',
ICO_SMARTCARD, ICO_READER])]
setup(windows=['apdumanager.py'],
data_files=Mydata_files,
options={"py2exe": {"dll_excludes": ["MSVCP90.dll"]}}
)
| moreati/pyscard | smartcard/Examples/wx/apdumanager/setup.py | Python | lgpl-2.1 | 1,413 |
#!/usr/bin/python
"""Test to verify presentation of selectable list items."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control><Shift>n"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to list item",
["KNOWN ISSUE: We are presenting nothing here",
""]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Right to next list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list Presentation Backgrounds list item'",
" VISIBLE: 'Presentation Backgrounds list it', cursor=1",
"SPEECH OUTPUT: 'Presentation Backgrounds'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"3. Left to previous list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list My Templates list item'",
" VISIBLE: 'My Templates list item', cursor=1",
"SPEECH OUTPUT: 'My Templates'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| pvagner/orca | test/keystrokes/oowriter/ui_role_list_item.py | Python | lgpl-2.1 | 1,384 |
#!/usr/bin/python
"""Test of line navigation output of Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'Line 3'",
" VISIBLE: 'Line 3', cursor=1",
"SPEECH OUTPUT: 'Line 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"3. Line Up",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| GNOME/orca | test/keystrokes/firefox/line_nav_iframes_in_inline_block.py | Python | lgpl-2.1 | 1,242 |
###############################################################
# Copyright 2020 Lawrence Livermore National Security, LLC
# (c.f. AUTHORS, NOTICE.LLNS, COPYING)
#
# This file is part of the Flux resource manager framework.
# For details, see https://github.com/flux-framework.
#
# SPDX-License-Identifier: LGPL-3.0
###############################################################
from flux.resource.Rlist import Rlist
from flux.resource.ResourceSet import ResourceSet
| grondo/flux-core | src/bindings/python/flux/resource/__init__.py | Python | lgpl-3.0 | 469 |
from sys import exit
import argparse
import sobol, morris, extended_fast
parser = argparse.ArgumentParser(description='Perform sensitivity analysis on model output')
parser.add_argument('-m', '--method', type=str, choices=['sobol', 'morris', 'fast'], required=True)
parser.add_argument('-p', '--paramfile', type=str, required=True, help='Parameter range file')
parser.add_argument('-Y', '--model-output-file', type=str, required=True, help='Model output file')
parser.add_argument('-c', '--column', type=int, required=False, default=0, help='Column of output to analyze')
parser.add_argument('--delimiter', type=str, required=False, default=' ', help='Column delimiter in model output file')
parser.add_argument('--sobol-max-order', type=int, required=False, default=2, choices=[1, 2], help='Maximum order of sensitivity indices to calculate (Sobol only)')
parser.add_argument('-X', '--morris-model-input', type=str, required=False, default=None, help='Model inputs (required for Method of Morris only)')
parser.add_argument('-r', '--sobol-bootstrap-resamples', type=int, required=False, default=1000, help='Number of bootstrap resamples for Sobol confidence intervals')
args = parser.parse_args()
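# Illustrative invocation (assumed file names; Sobol analysis of column 0):
#   python -m SALib.analyze -m sobol -p params.txt -Y model_output.txt -c 0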
if args.method == 'sobol':
calc_second_order = (args.sobol_max_order == 2)
sobol.analyze(args.paramfile, args.model_output_file, args.column, calc_second_order, num_resamples = args.sobol_bootstrap_resamples, delim = args.delimiter)
elif args.method == 'morris':
if args.morris_model_input is not None:
morris.analyze(args.paramfile, args.morris_model_input, args.model_output_file, args.column, delim = args.delimiter)
else:
print "Error: model input file is required for Method of Morris. Run with -h flag to see usage."
exit()
elif args.method == 'fast':
extended_fast.analyze(args.paramfile, args.model_output_file, args.column, delim = args.delimiter) | dhyams/SALib | SALib/analyze/__main__.py | Python | lgpl-3.0 | 1,927 |
import sys
from java.util import Vector
def addTemplate(core):
core.spawnService.addLairTemplate('dantooine_voritor_hunter_lair_2', 'slinking_voritor_hunter', 15, 'object/tangible/lair/base/poi_all_lair_bones.iff')
return | ProjectSWGCore/NGECore2 | scripts/mobiles/lairs/dantooine_voritor_hunter_lair_1.py | Python | lgpl-3.0 | 224 |
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
import shlex
import shutil
import socket
import subprocess
import sys
import itertools
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile('([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2 will choke in urlencode on mixture of byte and unicode strings.
# Possible solutions are to either port it from python 3 with all
    # its friends or manually ensure the input query contains only byte strings.
    # We will stick with the latter, thus recursively encoding the whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
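# Illustrative example: on Python 2 the fallback above gives
# compat_urllib_parse_urlencode({'a': u'\xe4'}) == 'a=%C3%A4', since unicode
# values are first encoded to UTF-8 byte strings.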
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
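# Illustrative example: opening 'data:text/plain;base64,SGVsbG8=' through this
# handler yields a response whose read() returns b'Hello'.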
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
if sys.version_info[0] >= 3:
compat_etree_fromstring = xml.etree.ElementTree.fromstring
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
etree = xml.etree.ElementTree
try:
_etree_iter = etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
yield el
for sub in _etree_iter(el):
yield sub
# on 2.6 XML doesn't have a parser argument, function copied from CPython
# 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=etree.TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
def compat_xpath(xpath):
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
return xpath
else:
compat_xpath = lambda xpath: xpath
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, compat_str
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
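# Illustrative example: compat_parse_qs('a=1&a=2&b=3') returns
# {'a': ['1', '2'], 'b': ['3']} on both Python 2 and Python 3.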
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
if sys.version_info >= (2, 7, 3):
compat_shlex_split = shlex.split
else:
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
if isinstance(s, compat_str):
s = s.encode('utf-8')
return shlex.split(s, comments, posix)
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
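# Illustrative example: compat_ord(b'ABC'[0]) returns 65 on both Python 2
# (where indexing a byte string yields a str) and Python 3 (where it already
# yields an int).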
compat_os_name = os._name if os.name == 'java' else os.name
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name == 'nt' or compat_os_name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
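# Illustrative example: with COLUMNS/LINES unset and no usable `stty size`
# output, the fallback definition above (used when shutil.get_terminal_size is
# unavailable) returns terminal_size(columns=80, lines=24).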
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
__all__ = [
'compat_HTMLParser',
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookies',
'compat_etree_fromstring',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_http_client',
'compat_http_server',
'compat_itertools_count',
'compat_kwargs',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'shlex_quote',
'subprocess_check_output',
'workaround_optparse_bug9161',
]
| nfedera/rg3-youtube-dl | youtube_dl/compat.py | Python | unlicense | 21,183 |
#!/usr/bin/env python
import csv
import sys
import json
import hashlib
from subprocess import Popen, PIPE
from urlparse import urlparse
DEFAULT_GROUP = "lastpass-import"
class Record:
def __init__(self, d):
self.d = d
self.password = d['password']
if d['grouping'] in [None, "", "(none)"]:
self.group = DEFAULT_GROUP
else:
self.group = d['grouping']
self.d['kind'] = "lastpass imported item"
self.name = d['name']
self.username = d['username']
self.netloc = urlparse(d['url']).netloc
self.text = "{}\n{}".format(
self.password, json.dumps(self.d, sort_keys=True,
indent=2, separators=(',', ': ')))
self.md5 = hashlib.md5(self.text).hexdigest()
if self.name is None or self.name == "":
if self.netloc is None or self.netloc == "":
self.name = self.md5
else:
self.name = self.netloc
if self.username is None or self.username == "":
self.username = "unknown"
self.id = "{}/{}/{}".format(self.group,
self.name.replace('/', '_'),
self.username.replace('/', '_'))
self.items = [self]
def append(self, entry):
self.items.append(entry)
def writeToPass(self):
if len(self.items) == 1:
process = Popen(["pass", "insert", "-m", self.id], stdin=PIPE,
stdout=PIPE, stderr=None)
self.stdout = process.communicate(str(self))
self.result = process.returncode
else:
for (i, v) in enumerate(self.items):
key = "{}/{}".format(self.id, i)
process = Popen(["pass", "insert", "-m", key],
stdin=PIPE, stdout=PIPE, stderr=None)
self.stdout = process.communicate(str(v))
self.result = process.returncode
def __str__(self):
return self.text
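# Illustrative example (assumed CSV row): an entry with grouping 'Email',
# name 'Gmail' and username 'me' gets the pass id 'Email/Gmail/me'; an empty
# grouping falls back to the 'lastpass-import' group.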
class Records:
def __init__(self):
self.d = dict()
def add(self, r):
if r.id not in self.d:
self.d[r.id] = r
else:
self.d[r.id].append(r)
def get(self, k):
return self.d[k]
fn = sys.argv[1]
with open(fn, 'rb') as cf:
lp = csv.DictReader(cf, delimiter=',')
rs = Records()
for l in lp:
r = Record(l)
rs.add(r)
for k, v in rs.d.items():
v.writeToPass()
if v.result != 0:
print "{} {} {}".format(v.result, len(v.items), k)
| itorres/junk-drawer | 2015/lp2pass.py | Python | unlicense | 2,627 |
from __future__ import print_function, division, absolute_import
import unittest
from cu2qu.pens import Cu2QuPen, Cu2QuPointPen
from . import CUBIC_GLYPHS, QUAD_GLYPHS
from .utils import DummyGlyph, DummyPointGlyph
from .utils import DummyPen, DummyPointPen
from fontTools.misc.loggingTools import CapturingLogHandler
from textwrap import dedent
import logging
MAX_ERR = 1.0
class _TestPenMixin(object):
"""Collection of tests that are shared by both the SegmentPen and the
PointPen test cases, plus some helper methods.
"""
maxDiff = None
def diff(self, expected, actual):
import difflib
expected = str(self.Glyph(expected)).splitlines(True)
actual = str(self.Glyph(actual)).splitlines(True)
diff = difflib.unified_diff(
expected, actual, fromfile='expected', tofile='actual')
return "".join(diff)
def convert_glyph(self, glyph, **kwargs):
# draw source glyph onto a new glyph using a Cu2Qu pen and return it
converted = self.Glyph()
pen = getattr(converted, self.pen_getter_name)()
quadpen = self.Cu2QuPen(pen, MAX_ERR, **kwargs)
getattr(glyph, self.draw_method_name)(quadpen)
return converted
def expect_glyph(self, source, expected):
converted = self.convert_glyph(source)
self.assertNotEqual(converted, source)
if not converted.approx(expected):
print(self.diff(expected, converted))
self.fail("converted glyph is different from expected")
def test_convert_simple_glyph(self):
self.expect_glyph(CUBIC_GLYPHS['a'], QUAD_GLYPHS['a'])
self.expect_glyph(CUBIC_GLYPHS['A'], QUAD_GLYPHS['A'])
def test_convert_composite_glyph(self):
source = CUBIC_GLYPHS['Aacute']
converted = self.convert_glyph(source)
# components don't change after quadratic conversion
self.assertEqual(converted, source)
def test_convert_mixed_glyph(self):
# this contains a mix of contours and components
self.expect_glyph(CUBIC_GLYPHS['Eacute'], QUAD_GLYPHS['Eacute'])
def test_reverse_direction(self):
for name in ('a', 'A', 'Eacute'):
source = CUBIC_GLYPHS[name]
normal_glyph = self.convert_glyph(source)
reversed_glyph = self.convert_glyph(source, reverse_direction=True)
            # the number of commands is the same, just their order is inverted
self.assertTrue(
len(normal_glyph.outline), len(reversed_glyph.outline))
self.assertNotEqual(normal_glyph, reversed_glyph)
def test_stats(self):
stats = {}
for name in CUBIC_GLYPHS.keys():
source = CUBIC_GLYPHS[name]
self.convert_glyph(source, stats=stats)
self.assertTrue(stats)
self.assertTrue('1' in stats)
self.assertEqual(type(stats['1']), int)
def test_addComponent(self):
pen = self.Pen()
quadpen = self.Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPen, self).__init__(*args, **kwargs)
self.Glyph = DummyGlyph
self.Pen = DummyPen
self.Cu2QuPen = Cu2QuPen
self.pen_getter_name = 'getPen'
self.draw_method_name = 'draw'
def test__check_contour_is_open(self):
msg = "moveTo is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
with self.assertRaisesRegex(AssertionError, msg):
quadpen.lineTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.qCurveTo((0, 0), (1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.curveTo((0, 0), (1, 1), (2, 2))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.closePath()
with self.assertRaisesRegex(AssertionError, msg):
quadpen.endPath()
quadpen.moveTo((0, 0)) # now it works
quadpen.lineTo((1, 1))
quadpen.qCurveTo((2, 2), (3, 3))
quadpen.curveTo((4, 4), (5, 5), (6, 6))
quadpen.closePath()
def test__check_contour_closed(self):
msg = "closePath or endPath is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.moveTo((1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
# it works if contour is closed
quadpen.closePath()
quadpen.moveTo((1, 1))
quadpen.endPath()
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
def test_qCurveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal qcurve segment point count: 0"):
quadpen.qCurveTo()
def test_qCurveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_qCurveTo_more_than_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal curve segment point count: 0"):
quadpen.curveTo()
def test_curveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_curveTo_2_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_3_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))",
])
def test_curveTo_more_than_3_points(self):
# a 'SuperBezier' as described in fontTools.basePen.AbstractPen
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3), (4, 4))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))",
"pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))",
])
def test_addComponent(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
def test_ignore_single_points(self):
pen = DummyPen()
try:
logging.captureWarnings(True)
with CapturingLogHandler("py.warnings", level="WARNING") as log:
quadpen = Cu2QuPen(pen, MAX_ERR, ignore_single_points=True)
finally:
logging.captureWarnings(False)
quadpen.moveTo((0, 0))
quadpen.endPath()
quadpen.moveTo((1, 1))
quadpen.closePath()
self.assertGreaterEqual(len(log.records), 1)
self.assertIn("ignore_single_points is deprecated",
log.records[0].args[0])
# single-point contours were ignored, so the pen commands are empty
self.assertFalse(pen.commands)
# redraw without ignoring single points
quadpen.ignore_single_points = False
quadpen.moveTo((0, 0))
quadpen.endPath()
quadpen.moveTo((1, 1))
quadpen.closePath()
self.assertTrue(pen.commands)
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.endPath()",
"pen.moveTo((1, 1))",
"pen.closePath()"
])
class TestCu2QuPointPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPointPen, self).__init__(*args, **kwargs)
self.Glyph = DummyPointGlyph
self.Pen = DummyPointPen
self.Cu2QuPen = Cu2QuPointPen
self.pen_getter_name = 'getPointPen'
self.draw_method_name = 'drawPoints'
def test_super_bezier_curve(self):
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
quadpen.beginPath()
quadpen.addPoint((0, 0), segmentType="move")
quadpen.addPoint((1, 1))
quadpen.addPoint((2, 2))
quadpen.addPoint((3, 3))
quadpen.addPoint(
(4, 4), segmentType="curve", smooth=False, name="up", selected=1)
quadpen.endPath()
self.assertEqual(str(pen).splitlines(), """\
pen.beginPath()
pen.addPoint((0, 0), name=None, segmentType='move', smooth=False)
pen.addPoint((0.75, 0.75), name=None, segmentType=None, smooth=False)
pen.addPoint((1.625, 1.625), name=None, segmentType=None, smooth=False)
pen.addPoint((2, 2), name=None, segmentType='qcurve', smooth=True)
pen.addPoint((2.375, 2.375), name=None, segmentType=None, smooth=False)
pen.addPoint((3.25, 3.25), name=None, segmentType=None, smooth=False)
pen.addPoint((4, 4), name='up', segmentType='qcurve', selected=1, smooth=False)
pen.endPath()""".splitlines())
def test__flushContour_restore_starting_point(self):
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
# collect the output of _flushContour before it's sent to _drawPoints
new_segments = []
def _drawPoints(segments):
new_segments.extend(segments)
Cu2QuPointPen._drawPoints(quadpen, segments)
quadpen._drawPoints = _drawPoints
# a closed path (ie. no "move" segmentType)
quadpen._flushContour([
("curve", [
((2, 2), False, None, {}),
((1, 1), False, None, {}),
((0, 0), False, None, {}),
]),
("curve", [
((1, 1), False, None, {}),
((2, 2), False, None, {}),
((3, 3), False, None, {}),
]),
])
# the original starting point is restored: the last segment has become
# the first
self.assertEqual(new_segments[0][1][-1][0], (3, 3))
self.assertEqual(new_segments[-1][1][-1][0], (0, 0))
new_segments = []
# an open path (ie. starting with "move")
quadpen._flushContour([
("move", [
((0, 0), False, None, {}),
]),
("curve", [
((1, 1), False, None, {}),
((2, 2), False, None, {}),
((3, 3), False, None, {}),
]),
])
# the segment order stays the same before and after _flushContour
self.assertEqual(new_segments[0][1][-1][0], (0, 0))
self.assertEqual(new_segments[-1][1][-1][0], (3, 3))
def test_quad_no_oncurve(self):
"""When passed a contour which has no on-curve points, the
Cu2QuPointPen will treat it as a special quadratic contour whose
first point has 'None' coordinates.
"""
self.maxDiff = None
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
quadpen.beginPath()
quadpen.addPoint((1, 1))
quadpen.addPoint((2, 2))
quadpen.addPoint((3, 3))
quadpen.endPath()
self.assertEqual(
str(pen),
dedent(
"""\
pen.beginPath()
pen.addPoint((1, 1), name=None, segmentType=None, smooth=False)
pen.addPoint((2, 2), name=None, segmentType=None, smooth=False)
pen.addPoint((3, 3), name=None, segmentType=None, smooth=False)
pen.endPath()"""
)
)
if __name__ == "__main__":
unittest.main()
| googlefonts/cu2qu | tests/pens_test.py | Python | apache-2.0 | 13,027 |