repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
johan--/Quiz-Program
refs/heads/master
vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/gyp/test/build-option/gyptest-build.py
196
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies simplest-possible build of a "Hello, world!" program using the
default build target.
"""

import TestGyp

gyp_test = TestGyp.TestGyp(workdir='workarea_default')

# Generate project files and build the default target in one step.
gyp_test.run_gyp('hello.gyp', '--build=Default')

# The resulting binary must exist, run, and print the greeting.
gyp_test.run_built_executable('hello', stdout="Hello, world!\n")

# Rebuilding immediately afterwards should be a no-op.
gyp_test.up_to_date('hello.gyp', gyp_test.DEFAULT)

gyp_test.pass_test()
cesc-park/CRCN
refs/heads/master
keras/keras/datasets/reuters.py
10
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from .data_utils import get_file import string import random import os import six.moves.cPickle from six.moves import zip def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15): import re from ..preprocessing.text import Tokenizer wire_topics = [] topic_counts = {} wire_bodies = [] for fname in os.listdir(path): if 'sgm' in fname: s = open(path + fname).read() tag = '<TOPICS>' while tag in s: s = s[s.find(tag)+len(tag):] topics = s[:s.find('</')] if topics and not '</D><D>' in topics: topic = topics.replace('<D>', '').replace('</D>', '') wire_topics.append(topic) topic_counts[topic] = topic_counts.get(topic, 0) + 1 else: continue bodytag = '<BODY>' body = s[s.find(bodytag)+len(bodytag):] body = body[:body.find('</')] wire_bodies.append(body) # only keep most common topics items = list(topic_counts.items()) items.sort(key = lambda x: x[1]) kept_topics = set() for x in items: print(x[0] + ': ' + str(x[1])) if x[1] >= min_samples_per_topic: kept_topics.add(x[0]) print('-') print('Kept topics:', len(kept_topics)) # filter wires with rare topics kept_wires = [] labels = [] topic_indexes = {} for t, b in zip(wire_topics, wire_bodies): if t in kept_topics: if t not in topic_indexes: topic_index = len(topic_indexes) topic_indexes[t] = topic_index else: topic_index = topic_indexes[t] labels.append(topic_index) kept_wires.append(b) # vectorize wires tokenizer = Tokenizer() tokenizer.fit_on_texts(kept_wires) X = tokenizer.texts_to_sequences(kept_wires) print('Sanity check:') for w in ["banana", "oil", "chocolate", "the", "dsft"]: print('...index of', w, ':', tokenizer.word_index.get(w)) dataset = (X, labels) print('-') print('Saving...') six.moves.cPickle.dump(dataset, open(os.path.join('datasets', 'data', 'reuters.pkl'), 'w')) six.moves.cPickle.dump(tokenizer.word_index, open(os.path.join('datasets','data', 
'reuters_word_index.pkl'), 'w')) def load_data(path="reuters.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113): path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/reuters.pkl") f = open(path, 'rb') X, labels = six.moves.cPickle.load(f) f.close() random.seed(seed) random.shuffle(X) random.seed(seed) random.shuffle(labels) if maxlen: new_X = [] new_labels = [] for x, y in zip(X, labels): if len(x) < maxlen: new_X.append(x) new_labels.append(y) X = new_X labels = new_labels if not nb_words: nb_words = max([max(x) for x in X]) X = [[0 if (w >= nb_words or w < skip_top) else w for w in x] for x in X] X_train = X[:int(len(X)*(1-test_split))] y_train = labels[:int(len(X)*(1-test_split))] X_test = X[int(len(X)*(1-test_split)):] y_test = labels[int(len(X)*(1-test_split)):] return (X_train, y_train), (X_test, y_test) def get_word_index(path="reuters_word_index.pkl"): path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/reuters_word_index.pkl") f = open(path, 'rb') return six.moves.cPickle.load(f) if __name__ == "__main__": make_reuters_dataset() (X_train, y_train), (X_test, y_test) = load_data()
dgzurita/odoo
refs/heads/8.0
addons/account_check_writing/account.py
379
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv,fields class account_journal(osv.osv): _inherit = "account.journal" _columns = { 'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'), 'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check if you use a preformated sheet for check'), } class res_company(osv.osv): _inherit = "res.company" _columns = { 'check_layout': fields.selection([ ('top', 'Check on Top'), ('middle', 'Check in middle'), ('bottom', 'Check on bottom'), ],"Check Layout", help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ), } _defaults = { 'check_layout' : lambda *a: 'top', } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
axinging/sky_engine
refs/heads/master
sky/engine/build/gn_list_to_space_separated_string.py
77
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import sys # Converts its arguments to a single GN-style string, with each element # quoted and separated by a space. result = "" for i in sys.argv[1:]: result += '"%s" ' % i print result
mikemoraned/geowhatsit-server
refs/heads/master
node_modules/heroku-redis-client/node_modules/publish/node_modules/npm/node_modules/node-gyp/gyp/test/same-target-name/gyptest-same-target-name.py
363
#!/usr/bin/env python

# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Check that duplicate targets in a directory gives an error.
"""

import TestGyp

checker = TestGyp.TestGyp()

# A .gyp file defining the same target twice must make gyp exit with a
# non-zero status and emit something on stderr.
checker.run_gyp('all.gyp', chdir='src', status=1, stderr=None)

checker.pass_test()
rmcdermo/macfp-db
refs/heads/master
Liquid_Pool_Fires/Waterloo_Methanol/Computational_Results/2021/EDFAMU/EDFAMU_Waterloo_Methanol_plot_results.py
3
#!/usr/bin/env python3
# McDermott
# Feb 2021

# The macfp module directory must be on the import path. If the relative
# path below does not match your checkout, uncomment the template line and
# substitute the absolute or relative path to your macfp-db repository.
import sys
# sys.path.append('<path to macfp-db>/macfp-db/Utilities/')
sys.path.append('../../../../../../macfp-db/Utilities/')

import macfp
import importlib
importlib.reload(macfp)  # use for development (while making changes to macfp.py)
import matplotlib.pyplot as plt

# Group the plotting options so the dataplot call stays readable.
plot_options = dict(
    config_filename='EDFAMU_Waterloo_Methanol_dataplot_config.csv',
    institute='EDFAMU',
    expdir='../../../../../Liquid_Pool_Fires/',
    pltdir='./Plots/',
    close_figs=True,
    verbose=True,
    plot_range=range(1000),
)
macfp.dataplot(**plot_options)

# plt.show()
rootfs/Rusthon
refs/heads/master
regtests/test-c++.py
3
import os, sys, subprocess passed = {} ignore = () TODO_FIX = ( 'chain.py', 'generics_subclasses.py', ) files = os.listdir('./c++') files.reverse() for md in files: if md in TODO_FIX: print 'skip test: %s (TODO fix later)' %md continue elif not md.endswith('.py'): continue print md if md.startswith( ignore ): continue subprocess.check_call([ 'python', '../rusthon.py', '--c++', os.path.join('./c++', md) ]) passed[ md ] = open('/tmp/rusthon-c++-build.cpp').read().split('/*end-builtins*/')[-1] print 'TESTS PASSED:' report = [ 'C++11 Backend Regression Tests', '-----------------------------', 'the following tests compiled, and the binary executed without any errors', ] for md in passed: print md report.append('* [%s](c++/%s)' %(md,md)) report.append('') report.append('input:') report.append('------') report.append('```python') report.extend( open('./c++/'+md, 'rb').read().splitlines() ) report.append('```') report.append('output:') report.append('------') report.append('```c++') report.extend( passed[md].splitlines() ) report.append('```') open('regtest-report-c++.md', 'wb').write('\n'.join(report))
boblefrag/lolyx
refs/heads/master
lolyx/resume/tests/urls.py
1
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Test all public and private urls
"""
from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client

from lolyx.resume.models import Resume


class UrlsTests(TestCase):  # pylint: disable-msg=R0904
    """Exercise the resume URLs for anonymous and authenticated access."""

    def setUp(self):
        """Start from a clean Resume table with one known user."""
        Resume.objects.all().delete()
        self.user = User.objects.create_user('foobar',
                                             'admin_search@bar.com',
                                             'admintest')

    def test_view(self):
        """A resume detail page is publicly visible and shows its title."""
        entry = Resume.objects.create(title='Senior admin', user=self.user)
        browser = Client()
        page = browser.get('/cv/{}/'.format(entry.id))
        self.assertContains(page, entry.title, status_code=200)

    def test_edit(self):
        """The owner can open the edit page of his own resume."""
        entry = Resume.objects.create(title='Senior admin', user=self.user)
        browser = Client()
        browser.login(username='foobar', password='admintest')
        page = browser.get('/cv/edit/{}/'.format(entry.id))
        self.assertContains(page, entry.title, status_code=200)

    def test_new_resume(self):
        """The creation page serves a form to anonymous visitors."""
        browser = Client()
        page = browser.get('/cv/new/')
        self.assertContains(page, 'form', status_code=200)

    def test_resume_newpost(self):
        """Posting a valid creation form succeeds for a logged-in user."""
        token = '88c25fbe6ed4871bf9e8e83820a4e001'
        datas = {'title': 'foobar', 'csrfmiddlewaretoken': token}
        browser = Client()
        browser.login(username='foobar', password='admintest')
        resp = browser.post('/cv/new/', datas)
        self.assertEqual(resp.status_code, 200)
        # pylint: disable-msg=E1103
vipulkanade/EventbriteDjango
refs/heads/master
lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py
224
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2014 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import logging __version__ = '0.2.1' class DistlibException(Exception): pass try: from logging import NullHandler except ImportError: # pragma: no cover class NullHandler(logging.Handler): def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None logger = logging.getLogger(__name__) logger.addHandler(NullHandler())
naousse/odoo
refs/heads/8.0
addons/hr_payroll/wizard/hr_payroll_payslips_by_employees.py
337
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time
from datetime import datetime
from dateutil import relativedelta

from openerp.osv import fields, osv
from openerp.tools.translate import _


class hr_payslip_employees(osv.osv_memory):
    """Transient wizard: creates one payslip per selected employee for the
    payslip run (batch) it was launched from."""

    _name = 'hr.payslip.employees'
    _description = 'Generate payslips for all selected employees'
    _columns = {
        # Employees the payslips will be generated for.
        'employee_ids': fields.many2many('hr.employee', 'hr_employee_group_rel', 'payslip_id', 'employee_id', 'Employees'),
    }

    def compute_sheet(self, cr, uid, ids, context=None):
        """Create and compute a payslip for every selected employee.

        The run period (date_start/date_end) and the credit-note flag come
        from the active hr.payslip.run record whose id is expected in
        ``context['active_id']``. Raises an osv.except_osv warning when no
        employee is selected. Returns the close-window action.
        """
        emp_pool = self.pool.get('hr.employee')
        slip_pool = self.pool.get('hr.payslip')
        run_pool = self.pool.get('hr.payslip.run')
        slip_ids = []
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        run_data = {}
        # Pull the batch header (dates + credit-note flag) from the run
        # record the wizard was opened on, if any.
        if context and context.get('active_id', False):
            run_data = run_pool.read(cr, uid, [context['active_id']], ['date_start', 'date_end', 'credit_note'])[0]
        from_date = run_data.get('date_start', False)
        to_date = run_data.get('date_end', False)
        credit_note = run_data.get('credit_note', False)
        if not data['employee_ids']:
            raise osv.except_osv(_("Warning!"), _("You must select employee(s) to generate payslip(s)."))
        for emp in emp_pool.browse(cr, uid, data['employee_ids'], context=context):
            # Let the payslip onchange derive name/structure/contract and the
            # input/worked-days lines for this employee over the run period.
            slip_data = slip_pool.onchange_employee_id(cr, uid, [], from_date, to_date, emp.id, contract_id=False, context=context)
            res = {
                'employee_id': emp.id,
                'name': slip_data['value'].get('name', False),
                'struct_id': slip_data['value'].get('struct_id', False),
                'contract_id': slip_data['value'].get('contract_id', False),
                'payslip_run_id': context.get('active_id', False),
                # (0, 0, vals) one2many commands create the lines in place.
                # NOTE(review): these comprehensions assume the onchange
                # always returns lists here; a False value would raise —
                # presumably guaranteed upstream, confirm in hr.payslip.
                'input_line_ids': [(0, 0, x) for x in slip_data['value'].get('input_line_ids', False)],
                'worked_days_line_ids': [(0, 0, x) for x in slip_data['value'].get('worked_days_line_ids', False)],
                'date_from': from_date,
                'date_to': to_date,
                'credit_note': credit_note,
            }
            slip_ids.append(slip_pool.create(cr, uid, res, context=context))
        # Compute all freshly created payslips in a single batch call.
        slip_pool.compute_sheet(cr, uid, slip_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
ksophocleous/grpc
refs/heads/master
src/python/grpcio_test/grpc_test/framework/face/__init__.py
1496
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
wandss/ExportingTool
refs/heads/master
fnetET/extractiontool/migrations/0002_cmisserver_date_creation.py
1
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('extractiontool', '0001_initial'), ] operations = [ migrations.AddField( model_name='cmisserver', name='date_creation', field=models.DateField(default=django.utils.timezone.now), ), ]
MontpellierRessourcesImagerie/openmicroscopy
refs/heads/develop
components/tools/OmeroPy/test/integration/clitest/test_user.py
3
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2013-2014 University of Dundee & Open Microscopy Environment. # All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from omero.cli import NonZeroReturnCode from omero.rtypes import rstring from omero.plugins.user import UserControl from test.integration.clitest.cli import CLITest, RootCLITest from test.integration.clitest.cli import get_user_ids, get_group_ids from test.integration.clitest.cli import UserIdNameFixtures from test.integration.clitest.cli import GroupFixtures from test.integration.clitest.cli import UserFixtures from Glacier2 import PermissionDeniedException import getpass import pytest GroupNames = [str(x) for x in GroupFixtures] UserNames = [str(x) for x in UserFixtures] UserIdNameNames = [str(x) for x in UserIdNameFixtures] sort_keys = [None, "id", "login", "first-name", "last-name", "email"] middlename_prefixes = [None, '-m', '--middlename'] email_prefixes = [None, '-e', '--email'] institution_prefixes = [None, '-i', '--institution'] admin_prefixes = [None, '-a', '--admin'] password_prefixes = [None, '-P', '--userpassword'] class TestUser(CLITest): @classmethod def setup_class(self): super(TestUser, self).setup_class() self.cli.register("user", UserControl, "TEST") self.users = 
self.sf.getAdminService().lookupExperimenters() def setup_method(self, method): super(TestUser, self).setup_method(method) self.args += ["user"] # List subcommand # ======================================================================== @pytest.mark.parametrize("sort_key", sort_keys) @pytest.mark.parametrize("group_format", [None, "count", "long"]) def testList(self, capsys, sort_key, group_format): self.args += ["list"] if sort_key: self.args += ["--sort-by-%s" % sort_key] if group_format: self.args += ["--%s" % group_format] self.cli.invoke(self.args, strict=True) # Read from the stdout out, err = capsys.readouterr() ids = get_user_ids(out, sort_key=sort_key) # Check all users are listed if sort_key == 'login': sorted_list = sorted(self.users, key=lambda x: x.omeName.val) elif sort_key == 'first-name': sorted_list = sorted(self.users, key=lambda x: x.firstName.val) elif sort_key == 'last-name': sorted_list = sorted(self.users, key=lambda x: x.lastName.val) elif sort_key == 'email': sorted_list = sorted(self.users, key=lambda x: ( x.email and x.email.val or "")) else: sorted_list = sorted(self.users, key=lambda x: x.id.val) assert ids == [user.id.val for user in sorted_list] @pytest.mark.parametrize("style", [None, "sql", "csv", "plain", "json"]) def testListWithStyles(self, capsys, style): self.args += ["list"] if style: self.args += ["--style=%s" % style] self.cli.invoke(self.args, strict=True) # Info subcomand # ======================================================================== def testInfoNoArgument(self, capsys): self.args += ["info"] self.cli.invoke(self.args, strict=True) # Read from the stdout out, err = capsys.readouterr() ids = get_user_ids(out) assert ids == [self.user.id.val] @pytest.mark.parametrize("userfixture", UserFixtures, ids=UserNames) def testInfoArgument(self, capsys, userfixture): self.args += ["info"] self.args += userfixture.get_arguments(self.user) self.cli.invoke(self.args, strict=True) # Read from the stdout out, err = 
capsys.readouterr() ids = get_user_ids(out) assert ids == [self.user.id.val] def testInfoInvalidUser(self, capsys): self.args += ["info"] self.args += ["-1"] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) # Listgroups subcomand # ======================================================================== def testListGroupsNoArgument(self, capsys): self.args += ["listgroups"] self.cli.invoke(self.args, strict=True) out, err = capsys.readouterr() ids = get_group_ids(out) roles = self.sf.getAdminService().getSecurityRoles() assert ids == [roles.userGroupId, self.group.id.val] @pytest.mark.parametrize("userfixture", UserFixtures, ids=UserNames) def testListGroupsArgument(self, capsys, userfixture): self.args += ["listgroups"] self.args += userfixture.get_arguments(self.user) self.cli.invoke(self.args, strict=True) out, err = capsys.readouterr() ids = get_group_ids(out) roles = self.sf.getAdminService().getSecurityRoles() assert ids == [roles.userGroupId, self.group.id.val] def testListGroupsInvalidArgument(self, capsys): self.args += ["listgroups"] self.args += ["-1"] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) # Email subcommand # ======================================================================== @pytest.mark.parametrize("oneperline_arg", [None, "-1", "--one"]) def testEmail(self, capsys, oneperline_arg): self.args += ["email", "-i"] if oneperline_arg: self.args += [oneperline_arg] self.cli.invoke(self.args, strict=True) # Read from the stdout out, err = capsys.readouterr() # Check all users are listed emails = [x.email.val for x in self.users if x.email and x.email.val] if oneperline_arg: assert out.strip() == "\n".join(emails) else: assert out.strip() == ", ".join(emails) # Password subcommand # ======================================================================== @pytest.mark.parametrize("is_unicode", [True, False]) def testPassword(self, is_unicode): self.args += ["password"] login = 
self.ctx.userName if is_unicode: password = "ążćę" else: password = self.uuid() self.setup_mock() self.mox.StubOutWithMock(getpass, 'getpass') i1 = 'Please enter password for your user (%s): ' % login i2 = 'Please enter password to be set: ' i3 = 'Please re-enter password to be set: ' getpass.getpass(i1).AndReturn(login) getpass.getpass(i2).AndReturn(password) getpass.getpass(i3).AndReturn(password) self.mox.ReplayAll() try: self.cli.invoke(self.args, strict=True) self.teardown_mock() # Check session creation using new password self.new_client(user=login, password=password) # Check session creation fails with a random password with pytest.raises(PermissionDeniedException): self.new_client(user=login, password=self.uuid) if is_unicode: # Check session creation fails with a combination of unicode # characters with pytest.raises(PermissionDeniedException): self.new_client(user=login, password="żąćę") # Check session creation fails with question marks with pytest.raises(PermissionDeniedException): self.new_client(user=login, password="????") finally: # Restore default password self.sf.getAdminService().changePasswordWithOldPassword( rstring(password), rstring(login)) def testAddAdminOnly(self, capsys): group = self.new_group() login = self.uuid() firstname = self.uuid() lastname = self.uuid() self.args += ["add", login, firstname, lastname] self.args += ["%s" % group.id.val] self.args += ["--userpassword", "%s" % self.uuid()] with pytest.raises(NonZeroReturnCode): self.cli.invoke(self.args, strict=True) out, err = capsys.readouterr() assert err.endswith("SecurityViolation: Admins only!\n") class TestUserRoot(RootCLITest): @classmethod def setup_class(self): super(TestUserRoot, self).setup_class() self.cli.register("user", UserControl, "TEST") self.users = self.sf.getAdminService().lookupExperimenters() def setup_method(self, method): super(TestUserRoot, self).setup_method(method) self.args += ["user"] def getuserids(self, gid): group = 
self.sf.getAdminService().getGroup(gid) return [x.child.id.val for x in group.copyGroupExperimenterMap()] def getmemberids(self, gid): group = self.sf.getAdminService().getGroup(gid) return [x.child.id.val for x in group.copyGroupExperimenterMap() if not x.owner.val] def getownerids(self, gid): group = self.sf.getAdminService().getGroup(gid) return [x.child.id.val for x in group.copyGroupExperimenterMap() if x.owner.val] # User joingroup subcommand # ======================================================================== @pytest.mark.parametrize( "idnamefixture", UserIdNameFixtures, ids=UserIdNameNames) @pytest.mark.parametrize("groupfixture", GroupFixtures, ids=GroupNames) @pytest.mark.parametrize("owner_arg", [None, '--as-owner']) def testJoinGroup(self, idnamefixture, groupfixture, owner_arg): user = self.new_user() group = self.new_group() assert user.id.val not in self.getuserids(group.id.val) self.args += ["joingroup"] self.args += idnamefixture.get_arguments(user) self.args += groupfixture.get_arguments(group) if owner_arg: self.args += [owner_arg] self.cli.invoke(self.args, strict=True) # Check user has been added to the list of member/owners if owner_arg: assert user.id.val in self.getownerids(group.id.val) else: assert user.id.val in self.getmemberids(group.id.val) # User leavegroup subcommand # ======================================================================== @pytest.mark.parametrize( "idnamefixture", UserIdNameFixtures, ids=UserIdNameNames) @pytest.mark.parametrize("groupfixture", GroupFixtures, ids=GroupNames) @pytest.mark.parametrize("is_owner", [True, False]) @pytest.mark.parametrize("owner_arg", [None, '--as-owner']) def testLeaveGroup(self, idnamefixture, groupfixture, is_owner, owner_arg): user = self.new_user() group = self.new_group([user]) if is_owner: self.root.sf.getAdminService().setGroupOwner(group, user) assert user.id.val in self.getownerids(group.id.val) else: assert user.id.val in self.getmemberids(group.id.val) self.args += 
["leavegroup"] self.args += idnamefixture.get_arguments(user) self.args += groupfixture.get_arguments(group) if owner_arg: self.args += [owner_arg] self.cli.invoke(self.args, strict=True) # Check user has been added to the list of member/owners if owner_arg: assert user.id.val not in self.getownerids(group.id.val) else: assert user.id.val not in self.getuserids(group.id.val) # User add subcommand # ======================================================================== @pytest.mark.parametrize("middlename_prefix", middlename_prefixes) @pytest.mark.parametrize("email_prefix", email_prefixes) @pytest.mark.parametrize("institution_prefix", institution_prefixes) @pytest.mark.parametrize("admin_prefix", admin_prefixes) def testAdd(self, middlename_prefix, email_prefix, institution_prefix, admin_prefix): group = self.new_group() login = self.uuid() firstname = self.uuid() lastname = self.uuid() self.args += ["add", login, firstname, lastname] kwargs = { 'omeName': login, 'firstName': firstname, 'lastName': lastname} self.args += ["%s" % group.id.val] if middlename_prefix: middlename = self.uuid() self.args += [middlename_prefix, middlename] kwargs['middleName'] = middlename if email_prefix: email = "%s.%s@%s.org" % (firstname[:6], lastname[:6], self.uuid()[:6]) self.args += [email_prefix, email] kwargs['email'] = email if institution_prefix: institution = self.uuid() self.args += [institution_prefix, institution] kwargs['institution'] = institution if admin_prefix: self.args += [admin_prefix] self.args += ['-P', login] self.cli.invoke(self.args, strict=True) # Check user has been added to the list of member/owners user = self.sf.getAdminService().lookupExperimenter(login) for key, value in kwargs.iteritems(): assert getattr(user, key).val == kwargs[key] assert user.id.val in self.getuserids(group.id.val) if admin_prefix: roles = self.sf.getAdminService().getSecurityRoles() assert user.id.val in self.getuserids(roles.systemGroupId) 
    @pytest.mark.parametrize("groupfixture", GroupFixtures, ids=GroupNames)
    def testAddGroup(self, groupfixture):
        """`user add` accepts the group by every supported fixture form.

        Creates a fresh group and random user credentials, invokes the CLI,
        and verifies the experimenter exists with the given names and is a
        member of the target group.
        """
        group = self.new_group()
        login = self.uuid()
        firstname = self.uuid()
        lastname = self.uuid()
        self.args += ["add", login, firstname, lastname]
        self.args += groupfixture.get_arguments(group)
        # -P sets the new user's password to its own login name
        self.args += ['-P', login]
        self.cli.invoke(self.args, strict=True)

        # Check user has been added to the list of member/owners
        user = self.sf.getAdminService().lookupExperimenter(login)
        assert user.omeName.val == login
        assert user.firstName.val == firstname
        assert user.lastName.val == lastname
        assert user.id.val in self.getuserids(group.id.val)

    @pytest.mark.parametrize("password_prefix", password_prefixes)
    @pytest.mark.parametrize("is_unicode", [True, False])
    def testAddPassword(self, password_prefix, is_unicode):
        """`user add` sets the password via flag or interactive prompt.

        When no password flag is given, getpass is mocked so the two
        interactive prompts return the chosen password. Afterwards the test
        verifies login succeeds with that password and fails with a wrong one.
        """
        group = self.new_group()
        login = self.uuid()
        firstname = self.uuid()
        lastname = self.uuid()
        if is_unicode:
            password = "ążćę"
        else:
            password = self.uuid()
        self.args += ["add", login, firstname, lastname]
        self.args += ["%s" % group.id.val]
        if password_prefix:
            self.args += [password_prefix, "%s" % password]
        else:
            # No flag: the CLI prompts twice; stub getpass to answer both.
            self.setup_mock()
            self.mox.StubOutWithMock(getpass, 'getpass')
            i1 = 'Please enter password for your new user (%s): ' % login
            i2 = 'Please re-enter password for your new user (%s): ' % login
            getpass.getpass(i1).AndReturn(password)
            getpass.getpass(i2).AndReturn(password)
            self.mox.ReplayAll()
        self.cli.invoke(self.args, strict=True)
        if not password_prefix:
            self.teardown_mock()

        # Check user has been added to the list of member/owners
        user = self.sf.getAdminService().lookupExperimenter(login)
        assert user.omeName.val == login
        assert user.firstName.val == firstname
        assert user.lastName.val == lastname
        assert user.id.val in self.getuserids(group.id.val)

        # Check session creation using password
        self.new_client(user=login, password=password)

        # Check session creation fails with a random password
        # NOTE(review): self.uuid is passed uncalled (the bound method, not a
        # fresh uuid string) — still a wrong password, but likely meant
        # self.uuid(); confirm intent.
        with pytest.raises(PermissionDeniedException):
            self.new_client(user=login, password=self.uuid)

    def testAddNoPassword(self):
        """`user add --no-password` is rejected by a default-config server."""
        group = self.new_group()
        login = self.uuid()
        firstname = self.uuid()
        lastname = self.uuid()
        self.args += ["add", login, firstname, lastname]
        self.args += ["%s" % group.id.val]
        self.args += ["--no-password"]

        # Assumes the server has the default configuration, i.e.
        # password_required=true
        with pytest.raises(NonZeroReturnCode):
            self.cli.invoke(self.args, strict=True)

    # Password subcommand
    # ========================================================================
    @pytest.mark.parametrize("is_unicode", [True, False])
    def testPassword(self, is_unicode):
        """`user password` changes a user's password interactively.

        Stubs getpass for the three prompts (root password, new password,
        confirmation), then verifies the new password works and wrong or
        near-miss passwords are rejected.
        """
        user = self.new_user()
        login = user.omeName.val
        self.args += ["password", "%s" % login]
        if is_unicode:
            password = "ążćę"
        else:
            password = self.uuid()
        self.setup_mock()
        self.mox.StubOutWithMock(getpass, 'getpass')
        i1 = 'Please enter password for your user (root): '
        i2 = 'Please enter password to be set: '
        i3 = 'Please re-enter password to be set: '
        getpass.getpass(i1).AndReturn(self.root.getProperty("omero.rootpass"))
        getpass.getpass(i2).AndReturn(password)
        getpass.getpass(i3).AndReturn(password)
        self.mox.ReplayAll()
        self.cli.invoke(self.args, strict=True)
        self.teardown_mock()

        # Check session creation using new password
        self.new_client(user=login, password=password)

        # Check session creation fails with a random password
        # NOTE(review): self.uuid passed uncalled here too — see testAddPassword.
        with pytest.raises(PermissionDeniedException):
            self.new_client(user=login, password=self.uuid)

        if is_unicode:
            # Check session creation fails with a combination of unicode
            # characters
            with pytest.raises(PermissionDeniedException):
                self.new_client(user=login, password="żąćę")
            # Check session creation fails with question marks
            with pytest.raises(PermissionDeniedException):
                self.new_client(user=login, password="????")
quietcoolwu/leetcode-python
refs/heads/master
letter_combinations_of_a_phone_number/solution.py
7
class Solution:
    """Phone-keypad letter combinations (classic digit-to-letters problem)."""

    # @return a list of strings, [s1, s2]
    def letterCombinations(self, digits):
        """Return all letter strings spelled by ``digits`` on a phone keypad."""
        keypad = {
            '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
            '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
        }
        return self.combinations(digits, 0, keypad)

    def combinations(self, digits, i, d):
        """Combinations for the suffix ``digits[i:]``; ``['']`` when empty."""
        if i == len(digits):
            return ['']
        suffixes = self.combinations(digits, i + 1, d)
        # Prepend each letter of the current digit to every suffix combination
        # (suffix-major order, matching the original nested-loop order).
        return [letter + tail for tail in suffixes for letter in d[digits[i]]]
zaycev/pcfg_parser
refs/heads/master
unknown.py
1
#!/usr/bin/env python
"""Replace rare leaf labels with "<unk>" in a stream of parse trees."""

import sys, fileinput
import collections
import tree

# First pass: parse every input line into a tree and tally how often each
# leaf label (word) occurs across the whole corpus.
label_counts = collections.defaultdict(int)
parsed_trees = []
for line in fileinput.input():
    parsed = tree.Tree.from_str(line)
    for leaf in parsed.leaves():
        label_counts[leaf.label] += 1
    parsed_trees.append(parsed)

# Second pass: relabel singletons (count < 2) as "<unk>" and emit each tree,
# one per line, in the original order.
for parsed in parsed_trees:
    for leaf in parsed.leaves():
        if label_counts[leaf.label] < 2:
            leaf.label = "<unk>"
    sys.stdout.write("{0}\n".format(parsed))
igemsoftware/SYSU-Software2013
refs/heads/master
project/Python27/Tools/scripts/hotshotmain.py
100
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""
Run a Python script under hotshot's control.

Adapted from a posting on python-dev by Walter Dörwald

usage %prog [ %prog args ] filename [ filename args ]
Any arguments after the filename are used as sys.argv for the filename.
"""

import sys
import optparse
import os
import hotshot
import hotshot.stats

# Default profile-data file used when -p/--profile is not supplied.
PROFILE = "hotshot.prof"

def run_hotshot(filename, profile, args):
    """Profile *filename* under hotshot and print stats to stderr.

    :param filename: path of the script to execute and profile
    :param profile: path of the file to write profile data to
    :param args: argument list installed as the script's sys.argv[1:]
    :return: 0 (suitable as a shell exit status)
    """
    prof = hotshot.Profile(profile)
    # Let the profiled script import its neighbours and see its own argv.
    sys.path.insert(0, os.path.dirname(filename))
    sys.argv = [filename] + args
    prof.run("execfile(%r)" % filename)
    prof.close()
    stats = hotshot.stats.load(profile)
    stats.sort_stats("time", "calls")
    # print_stats uses unadorned print statements, so the only way
    # to force output to stderr is to reassign sys.stdout temporarily.
    # Restore it in a finally block so a failure while printing cannot
    # leave stdout redirected.
    save_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        stats.print_stats()
    finally:
        sys.stdout = save_stdout
    return 0

def main(args):
    """Parse command-line *args* and profile the named script.

    :return: process exit status (0 on success, 1 on usage error)
    """
    parser = optparse.OptionParser(__doc__)
    parser.disable_interspersed_args()
    parser.add_option("-p", "--profile", action="store", default=PROFILE,
                      dest="profile", help='Specify profile file to use')
    (options, args) = parser.parse_args(args)
    if len(args) == 0:
        # BUG FIX: OptionParser.print_help() takes an output *file*, not a
        # message string -- the original print_help("missing script ...")
        # raised AttributeError ('str' object has no attribute 'write').
        sys.stderr.write("missing script to execute\n")
        parser.print_help(sys.stderr)
        return 1
    filename = args[0]
    return run_hotshot(filename, options.profile, args[1:])

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
akshatharaj/django
refs/heads/master
tests/model_formsets_regress/models.py
281
from django.db import models
from django.utils.encoding import python_2_unicode_compatible


class User(models.Model):
    # Unique so it can serve as the ``to_field`` target of the FKs below.
    username = models.CharField(max_length=12, unique=True)
    serial = models.IntegerField()


class UserSite(models.Model):
    # FK keyed on User.username rather than the default primary key.
    user = models.ForeignKey(User, models.CASCADE, to_field="username")
    data = models.IntegerField()


class UserProfile(models.Model):
    # One profile per user: unique FK against the username column.
    user = models.ForeignKey(User, models.CASCADE, unique=True, to_field="username")
    about = models.TextField()


class ProfileNetwork(models.Model):
    # FK to UserProfile keyed on its ``user`` column (itself a to_field FK),
    # i.e. a chain of non-pk foreign keys.
    profile = models.ForeignKey(UserProfile, models.CASCADE, to_field="user")
    network = models.IntegerField()
    identifier = models.IntegerField()


class Place(models.Model):
    name = models.CharField(max_length=50)


class Restaurant(Place):
    # Multi-table inheritance: implicit OneToOne parent link to Place.
    pass


class Manager(models.Model):
    restaurant = models.ForeignKey(Restaurant, models.CASCADE)
    name = models.CharField(max_length=50)


class Network(models.Model):
    name = models.CharField(max_length=15)


@python_2_unicode_compatible
class Host(models.Model):
    network = models.ForeignKey(Network, models.CASCADE)
    hostname = models.CharField(max_length=25)

    def __str__(self):
        return self.hostname
jvanbrug/alanaldavista
refs/heads/master
boto/sdb/connection.py
3
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

# Connection layer for Amazon SimpleDB: builds SDB query parameters and
# dispatches requests through AWSQueryConnection. NOTE: Python 2 module
# (print statements, ``except X, e`` syntax).

import xml.sax
import threading
import boto
from boto import handler
from boto.connection import AWSQueryConnection
from boto.sdb.domain import Domain, DomainMetaData
from boto.sdb.item import Item
from boto.sdb.regioninfo import SDBRegionInfo
from boto.exception import SDBResponseError


class ItemThread(threading.Thread):
    """
    A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
    Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
    ``items`` instance variable after :py:meth:`run() <run>` is called.

    .. tip:: The item retrieval will not start until the
        :func:`run() <boto.sdb.connection.ItemThread.run>` method is called.
    """
    def __init__(self, name, domain_name, item_names):
        """
        :param str name: A thread name. Used for identification.
        :param str domain_name: The name of a SimpleDB
            :class:`Domain <boto.sdb.domain.Domain>`
        :type item_names: string or list of strings
        :param item_names: The name(s) of the items to retrieve from the
            specified :class:`Domain <boto.sdb.domain.Domain>`.
        :ivar list items: A list of items retrieved. Starts as empty list.
        """
        super(ItemThread, self).__init__(name=name)
        #print 'starting %s with %d items' % (name, len(item_names))
        self.domain_name = domain_name
        # Each thread opens its own connection (default credentials/region).
        self.conn = SDBConnection()
        self.item_names = item_names
        self.items = []

    def run(self):
        """
        Start the threaded retrieval of items. Populates the
        ``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
        """
        for item_name in self.item_names:
            item = self.conn.get_attributes(self.domain_name, item_name)
            self.items.append(item)

#boto.set_stream_logger('sdb')

class SDBConnection(AWSQueryConnection):
    """
    This class serves as a gateway to your SimpleDB region (defaults to
    us-east-1). Methods within allow access to SimpleDB
    :class:`Domain <boto.sdb.domain.Domain>` objects and their associated
    :class:`Item <boto.sdb.item.Item>` objects.

    .. tip::
        While you may instantiate this class directly, it may be easier to
        go through :py:func:`boto.connect_sdb`.
    """
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com'
    APIVersion = '2009-04-15'
    ResponseError = SDBResponseError

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 converter=None, security_token=None, validate_certs=True):
        """
        For any keywords that aren't documented, refer to the parent class,
        :py:class:`boto.connection.AWSAuthConnection`. You can avoid having
        to worry about these keyword arguments by instantiating these
        objects via :py:func:`boto.connect_sdb`.

        :type region: :class:`boto.sdb.regioninfo.SDBRegionInfo`
        :keyword region: Explicitly specify a region. Defaults to
            ``us-east-1`` if not specified. You may also specify the
            region in your ``boto.cfg``:

            .. code-block:: cfg

                [SDB]
                region = eu-west-1

        """
        if not region:
            # Resolve the region from boto.cfg, falling back to us-east-1.
            region_name = boto.config.get('SDB', 'region',
                                          self.DefaultRegionName)
            for reg in boto.sdb.regions():
                if reg.name == region_name:
                    region = reg
                    break

        self.region = region
        super(SDBConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy,
                                            proxy_port, proxy_user,
                                            proxy_pass,
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token=security_token,
                                            validate_certs=validate_certs)
        # Running total of SimpleDB BoxUsage (compute seconds) -- see
        # get_usage()/print_usage().
        self.box_usage = 0.0
        # Optional encoder applied to attribute values before transmission.
        self.converter = converter
        self.item_cls = Item

    def _required_auth_capability(self):
        # Tells the auth layer to use SimpleDB-style request signing.
        return ['sdb']

    def set_item_cls(self, cls):
        """
        While the default item class is :py:class:`boto.sdb.item.Item`,
        this default may be overridden. Use this method to change a
        connection's item class.

        :param object cls: The new class to set as this connection's item
            class. See the default item class for inspiration as to what
            your replacement should/could look like.
        """
        self.item_cls = cls

    def _build_name_value_list(self, params, attributes, replace=False,
                               label='Attribute'):
        """
        Flatten ``attributes`` into SDB query params:
        ``<label>.<n>.Name`` / ``<label>.<n>.Value`` (1-based ``n``), with an
        optional ``.Replace`` flag. List values expand to one entry each.
        Keys are sorted for deterministic parameter ordering.
        """
        keys = sorted(attributes.keys())
        i = 1
        for key in keys:
            value = attributes[key]
            if isinstance(value, list):
                for v in value:
                    params['%s.%d.Name' % (label, i)] = key
                    if self.converter:
                        v = self.converter.encode(v)
                    params['%s.%d.Value' % (label, i)] = v
                    if replace:
                        params['%s.%d.Replace' % (label, i)] = 'true'
                    i += 1
            else:
                params['%s.%d.Name' % (label, i)] = key
                if self.converter:
                    value = self.converter.encode(value)
                params['%s.%d.Value' % (label, i)] = value
                if replace:
                    params['%s.%d.Replace' % (label, i)] = 'true'
                i += 1

    def _build_expected_value(self, params, expected_value):
        """
        Encode a conditional-write precondition. ``expected_value`` is
        ``[name, value]`` or ``[name, True|False]`` (existence check).
        """
        params['Expected.1.Name'] = expected_value[0]
        if expected_value[1] is True:
            params['Expected.1.Exists'] = 'true'
        elif expected_value[1] is False:
            params['Expected.1.Exists'] = 'false'
        else:
            params['Expected.1.Value'] = expected_value[1]

    def _build_batch_list(self, params, items, replace=False):
        """
        Flatten a {item_name: attributes-or-None} mapping into
        ``Item.<i>.ItemName`` / ``Item.<i>.Attribute.<j>.*`` params
        (0-based indices, per the Batch* API).
        """
        item_names = items.keys()
        i = 0
        for item_name in item_names:
            params['Item.%d.ItemName' % i] = item_name
            j = 0
            item = items[item_name]
            if item is not None:
                attr_names = item.keys()
                for attr_name in attr_names:
                    value = item[attr_name]
                    if isinstance(value, list):
                        for v in value:
                            if self.converter:
                                v = self.converter.encode(v)
                            params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
                            params['Item.%d.Attribute.%d.Value' % (i, j)] = v
                            if replace:
                                params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
                            j += 1
                    else:
                        params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
                        if self.converter:
                            value = self.converter.encode(value)
                        params['Item.%d.Attribute.%d.Value' % (i, j)] = value
                        if replace:
                            params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
                        j += 1
            i += 1

    def _build_name_list(self, params, attribute_names):
        """Encode a sorted list of attribute names as ``Attribute.<n>.Name``.

        NOTE: sorts ``attribute_names`` in place (caller's list is mutated).
        """
        i = 1
        attribute_names.sort()
        for name in attribute_names:
            params['Attribute.%d.Name' % i] = name
            i += 1

    def get_usage(self):
        """
        Returns the BoxUsage (in USD) accumulated on this specific
        SDBConnection instance.

        .. tip:: This can be out of date, and should only be treated as a
            rough estimate. Also note that this estimate only applies to the
            requests made on this specific connection instance. It is by
            no means an account-wide estimate.

        :rtype: float
        :return: The accumulated BoxUsage of all requests made on the
            connection.
        """
        return self.box_usage

    def print_usage(self):
        """
        Print the BoxUsage and approximate costs of all requests made on
        this specific SDBConnection instance.

        .. tip:: This can be out of date, and should only be treated as a
            rough estimate. Also note that this estimate only applies to the
            requests made on this specific connection instance. It is by
            no means an account-wide estimate.
        """
        print 'Total Usage: %f compute seconds' % self.box_usage
        # 0.14 USD per compute-hour was the SimpleDB price when written;
        # presumably stale -- treat as a rough estimate only.
        cost = self.box_usage * 0.14
        print 'Approximate Cost: $%f' % cost

    def get_domain(self, domain_name, validate=True):
        """
        Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
        matches ``domain_name``.

        :param str domain_name: The name of the domain to retrieve
        :keyword bool validate: When ``True``, check to see if the domain
            actually exists. If ``False``, blindly return a
            :py:class:`Domain <boto.sdb.domain.Domain>` object with the
            specified name set.

        :raises: :py:class:`boto.exception.SDBResponseError` if ``validate``
            is ``True`` and no match could be found.

        :rtype: :py:class:`boto.sdb.domain.Domain`
        :return: The requested domain
        """
        domain = Domain(self, domain_name)
        if validate:
            # A 1-row select is the cheapest way to confirm the domain exists;
            # a missing domain makes the server return an error response.
            self.select(domain, """select * from `%s` limit 1""" % domain_name)
        return domain

    def lookup(self, domain_name, validate=True):
        """
        Lookup an existing SimpleDB domain. This differs from
        :py:meth:`get_domain` in that ``None`` is returned if ``validate``
        is ``True`` and no match was found (instead of raising an exception).

        :param str domain_name: The name of the domain to retrieve
        :param bool validate: If ``True``, a ``None`` value will be returned
            if the specified domain can't be found. If ``False``, a
            :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
            returned, regardless of whether it actually exists.

        :rtype: :class:`boto.sdb.domain.Domain` object or ``None``
        :return: The Domain object or ``None`` if the domain does not exist.
        """
        try:
            domain = self.get_domain(domain_name, validate)
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # narrowing to SDBResponseError looks intended -- confirm before
        # changing.
        except:
            domain = None
        return domain

    def get_all_domains(self, max_domains=None, next_token=None):
        """
        Returns a :py:class:`boto.resultset.ResultSet` containing all
        :py:class:`boto.sdb.domain.Domain` objects associated with this
        connection's Access Key ID.

        :keyword int max_domains: Limit the returned
            :py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
            number of members.
        :keyword str next_token: A token string that was returned in an
            earlier call to this method as the ``next_token`` attribute on
            the returned :py:class:`ResultSet <boto.resultset.ResultSet>`
            object. This attribute is set if there are more than Domains than
            the value specified in the ``max_domains`` keyword. Pass the
            ``next_token`` value from you earlier query in this keyword to
            get the next 'page' of domains.
        """
        params = {}
        if max_domains:
            params['MaxNumberOfDomains'] = max_domains
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('ListDomains', params, [('DomainName', Domain)])

    def create_domain(self, domain_name):
        """
        Create a SimpleDB domain.

        :type domain_name: string
        :param domain_name: The name of the new domain

        :rtype: :class:`boto.sdb.domain.Domain` object
        :return: The newly created domain
        """
        params = {'DomainName':domain_name}
        d = self.get_object('CreateDomain', params, Domain)
        d.name = domain_name
        return d

    def get_domain_and_name(self, domain_or_name):
        """
        Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
        ``tuple`` with the following members (in order):

            * In instance of :class:`boto.sdb.domain.Domain` for the
              requested domain
            * The domain's name as a ``str``

        :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
        :param domain_or_name: The domain or domain name to get the domain
            and name for.

        :raises: :class:`boto.exception.SDBResponseError` when an invalid
            domain name is specified.

        :rtype: tuple
        :return: A ``tuple`` with contents outlined as per above.
        """
        if (isinstance(domain_or_name, Domain)):
            return (domain_or_name, domain_or_name.name)
        else:
            # String input: resolves (and validates) via get_domain.
            return (self.get_domain(domain_or_name), domain_or_name)

    def delete_domain(self, domain_or_name):
        """
        Delete a SimpleDB domain.

        .. caution:: This will delete the domain and all items within the
            domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :rtype: bool
        :return: True if successful
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName':domain_name}
        return self.get_status('DeleteDomain', params)

    def domain_metadata(self, domain_or_name):
        """
        Get the Metadata for a SimpleDB domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :rtype: :class:`boto.sdb.domain.DomainMetaData` object
        :return: The newly created domain metadata object
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName':domain_name}
        d = self.get_object('DomainMetadata', params, DomainMetaData)
        d.domain = domain
        return d

    def put_attributes(self, domain_or_name, item_name, attributes,
                       replace=True, expected_value=None):
        """
        Store attributes for a given item in a domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :type item_name: string
        :param item_name: The name of the item whose attributes are being
            stored.

        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes

        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple
            consisting of a single attribute name and expected value. The
            list can be of the form:

            * ['name', 'value']

            In which case the call will first verify that the attribute
            "name" of this item has a value of "value". If it does, the
            delete will proceed, otherwise a ConditionalCheckFailed error
            will be returned. The list can also be of the form:

            * ['name', True|False]

            which will simply check for the existence (True) or
            non-existence (False) of the attribute.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
            existing values or will be added as addition values. Defaults to
            True.

        :rtype: bool
        :return: True if successful
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName' : domain_name,
                  'ItemName' : item_name}
        self._build_name_value_list(params, attributes, replace)
        if expected_value:
            self._build_expected_value(params, expected_value)
        return self.get_status('PutAttributes', params)

    def batch_put_attributes(self, domain_or_name, items, replace=True):
        """
        Store attributes for multiple items in a domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary
            are the item names and the values are themselves dictionaries
            of attribute names/values, exactly the same as the
            attribute_names parameter of the scalar put_attributes call.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
            existing values or will be added as addition values. Defaults to
            True.

        :rtype: bool
        :return: True if successful
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName' : domain_name}
        self._build_batch_list(params, items, replace)
        return self.get_status('BatchPutAttributes', params, verb='POST')

    def get_attributes(self, domain_or_name, item_name, attribute_names=None,
                       consistent_read=False, item=None):
        """
        Retrieve attributes for a given item in a domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :type item_name: string
        :param item_name: The name of the item whose attributes are
            being retrieved.

        :type attribute_names: string or list of strings
        :param attribute_names: An attribute name or list of attribute names.
            This parameter is optional. If not supplied, all attributes will
            be retrieved for the item.

        :type consistent_read: bool
        :param consistent_read: When set to true, ensures that the most
            recent data is returned.

        :type item: :class:`boto.sdb.item.Item`
        :keyword item: Instead of instantiating a new Item object, you may
            specify one to update.

        :rtype: :class:`boto.sdb.item.Item`
        :return: An Item with the requested attribute name/values set on it
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName' : domain_name,
                  'ItemName' : item_name}
        if consistent_read:
            params['ConsistentRead'] = 'true'
        if attribute_names:
            if not isinstance(attribute_names, list):
                attribute_names = [attribute_names]
            self.build_list_params(params, attribute_names, 'AttributeName')
        response = self.make_request('GetAttributes', params)
        body = response.read()
        if response.status == 200:
            if item is None:
                item = self.item_cls(domain, item_name)
            # Parse the XML response directly into the Item via SAX.
            h = handler.XmlHandler(item, self)
            xml.sax.parseString(body, h)
            return item
        else:
            raise SDBResponseError(response.status, response.reason, body)

    def delete_attributes(self, domain_or_name, item_name,
                          attr_names=None, expected_value=None):
        """
        Delete attributes from a given item in a domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :type item_name: string
        :param item_name: The name of the item whose attributes are being
            deleted.

        :type attr_names: dict, list or :class:`boto.sdb.item.Item`
        :param attr_names: Either a list containing attribute names which
            will cause all values associated with that attribute name to be
            deleted or a dict or Item containing the attribute names and
            keys and list of values to delete as the value. If no value is
            supplied, all attribute name/values for the item will be
            deleted.

        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple
            consisting of a single attribute name and expected value. The
            list can be of the form:

            * ['name', 'value']

            In which case the call will first verify that the attribute
            "name" of this item has a value of "value". If it does, the
            delete will proceed, otherwise a ConditionalCheckFailed error
            will be returned. The list can also be of the form:

            * ['name', True|False]

            which will simply check for the existence (True) or
            non-existence (False) of the attribute.

        :rtype: bool
        :return: True if successful
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName':domain_name,
                  'ItemName' : item_name}
        if attr_names:
            if isinstance(attr_names, list):
                self._build_name_list(params, attr_names)
            elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls):
                self._build_name_value_list(params, attr_names)
        if expected_value:
            self._build_expected_value(params, expected_value)
        return self.get_status('DeleteAttributes', params)

    def batch_delete_attributes(self, domain_or_name, items):
        """
        Delete multiple items in a domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object.
        :param domain_or_name: Either the name of a domain or a Domain object

        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary
            are the item names and the values are either:

            * dictionaries of attribute names/values, exactly the same as the
              attribute_names parameter of the scalar put_attributes call.
              The attribute name/value pairs will only be deleted if they
              match the name/value pairs passed in.
            * None which means that all attributes associated with the item
              should be deleted.

        :return: True if successful
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'DomainName' : domain_name}
        self._build_batch_list(params, items, False)
        return self.get_status('BatchDeleteAttributes', params, verb='POST')

    def select(self, domain_or_name, query='', next_token=None,
               consistent_read=False):
        """
        Returns a set of Attributes for item names within domain_name that
        match the query. The query must be expressed in using the SELECT
        style syntax rather than the original SimpleDB query language.
        Even though the select request does not require a domain object,
        a domain object must be passed into this method so the Item objects
        returned can point to the appropriate domain.

        :type domain_or_name: string or :class:`boto.sdb.domain.Domain`
            object
        :param domain_or_name: Either the name of a domain or a Domain object

        :type query: string
        :param query: The SimpleDB query to be performed.

        :type consistent_read: bool
        :param consistent_read: When set to true, ensures that the most
            recent data is returned.

        :rtype: ResultSet
        :return: An iterator containing the results.
        """
        domain, domain_name = self.get_domain_and_name(domain_or_name)
        params = {'SelectExpression' : query}
        if consistent_read:
            params['ConsistentRead'] = 'true'
        if next_token:
            params['NextToken'] = next_token
        try:
            return self.get_list('Select', params, [('Item', self.item_cls)],
                                 parent=domain)
        # Python 2 except syntax; annotate the failed query on the error
        # before re-raising so callers can see what was attempted.
        except SDBResponseError, e:
            e.body = "Query: %s\n%s" % (query, e.body)
            raise e
jan-rybizki/Chempy
refs/heads/master
Chempy/input/yields/West17/time2human.py
1
#! /bin/env python3
"""Human-readable formatting of durations given in seconds.

time2human() converts a float/timedelta into strings such as "1.5 h" or
"23 Myr", choosing a unit (SI prefixes on s/min/h/d/yr) so the value stays
within the requested number of digits. human2time() parses such strings
back into seconds.
"""

# TODO - check consistent use of digits for div_lim
# TODO - check use of div_lim for rounding
# TODO - check use of div_lim, dec_lim, rounding for unit_upgrade

import datetime, sys, math

# Seconds per year: prefer the project's physconst value when available.
try:
    import physconst
except ImportError:
    SEC = 31556926
else:
    SEC = physconst.SEC

# SI prefixes: _Units multiply (kilo..yotta), _units divide (milli..atto).
_Units = ('','k','M','G','T','P','E','Z','Y')
_units = ('','m','u','n','p','f','a')

# Base time units in seconds.
_times = {'s' : 1,
          'min' : 60,
          'h' : 3600,
          'd' : 86400,
          'yr' : SEC}

def _div_lim(x, digits = 0):
    # Threshold slightly below x so values that would round up to x are
    # already promoted to the next unit/prefix.
    return x *(1 - 2.e-15) - 0.5 * 10**(-digits)

def time2human(time, digits = 2, cut = True, extended = False, strip = True, unit = None, unit_upgrade = False, rounding = False, comma = False, numeric_int = False, dec_lim = None, ):  # dec_lim = 10 results in integer output
    """Convert time in seconds in readable format.

    Specify number of total *digits* and whether to *cut* trailing zeros.

    If *extended* is set to True, also the numeric value, the unit
    string, and the scale factor are returned, and the numeric value can
    be returned as integer (numeric_int).

    A minimum decimal limit (*dec_lim*) can be set. With default setting,
    if this is set to 10, no decimals are produced.

    The output *unit* can be enforced as well as *rounding* to the
    specified number of digits and adding of *comma*s.
    """
    if isinstance(time, datetime.timedelta):
        time = time.total_seconds()
    atime = abs(time)
    xtime = atime
    su = 's'
    # NOTE(review): 'length' appears unused below -- confirm before removal.
    length = digits + 1
    div_lim1000 = _div_lim(1000)
    div_lim100 = _div_lim(100)
    div_lim60 = _div_lim(60)
    div_lim1 = _div_lim(1, 3)
    decimals = 0
    # div_lims[k] are promotion thresholds: [to-unit, s->min, min->h,
    # h->d, prefix step]; with dec_lim they scale so at least dec_lim
    # of the smaller unit are shown before promoting.
    if dec_lim is None:
        div_lims = [div_lim1, div_lim100, div_lim60, div_lim100, div_lim1000]
        div_lim = 1
    else:
        div_lims = [
            _div_lim(dec_lim, 3),
            _div_lim(dec_lim * 60),
            _div_lim(dec_lim * 60),
            _div_lim(dec_lim * 100),
            _div_lim(dec_lim * 1000),
            ]
        div_lim = dec_lim
    if unit is not None:
        # Caller fixed the unit; optionally upgrade its SI prefix while the
        # number stays too large ("unit_upgrade").
        su = unit
        xtime /= unit2scale(su)
        while True:
            # Shrink a sub-unit prefix (e.g. ms -> s) while value >= ~1000.
            # 'min' is excluded: its leading 'm' is not a milli- prefix.
            if (su[0] in _units[1:]) and (unit_upgrade) and (su != 'min') and (xtime > div_lim1000):
                i = _units.index(su[0])
                su = _units[i-1] + su[1:]
                xtime /= 1000
                decimals += 3
            else:
                break
        # From a bare base unit, step up into the multiplier prefixes.
        if su in _times and (unit_upgrade) and (xtime > div_lim1000):
            su = _Units[1] + su
            xtime /= 1000
            decimals += 3
        while True:
            # Keep growing the multiplier prefix (k -> M -> G ...).
            if (su[0] in _Units[1:]) and (unit_upgrade) and (xtime > div_lim1000):
                i = _Units.index(su[0])
                su = _Units[i+1] + su[1:]
                xtime /= 1000
                decimals += 3
            else:
                break
    elif atime >= div_lim:
        # big numbers: climb s -> min -> h -> d (cumulative /60,/60,/24),
        # or jump straight to years, then apply SI prefixes to the result.
        if atime > div_lims[1]:
            xtime /= 60
            su = 'min'
        if atime > 60 * div_lims[2]:
            xtime /= 60
            su = 'h'
        if atime > 24 * 60 * div_lims[3]:
            xtime /= 24
            su = 'd'
        if atime > SEC:
            xtime = atime / SEC
            su='yr'
        i = 0
        while xtime > div_lims[4]:
            xtime /= 1000
            i += 1
            if i >= len(_Units):
                return('***')
        su = _Units[i] + su
    elif atime > 0:
        # small numbers: step down through m, u, n, ... prefixes.
        i = 0
        while xtime < div_lims[0]:
            xtime *= 1000
            i += 1
            if i >= len(_units):
                return('***')
        su = _units[i] + su
    # Format: derive total width from the integer part, then build a fixed
    # "{:W.Df}" format so 'digits' significant places are shown.
    sv = "{:20.15f}".format(xtime).strip()
    i = sv.find('.')
    l = max(digits + 1, i + 1) + decimals
    # NOTE(review): 'format' shadows the builtin of the same name (local only).
    format = "{:" + "{:d}".format(l).strip() + "." + "{:d}".format(l-i-1).strip() + "f}"
    if xtime > 0 and rounding:
        # Round to 'digits' significant figures.
        xtime = round(xtime, digits - 1 - math.floor(math.log10(xtime)))
    sv = format.format(xtime).strip()
    if cut:
        # Drop trailing zeros (and a then-dangling decimal point).
        if sv.find('.') > 0:
            sv = sv.rstrip('0')
            sv = sv.rstrip('.')
    if comma:
        # Insert thousands separators into the integer part only.
        l = len(sv)
        j = sv.find('.')
        if j == -1:
            j = l
        for i in range(j-3, 0, -3):
            sv = sv[:i] + ',' + sv[i:]
    if time < 0:
        sv = '-' + sv
    s = sv + ' ' + su
    if strip:
        s = s.strip()
    if extended:
        # Also report the numeric value, unit string and the scale factor
        # (original seconds / displayed value).
        if xtime == 0:
            scale = 1
        else:
            scale = atime / xtime
        numeric = time / scale
        # if sv.find('.') == -1:
        #     numeric = int(numeric)
        return s, numeric, su, scale
    return s

def split_unit(s, num_val = False, num_unit = False):
    """Split "<value> <unit>" (or "<value><unit>") into a (value, unit) pair.

    With num_val the value is converted to int/float; with num_unit the
    unit string is converted to its scale in seconds.
    """
    if s.count(' ') == 1:
        v,u = s.split()
    else:
        # No single separating space: the unit starts after the last
        # digit/dot character.
        j = -1
        for i,c in enumerate(s):
            if c in '1234567890.':
                j = i + 1
        if j == -1:
            # No numeric part at all: value defaults to 1.
            v = 1
            u = s.strip()
        elif j == len(s):
            # No unit part: default to seconds.
            v = s.strip()
            u = 's'
        else:
            v = s[:j].strip()
            u = s[j:].strip()
    if num_val:
        try:
            v = int(v)
        except:
            v = float(v)
            # Collapse integral floats (e.g. 2.0) back to int.
            iv = int(v)
            if iv == v:
                v = iv
    if num_unit:
        u = unit2scale(u)
    return v,u

# Base units (seconds) from which all prefixed unit scales are derived.
_unit_base = dict(
    s = 1,
    min = 60,
    h = 3600,
    d = 86400,
    yr = SEC,
    )

# Lookup table mapping every prefix+unit combination to seconds,
# e.g. 'ms' -> 1e-3, 'kyr' -> 1000 * SEC.
_unit2scale = dict()
for b,v in _unit_base.items():
    for i,u in enumerate(_units):
        _unit2scale[u + b] = v * 10**(-3 * i)
    for i,u in enumerate(_Units[1:]):
        _unit2scale[u + b] = v * 10**(3 *(i + 1))

def unit2scale(unit):
    """Return the scale of *unit* in seconds, or None if unknown."""
    scale = _unit2scale.get(unit, 0)
    if scale != 0:
        return scale

def max_unit(units):
    """Return the largest (longest-duration) unit among *units*."""
    return sorted(units, key = unit2scale)[-1]

def human2time(s):
    """Parse a human-readable duration string back into seconds."""
    s = s.replace(',', '')
    v,u = split_unit(s)
    try:
        return int(v) * unit2scale(u)
    except:
        return float(v) * unit2scale(u)

if __name__ == '__main__':
    # CLI: "time2human.py <seconds>" prints the formatted duration.
    argv = sys.argv
    if len(argv) == 2:
        try:
            print(time2human(float(argv[1])))
        except:
            print('***')
dfranco/shinken
refs/heads/master
test/test_initial_state.py
10
#!/usr/bin/env python
# Copyright (C) 2009-2014:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file tests the host/service ``initial_state`` configuration directive:
# both that valid values produce the expected pre-check state, and that
# invalid values are rejected at configuration time.
#

from shinken_test import unittest, ShinkenTest

import re


class TestInitialState(ShinkenTest):
    """Check host/service state before and after the first check results.

    The fixture configures test_host_1 / the *_service_1 services with an
    explicit ``initial_state`` (down / critical), while test_host_0 and the
    *_service_0 services keep the default (PENDING).
    """

    def setUp(self):
        self.setup_with_file('etc/shinken_initial_state.cfg')

    def test_initial_state(self):
        """Objects start in their configured initial state, then converge
        to the real state once check results arrive."""
        host0 = self.sched.hosts.find_by_name("test_host_0")
        host1 = self.sched.hosts.find_by_name("test_host_1")
        svc00 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_service_0")
        svc01 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_service_1")
        svc10 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_service_0")
        svc11 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_service_1")
        # All six objects must exist in the loaded configuration.
        self.assertIsNotNone(host0)
        self.assertIsNotNone(host1)
        self.assertIsNotNone(svc00)
        self.assertIsNotNone(svc01)
        self.assertIsNotNone(svc10)
        self.assertIsNotNone(svc11)

        # Before any check result: default objects are PENDING with empty
        # output; objects with an explicit initial_state carry the forced
        # state and a placeholder output string.
        self.assertEqual(host0.state, "PENDING")
        self.assertEqual(host0.state_id, 0)
        self.assertEqual(host0.output, "")
        self.assertEqual(host1.state, "DOWN")
        self.assertEqual(host1.state_id, 1)
        self.assertEqual(host1.output, "No host result received")
        self.assertEqual(svc00.state, "PENDING")
        self.assertEqual(svc00.state_id, 0)
        self.assertEqual(svc00.output, "")
        self.assertEqual(svc01.state, "CRITICAL")
        self.assertEqual(svc01.state_id, 2)
        # NOTE(review): 'sevrvice' matches the misspelled placeholder string
        # emitted by the product itself — do not "fix" the expected value here
        # without fixing the producer first.
        self.assertEqual(svc01.output, "No sevrvice result received")
        self.assertEqual(svc10.state, "PENDING")
        self.assertEqual(svc10.state_id, 0)
        self.assertEqual(svc10.output, "")
        self.assertEqual(svc11.state, "CRITICAL")
        self.assertEqual(svc11.state_id, 2)
        self.assertEqual(svc11.output, "No sevrvice result received")

        # Feed one OK/UP result to every object; after this the configured
        # initial states must be fully superseded by the real check results.
        self.scheduler_loop(1, [
            [host0, 0, 'UP test_host_0'],
            [host1, 0, 'UP test_host_1'],
            [svc00, 0, 'OK test_host_0/test_service_0'],
            [svc01, 0, 'OK test_host_0/test_service_1'],
            [svc10, 0, 'OK test_host_1/test_service_0'],
            [svc11, 0, 'OK test_host_1/test_service_1'],
        ], do_sleep=True)
        self.assertEqual(host0.state, "UP")
        self.assertEqual(host0.state_id, 0)
        self.assertEqual(host0.output, "UP test_host_0")
        self.assertEqual(host1.state, "UP")
        self.assertEqual(host1.state_id, 0)
        self.assertEqual(host1.output, "UP test_host_1")
        self.assertEqual(svc00.state, "OK")
        self.assertEqual(svc00.state_id, 0)
        self.assertEqual(svc00.output, "OK test_host_0/test_service_0")
        self.assertEqual(svc01.state, "OK")
        self.assertEqual(svc01.state_id, 0)
        self.assertEqual(svc01.output, "OK test_host_0/test_service_1")
        self.assertEqual(svc10.state, "OK")
        self.assertEqual(svc10.state_id, 0)
        self.assertEqual(svc10.output, "OK test_host_1/test_service_0")
        self.assertEqual(svc11.state, "OK")
        self.assertEqual(svc11.state_id, 0)
        self.assertEqual(svc11.output, "OK test_host_1/test_service_1")


class TestInitialStateBadConf(ShinkenTest):
    """Check that an invalid ``initial_state`` value is rejected with a
    diagnostic log for both the host and the service variant."""

    def setUp(self):
        self.setup_with_file('etc/shinken_initial_state_bad.cfg')

    def test_bad_conf(self):
        # The bad fixture must fail configuration validation outright.
        self.assertFalse(self.conf.conf_is_correct)

        # Get the arbiter's log broks; prepare() deserializes brok payloads
        # so that .data is populated before we read it.
        [b.prepare() for b in self.broks.values()]
        logs = [b.data['log'] for b in self.broks.values() if b.type == 'log']

        # Exactly one error per bad object: hosts accept u/d, services c/u/w/o.
        self.assertEqual(1, len([log for log in logs
                                 if re.search('invalid initial_state: a, should be one of u, d', log)]))
        self.assertEqual(1, len([log for log in logs
                                 if re.search('invalid initial_state: a, should be one of c, u, w, o', log)]))


if __name__ == '__main__':
    unittest.main()
matthewlent/ng-boilerplate-flask
refs/heads/master
venv/lib/python2.7/site-packages/werkzeug/_internal.py
146
# -*- coding: utf-8 -*- """ werkzeug._internal ~~~~~~~~~~~~~~~~~~ This module provides internally used helpers and constants. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import string import inspect from weakref import WeakKeyDictionary from datetime import datetime, date from itertools import chain from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \ range_type, to_native _logger = None _empty_stream = BytesIO() _signature_cache = WeakKeyDictionary() _epoch_ord = date(1970, 1, 1).toordinal() _cookie_params = set((b'expires', b'path', b'comment', b'max-age', b'secure', b'httponly', b'version')) _legal_cookie_chars = (string.ascii_letters + string.digits + u"!#$%&'*+-.^_`|~:").encode('ascii') _cookie_quoting_map = { b',' : b'\\054', b';' : b'\\073', b'"' : b'\\"', b'\\' : b'\\\\', } for _i in chain(range_type(32), range_type(127, 256)): _cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1') _octal_re = re.compile(b'\\\\[0-3][0-7][0-7]') _quote_re = re.compile(b'[\\\\].') _legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]' _cookie_re = re.compile(b"""(?x) (?P<key>[^=]+) \s*=\s* (?P<val> "(?:[^\\\\"]|\\\\.)*" | (?:.*?) ) \s*; """) class _Missing(object): def __repr__(self): return 'no value' def __reduce__(self): return '_missing' _missing = _Missing() def _get_environ(obj): env = getattr(obj, 'environ', obj) assert isinstance(env, dict), \ '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__ return env def _log(type, message, *args, **kwargs): """Log into the internal werkzeug logger.""" global _logger if _logger is None: import logging _logger = logging.getLogger('werkzeug') # Only set up a default log handler if the # end-user application didn't set anything up. 
if not logging.root.handlers and _logger.level == logging.NOTSET: _logger.setLevel(logging.INFO) handler = logging.StreamHandler() _logger.addHandler(handler) getattr(_logger, type)(message.rstrip(), *args, **kwargs) def _parse_signature(func): """Return a signature object for the function.""" if hasattr(func, 'im_func'): func = func.im_func # if we have a cached validator for this function, return it parse = _signature_cache.get(func) if parse is not None: return parse # inspect the function signature and collect all the information positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func) defaults = defaults or () arg_count = len(positional) arguments = [] for idx, name in enumerate(positional): if isinstance(name, list): raise TypeError('cannot parse functions that unpack tuples ' 'in the function signature') try: default = defaults[idx - arg_count] except IndexError: param = (name, False, None) else: param = (name, True, default) arguments.append(param) arguments = tuple(arguments) def parse(args, kwargs): new_args = [] missing = [] extra = {} # consume as many arguments as positional as possible for idx, (name, has_default, default) in enumerate(arguments): try: new_args.append(args[idx]) except IndexError: try: new_args.append(kwargs.pop(name)) except KeyError: if has_default: new_args.append(default) else: missing.append(name) else: if name in kwargs: extra[name] = kwargs.pop(name) # handle extra arguments extra_positional = args[arg_count:] if vararg_var is not None: new_args.extend(extra_positional) extra_positional = () if kwargs and not kwarg_var is not None: extra.update(kwargs) kwargs = {} return new_args, kwargs, missing, extra, extra_positional, \ arguments, vararg_var, kwarg_var _signature_cache[func] = parse return parse def _date_to_unix(arg): """Converts a timetuple, integer or datetime object into the seconds from epoch in utc. 
""" if isinstance(arg, datetime): arg = arg.utctimetuple() elif isinstance(arg, (int, long, float)): return int(arg) year, month, day, hour, minute, second = arg[:6] days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 hours = days * 24 + hour minutes = hours * 60 + minute seconds = minutes * 60 + second return seconds class _DictAccessorProperty(object): """Baseclass for `environ_property` and `header_property`.""" read_only = False def __init__(self, name, default=None, load_func=None, dump_func=None, read_only=None, doc=None): self.name = name self.default = default self.load_func = load_func self.dump_func = dump_func if read_only is not None: self.read_only = read_only self.__doc__ = doc def __get__(self, obj, type=None): if obj is None: return self storage = self.lookup(obj) if self.name not in storage: return self.default rv = storage[self.name] if self.load_func is not None: try: rv = self.load_func(rv) except (ValueError, TypeError): rv = self.default return rv def __set__(self, obj, value): if self.read_only: raise AttributeError('read only property') if self.dump_func is not None: value = self.dump_func(value) self.lookup(obj)[self.name] = value def __delete__(self, obj): if self.read_only: raise AttributeError('read only property') self.lookup(obj).pop(self.name, None) def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, self.name ) def _cookie_quote(b): buf = bytearray() all_legal = True _lookup = _cookie_quoting_map.get _push = buf.extend for char in iter_bytes(b): if char not in _legal_cookie_chars: all_legal = False char = _lookup(char, char) _push(char) if all_legal: return bytes(buf) return bytes(b'"' + buf + b'"') def _cookie_unquote(b): if len(b) < 2: return b if b[:1] != b'"' or b[-1:] != b'"': return b b = b[1:-1] i = 0 n = len(b) rv = bytearray() _push = rv.extend while 0 <= i < n: o_match = _octal_re.search(b, i) q_match = _quote_re.search(b, i) if not o_match and not q_match: rv.extend(b[i:]) break j = k = -1 if 
o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): _push(b[i:k]) _push(b[k + 1:k + 2]) i = k + 2 else: _push(b[i:j]) rv.append(int(b[j + 1:j + 4], 8)) i = j + 4 return bytes(rv) def _cookie_parse_impl(b): """Lowlevel cookie parsing facility that operates on bytes.""" i = 0 n = len(b) while i < n: match = _cookie_re.search(b + b';', i) if not match: break key = match.group('key').strip() value = match.group('val') i = match.end(0) # Ignore parameters. We have no interest in them. if key.lower() not in _cookie_params: yield _cookie_unquote(key), _cookie_unquote(value) def _encode_idna(domain): # If we're given bytes, make sure they fit into ASCII if not isinstance(domain, text_type): domain.decode('ascii') return domain # Otherwise check if it's already ascii, then return try: return domain.encode('ascii') except UnicodeError: pass # Otherwise encode each part separately parts = domain.split('.') for idx, part in enumerate(parts): parts[idx] = part.encode('idna') return b'.'.join(parts) def _decode_idna(domain): # If the input is a string try to encode it to ascii to # do the idna decoding. if that fails because of an # unicode error, then we already have a decoded idna domain if isinstance(domain, text_type): try: domain = domain.encode('ascii') except UnicodeError: return domain # Decode each part separately. If a part fails, try to # decode it with ascii and silently ignore errors. This makes # most sense because the idna codec does not have error handling parts = domain.split(b'.') for idx, part in enumerate(parts): try: parts[idx] = part.decode('idna') except UnicodeError: parts[idx] = part.decode('ascii', 'ignore') return '.'.join(parts) def _make_cookie_domain(domain): if domain is None: return None domain = _encode_idna(domain) if b':' in domain: domain = domain.split(b':', 1)[0] if b'.' 
in domain: return domain raise ValueError( 'Setting \'domain\' for a cookie on a server running localy (ex: ' 'localhost) is not supportted by complying browsers. You should ' 'have something like: \'127.0.0.1 localhost dev.localhost\' on ' 'your hosts file and then point your server to run on ' '\'dev.localhost\' and also set \'domain\' for \'dev.localhost\'' ) def _easteregg(app=None): """Like the name says. But who knows how it works?""" def bzzzzzzz(gyver): import base64 import zlib return zlib.decompress(base64.b64decode(gyver)).decode('ascii') gyver = u'\n'.join([x + (77 - len(x)) * u' ' for x in bzzzzzzz(b''' eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m 9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz 4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5 jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317 8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE 1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG 8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu 
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8 MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4 GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/ nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p 7f2zLkGNv8b191cD/3vs9Q833z8t''').splitlines()]) def easteregged(environ, start_response): def injecting_start_response(status, headers, exc_info=None): headers.append(('X-Powered-By', 'Werkzeug')) return start_response(status, headers, exc_info) if app is not None and environ.get('QUERY_STRING') != 'macgybarchakku': return app(environ, injecting_start_response) injecting_start_response('200 OK', [('Content-Type', 'text/html')]) return [(u''' <!DOCTYPE html> <html> <head> <title>About Werkzeug</title> <style type="text/css"> body { font: 15px Georgia, serif; text-align: center; } a { color: #333; text-decoration: none; } h1 { font-size: 30px; margin: 20px 0 10px 0; } p { margin: 0 0 30px 0; } pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; } </style> </head> <body> <h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1> <p>the Swiss Army knife of Python web development.</p> <pre>%s\n\n\n</pre> </body> </html>''' % gyver).encode('latin1')] return easteregged
BeyondTheClouds/nova
refs/heads/disco/mitaka
nova/tests/unit/objects/test_service.py
5
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import exception as ovo_exc from nova.compute import manager as compute_manager from nova import context from nova import db from nova import exception from nova import objects from nova.objects import aggregate from nova.objects import fields from nova.objects import service from nova import test from nova.tests.unit.objects import test_compute_node from nova.tests.unit.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) def _fake_service(**kwargs): fake_service = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'nova-fake', 'topic': 'fake-service-topic', 'report_count': 1, 'forced_down': False, 'disabled': False, 'disabled_reason': None, 'last_seen_up': None, 'version': service.SERVICE_VERSION, } fake_service.update(kwargs) return fake_service fake_service = _fake_service() OPTIONAL = ['availability_zone', 'compute_node'] class _TestServiceObject(object): def supported_hv_specs_comparator(self, expected, obj_val): obj_val = [inst.to_list() for inst in obj_val] self.assertJsonEqual(expected, obj_val) def pci_device_pools_comparator(self, expected, obj_val): obj_val = obj_val.obj_to_primitive() self.assertJsonEqual(expected, obj_val) def comparators(self): return {'stats': 
self.assertJsonEqual, 'host_ip': self.assertJsonEqual, 'supported_hv_specs': self.supported_hv_specs_comparator, 'pci_device_pools': self.pci_device_pools_comparator} def subs(self): return {'supported_hv_specs': 'supported_instances', 'pci_device_pools': 'pci_stats'} def _test_query(self, db_method, obj_method, *args, **kwargs): self.mox.StubOutWithMock(db, db_method) db_exception = kwargs.pop('db_exception', None) if db_exception: getattr(db, db_method)(self.context, *args, **kwargs).AndRaise( db_exception) else: getattr(db, db_method)(self.context, *args, **kwargs).AndReturn( fake_service) self.mox.ReplayAll() obj = getattr(service.Service, obj_method)(self.context, *args, **kwargs) if db_exception: self.assertIsNone(obj) else: self.compare_obj(obj, fake_service, allow_missing=OPTIONAL) def test_get_by_id(self): self._test_query('service_get', 'get_by_id', 123) def test_get_by_host_and_topic(self): self._test_query('service_get_by_host_and_topic', 'get_by_host_and_topic', 'fake-host', 'fake-topic') def test_get_by_host_and_binary(self): self._test_query('service_get_by_host_and_binary', 'get_by_host_and_binary', 'fake-host', 'fake-binary') def test_get_by_host_and_binary_raises(self): self._test_query('service_get_by_host_and_binary', 'get_by_host_and_binary', 'fake-host', 'fake-binary', db_exception=exception.HostBinaryNotFound( host='fake-host', binary='fake-binary')) def test_get_by_compute_host(self): self._test_query('service_get_by_compute_host', 'get_by_compute_host', 'fake-host') def test_get_by_args(self): self._test_query('service_get_by_host_and_binary', 'get_by_args', 'fake-host', 'fake-binary') def test_create(self): self.mox.StubOutWithMock(db, 'service_create') db.service_create(self.context, {'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.host = 'fake-host' service_obj.create() self.assertEqual(fake_service['id'], 
service_obj.id) self.assertEqual(service.SERVICE_VERSION, service_obj.version) def test_recreate_fails(self): self.mox.StubOutWithMock(db, 'service_create') db.service_create(self.context, {'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.host = 'fake-host' service_obj.create() self.assertRaises(exception.ObjectActionError, service_obj.create) def test_save(self): self.mox.StubOutWithMock(db, 'service_update') db.service_update(self.context, 123, {'host': 'fake-host', 'version': fake_service['version']} ).AndReturn(fake_service) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.id = 123 service_obj.host = 'fake-host' service_obj.save() self.assertEqual(service.SERVICE_VERSION, service_obj.version) @mock.patch.object(db, 'service_create', return_value=fake_service) def test_set_id_failure(self, db_mock): service_obj = service.Service(context=self.context, binary='nova-compute') service_obj.create() self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr, service_obj, 'id', 124) def _test_destroy(self): self.mox.StubOutWithMock(db, 'service_destroy') db.service_destroy(self.context, 123) self.mox.ReplayAll() service_obj = service.Service(context=self.context) service_obj.id = 123 service_obj.destroy() def test_destroy(self): # The test harness needs db.service_destroy to work, # so avoid leaving it broken here after we're done orig_service_destroy = db.service_destroy try: self._test_destroy() finally: db.service_destroy = orig_service_destroy def test_get_by_topic(self): self.mox.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_topic(self.context, 'fake-topic') self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL) 
@mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary(self, mock_get): mock_get.return_value = [fake_service] services = service.ServiceList.get_by_binary(self.context, 'fake-binary') self.assertEqual(1, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=False) @mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary_disabled(self, mock_get): mock_get.return_value = [_fake_service(disabled=True)] services = service.ServiceList.get_by_binary(self.context, 'fake-binary', include_disabled=True) self.assertEqual(1, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=True) @mock.patch('nova.db.service_get_all_by_binary') def test_get_by_binary_both(self, mock_get): mock_get.return_value = [_fake_service(), _fake_service(disabled=True)] services = service.ServiceList.get_by_binary(self.context, 'fake-binary', include_disabled=True) self.assertEqual(2, len(services)) mock_get.assert_called_once_with(self.context, 'fake-binary', include_disabled=True) def test_get_by_host(self): self.mox.StubOutWithMock(db, 'service_get_all_by_host') db.service_get_all_by_host(self.context, 'fake-host').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_host(self.context, 'fake-host') self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL) def test_get_all(self): self.mox.StubOutWithMock(db, 'service_get_all') db.service_get_all(self.context, disabled=False).AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, disabled=False) self.assertEqual(1, len(services)) self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL) def test_get_all_with_az(self): self.mox.StubOutWithMock(db, 'service_get_all') self.mox.StubOutWithMock(aggregate.AggregateList, 'get_by_metadata_key') db.service_get_all(self.context, disabled=None).AndReturn( 
[dict(fake_service, topic='compute')]) agg = aggregate.Aggregate(context=self.context) agg.name = 'foo' agg.metadata = {'availability_zone': 'test-az'} agg.create() agg.hosts = [fake_service['host']] aggregate.AggregateList.get_by_metadata_key(self.context, 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, set_zones=True) self.assertEqual(1, len(services)) self.assertEqual('test-az', services[0].availability_zone) def test_compute_node(self): fake_compute_node = objects.ComputeNode._from_db_object( self.context, objects.ComputeNode(), test_compute_node.fake_compute_node) self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all_by_host') objects.ComputeNodeList.get_all_by_host( self.context, 'fake-host').AndReturn( [fake_compute_node]) self.mox.ReplayAll() service_obj = service.Service(id=123, host="fake-host", binary="nova-compute") service_obj._context = self.context self.assertEqual(service_obj.compute_node, fake_compute_node) # Make sure it doesn't re-fetch this service_obj.compute_node def test_load_when_orphaned(self): service_obj = service.Service() service_obj.id = 123 self.assertRaises(exception.OrphanedObjectError, getattr, service_obj, 'compute_node') @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') def test_obj_make_compatible_for_compute_node(self, get_all_by_host): service_obj = objects.Service(context=self.context) fake_service_dict = fake_service.copy() fake_compute_obj = objects.ComputeNode(host=fake_service['host'], service_id=fake_service['id']) get_all_by_host.return_value = [fake_compute_obj] versions = ovo_base.obj_tree_get_versions('Service') versions['ComputeNode'] = '1.10' service_obj.obj_make_compatible_from_manifest(fake_service_dict, '1.9', versions) self.assertEqual( fake_compute_obj.obj_to_primitive(target_version='1.10', version_manifest=versions), fake_service_dict['compute_node']) @mock.patch('nova.db.service_get_minimum_version') def 
test_get_minimum_version_none(self, mock_get): mock_get.return_value = None self.assertEqual(0, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') @mock.patch('nova.db.service_get_minimum_version') def test_get_minimum_version(self, mock_get): mock_get.return_value = 123 self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') @mock.patch('nova.db.service_get_minimum_version') @mock.patch('nova.objects.service.LOG') def test_get_minimum_version_checks_binary(self, mock_log, mock_get): mock_get.return_value = None self.assertEqual(0, objects.Service.get_minimum_version(self.context, 'nova-compute')) self.assertFalse(mock_log.warning.called) self.assertRaises(exception.ObjectActionError, objects.Service.get_minimum_version, self.context, 'compute') self.assertTrue(mock_log.warning.called) @mock.patch('nova.db.service_get_minimum_version') def test_get_minimum_version_with_caching(self, mock_get): objects.Service.enable_min_version_cache() mock_get.return_value = 123 self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) self.assertEqual({"nova-compute": 123}, objects.Service._MIN_VERSION_CACHE) self.assertEqual(123, objects.Service.get_minimum_version(self.context, 'nova-compute')) mock_get.assert_called_once_with(self.context, 'nova-compute') objects.Service._SERVICE_VERSION_CACHING = False objects.Service.clear_min_version_cache() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_above_minimum(self, mock_get): with mock.patch('nova.objects.service.SERVICE_VERSION', new=3): objects.Service(context=self.context, binary='nova-compute').create() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_equal_to_minimum(self, mock_get): with mock.patch('nova.objects.service.SERVICE_VERSION', new=2): 
objects.Service(context=self.context, binary='nova-compute').create() @mock.patch('nova.db.service_get_minimum_version', return_value=2) def test_create_below_minimum(self, mock_get): with mock.patch('nova.objects.service.SERVICE_VERSION', new=1): self.assertRaises(exception.ServiceTooOld, objects.Service(context=self.context, binary='nova-compute', ).create) class TestServiceObject(test_objects._LocalTest, _TestServiceObject): pass class TestRemoteServiceObject(test_objects._RemoteTest, _TestServiceObject): pass class TestServiceVersion(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() super(TestServiceVersion, self).setUp() def _collect_things(self): data = { 'compute_rpc': compute_manager.ComputeManager.target.version, } return data def test_version(self): calculated = self._collect_things() self.assertEqual( len(service.SERVICE_VERSION_HISTORY), service.SERVICE_VERSION + 1, 'Service version %i has no history. Please update ' 'nova.objects.service.SERVICE_VERSION_HISTORY ' 'and add %s to it' % (service.SERVICE_VERSION, repr(calculated))) current = service.SERVICE_VERSION_HISTORY[service.SERVICE_VERSION] self.assertEqual( current, calculated, 'Changes detected that require a SERVICE_VERSION change. 
Please ' 'increment nova.objects.service.SERVICE_VERSION, and make sure it' 'is equal to nova.compute.manager.ComputeManager.target.version.') def test_version_in_init(self): self.assertRaises(exception.ObjectActionError, objects.Service, version=123) def test_version_set_on_init(self): self.assertEqual(service.SERVICE_VERSION, objects.Service().version) def test_version_loaded_from_db(self): fake_version = fake_service['version'] + 1 fake_different_service = dict(fake_service) fake_different_service['version'] = fake_version obj = objects.Service() obj._from_db_object(self.ctxt, obj, fake_different_service) self.assertEqual(fake_version, obj.version) def test_save_noop_with_only_version(self): o = objects.Service(context=self.ctxt, id=fake_service['id']) o.obj_reset_changes(['id']) self.assertEqual(set(['version']), o.obj_what_changed()) with mock.patch('nova.db.service_update') as mock_update: o.save() self.assertFalse(mock_update.called) o.host = 'foo' with mock.patch('nova.db.service_update') as mock_update: mock_update.return_value = fake_service o.save() mock_update.assert_called_once_with( self.ctxt, fake_service['id'], {'version': service.SERVICE_VERSION, 'host': 'foo'}) class TestServiceStatusNotification(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() super(TestServiceStatusNotification, self).setUp() @mock.patch('nova.objects.service.ServiceStatusNotification') def _verify_notification(self, service_obj, mock_notification): service_obj.save() self.assertTrue(mock_notification.called) event_type = mock_notification.call_args[1]['event_type'] priority = mock_notification.call_args[1]['priority'] publisher = mock_notification.call_args[1]['publisher'] payload = mock_notification.call_args[1]['payload'] self.assertEqual(service_obj.host, publisher.host) self.assertEqual(service_obj.binary, publisher.binary) self.assertEqual(fields.NotificationPriority.INFO, priority) self.assertEqual('service', event_type.object) 
self.assertEqual(fields.NotificationAction.UPDATE, event_type.action) for field in service.ServiceStatusPayload.SCHEMA: if field in fake_service: self.assertEqual(fake_service[field], getattr(payload, field)) mock_notification.return_value.emit.assert_called_once_with(self.ctxt) @mock.patch('nova.db.service_update') def test_service_update_with_notification(self, mock_db_service_update): service_obj = objects.Service(context=self.ctxt, id=fake_service['id']) mock_db_service_update.return_value = fake_service for key, value in {'disabled': True, 'disabled_reason': 'my reason', 'forced_down': True}.items(): setattr(service_obj, key, value) self._verify_notification(service_obj) @mock.patch('nova.objects.service.ServiceStatusNotification') @mock.patch('nova.db.service_update') def test_service_update_without_notification(self, mock_db_service_update, mock_notification): service_obj = objects.Service(context=self.ctxt, id=fake_service['id']) mock_db_service_update.return_value = fake_service for key, value in {'report_count': 13, 'last_seen_up': timeutils.utcnow()}.items(): setattr(service_obj, key, value) service_obj.save() self.assertFalse(mock_notification.called)
abhinavp13/IITBX-edx-platform-dev
refs/heads/master
common/djangoapps/student/management/commands/emaillist.py
5
"""Management command that prints the e-mail address of every active user.

Intended for building mailing lists: run it and redirect stdout to a file,
one address per line.
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User

import mitxmako.middleware as middleware

# Initialize the Mako template middleware at import time, matching the other
# management commands in this package. NOTE(review): nothing in this command
# renders a template, so this looks vestigial — confirm before removing.
middleware.MakoMiddleware()


class Command(BaseCommand):
    help = \
'''
Extract an e-mail list of all active students.
'''

    def handle(self, *args, **options):
        """Print one e-mail address per line for every active user.

        Filtering on ``is_active`` is pushed into the database query rather
        than done per-row in Python, so inactive rows are never fetched.
        """
        for user in User.objects.filter(is_active=True):
            # print(x) with a single argument is valid and equivalent in
            # both Python 2 and Python 3.
            print(user.email)
otherness-space/myProject003
refs/heads/master
my_project_003/lib/python2.7/site-packages/pymongo/collection.py
3
# Copyright 2009-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Collection level utilities for Mongo.""" import warnings from bson.code import Code from bson.objectid import ObjectId from bson.son import SON from pymongo import (bulk, common, helpers, message) from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor from pymongo.errors import InvalidName, OperationFailure from pymongo.helpers import _check_write_command_response from pymongo.message import _INSERT, _UPDATE, _DELETE from pymongo.read_preferences import ReadPreference try: from collections import OrderedDict ordered_types = (SON, OrderedDict) except ImportError: ordered_types = SON def _gen_index_name(keys): """Generate an index name from the set of fields it is over. """ return u"_".join([u"%s_%s" % item for item in keys]) class Collection(common.BaseObject): """A Mongo collection. """ def __init__(self, database, name, create=False, **kwargs): """Get / create a Mongo collection. Raises :class:`TypeError` if `name` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` if `name` is not a valid collection name. Any additional keyword arguments will be used as options passed to the create command. See :meth:`~pymongo.database.Database.create_collection` for valid options. If `create` is ``True`` or additional keyword arguments are present a create command will be sent. 
Otherwise, a create command will not be sent and the collection will be created implicitly on first use. :Parameters: - `database`: the database to get a collection from - `name`: the name of the collection to get - `create` (optional): if ``True``, force collection creation even without options being set - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 2.2 Removed deprecated argument: options .. versionadded:: 2.1 uuid_subtype attribute .. versionchanged:: 1.5 deprecating `options` in favor of kwargs .. versionadded:: 1.5 the `create` parameter .. mongodoc:: collections """ super(Collection, self).__init__( slave_okay=database.slave_okay, read_preference=database.read_preference, tag_sets=database.tag_sets, secondary_acceptable_latency_ms=( database.secondary_acceptable_latency_ms), safe=database.safe, uuidrepresentation=database.uuid_subtype, **database.write_concern) if not isinstance(name, basestring): raise TypeError("name must be an instance " "of %s" % (basestring.__name__,)) if not name or ".." in name: raise InvalidName("collection names cannot be empty") if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): raise InvalidName("collection names must not " "contain '$': %r" % name) if name[0] == "." or name[-1] == ".": raise InvalidName("collection names must not start " "or end with '.': %r" % name) if "\x00" in name: raise InvalidName("collection names must not contain the " "null character") self.__database = database self.__name = unicode(name) self.__full_name = u"%s.%s" % (self.__database.name, self.__name) if create or kwargs: self.__create(kwargs) def __create(self, options): """Sends a create command with the given options. 
""" if options: if "size" in options: options["size"] = float(options["size"]) self.__database.command("create", self.__name, read_preference=ReadPreference.PRIMARY, **options) else: self.__database.command("create", self.__name, read_preference=ReadPreference.PRIMARY) def __getattr__(self, name): """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. :Parameters: - `name`: the name of the collection to get """ return Collection(self.__database, u"%s.%s" % (self.__name, name)) def __getitem__(self, name): return self.__getattr__(name) def __repr__(self): return "Collection(%r, %r)" % (self.__database, self.__name) def __eq__(self, other): if isinstance(other, Collection): us = (self.__database, self.__name) them = (other.__database, other.__name) return us == them return NotImplemented def __ne__(self, other): return not self == other @property def full_name(self): """The full name of this :class:`Collection`. The full name is of the form `database_name.collection_name`. .. versionchanged:: 1.3 ``full_name`` is now a property rather than a method. """ return self.__full_name @property def name(self): """The name of this :class:`Collection`. .. versionchanged:: 1.3 ``name`` is now a property rather than a method. """ return self.__name @property def database(self): """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. .. versionchanged:: 1.3 ``database`` is now a property rather than a method. """ return self.__database def initialize_unordered_bulk_op(self): """Initialize an unordered batch of write operations. Operations will be performed on the server in arbitrary order, possibly in parallel. All operations will be attempted. Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. See :ref:`unordered_bulk` for examples. .. 
versionadded:: 2.7 """ return bulk.BulkOperationBuilder(self, ordered=False) def initialize_ordered_bulk_op(self): """Initialize an ordered batch of write operations. Operations will be performed on the server serially, in the order provided. If an error occurs all remaining operations are aborted. Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. See :ref:`ordered_bulk` for examples. .. versionadded:: 2.7 """ return bulk.BulkOperationBuilder(self, ordered=True) def save(self, to_save, manipulate=True, safe=None, check_keys=True, **kwargs): """Save a document in this collection. If `to_save` already has an ``"_id"`` then an :meth:`update` (upsert) operation is performed and any existing document with that ``"_id"`` is overwritten. Otherwise an :meth:`insert` operation is performed. In this case if `manipulate` is ``True`` an ``"_id"`` will be added to `to_save` and this method returns the ``"_id"`` of the saved document. If `manipulate` is ``False`` the ``"_id"`` will be added by the server but this method will return ``None``. Raises :class:`TypeError` if `to_save` is not an instance of :class:`dict`. Write concern options can be passed as keyword arguments, overriding any global defaults. Valid options include w=<int/string>, wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below for a detailed explanation of these options. By default an acknowledgment is requested from the server that the save was successful, raising :class:`~pymongo.errors.OperationFailure` if an error occurred. **Passing w=0 disables write acknowledgement and all other write concern options.** :Parameters: - `to_save`: the document to be saved - `manipulate` (optional): manipulate the document before saving it? - `safe` (optional): **DEPRECATED** - Use `w` instead. - `check_keys` (optional): check if keys start with '$' or contain '.', raising :class:`~pymongo.errors.InvalidName` in either case. 
- `w` (optional): (integer or string) If this is a replica set, write operations will block until they have been replicated to the specified number or tagged set of servers. `w=<int>` always includes the replica set primary (e.g. w=3 means write to the primary and wait until replicated to **two** secondaries). **Passing w=0 disables write acknowledgement and all other write concern options.** - `wtimeout` (optional): (integer) Used in conjunction with `w`. Specify a value in milliseconds to control how long to wait for write propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j` (optional): If ``True`` block until write operations have been committed to the journal. Ignored if the server is running without journaling. - `fsync` (optional): If ``True`` force the database to fsync all files before returning. When used with `j` the server awaits the next group commit before returning. :Returns: - The ``'_id'`` value of `to_save` or ``[None]`` if `manipulate` is ``False`` and `to_save` has no '_id' field. .. versionadded:: 1.8 Support for passing `getLastError` options as keyword arguments. .. mongodoc:: insert """ if not isinstance(to_save, dict): raise TypeError("cannot save object of type %s" % type(to_save)) if "_id" not in to_save: return self.insert(to_save, manipulate, safe, check_keys, **kwargs) else: self.update({"_id": to_save["_id"]}, to_save, True, manipulate, safe, check_keys=check_keys, **kwargs) return to_save.get("_id", None) def insert(self, doc_or_docs, manipulate=True, safe=None, check_keys=True, continue_on_error=False, **kwargs): """Insert a document(s) into this collection. If `manipulate` is ``True``, the document(s) are manipulated using any :class:`~pymongo.son_manipulator.SONManipulator` instances that have been added to this :class:`~pymongo.database.Database`. 
In this case an ``"_id"`` will be added if the document(s) does not already contain one and the ``"id"`` (or list of ``"_id"`` values for more than one document) will be returned. If `manipulate` is ``False`` and the document(s) does not include an ``"_id"`` one will be added by the server. The server does not return the ``"_id"`` it created so ``None`` is returned. Write concern options can be passed as keyword arguments, overriding any global defaults. Valid options include w=<int/string>, wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below for a detailed explanation of these options. By default an acknowledgment is requested from the server that the insert was successful, raising :class:`~pymongo.errors.OperationFailure` if an error occurred. **Passing w=0 disables write acknowledgement and all other write concern options.** :Parameters: - `doc_or_docs`: a document or list of documents to be inserted - `manipulate` (optional): If ``True`` manipulate the documents before inserting. - `safe` (optional): **DEPRECATED** - Use `w` instead. - `check_keys` (optional): If ``True`` check if keys start with '$' or contain '.', raising :class:`~pymongo.errors.InvalidName` in either case. - `continue_on_error` (optional): If ``True``, the database will not stop processing a bulk insert if one fails (e.g. due to duplicate IDs). This makes bulk insert behave similarly to a series of single inserts, except lastError will be set if any insert fails, not just the last one. If multiple errors occur, only the most recent will be reported by :meth:`~pymongo.database.Database.error`. - `w` (optional): (integer or string) If this is a replica set, write operations will block until they have been replicated to the specified number or tagged set of servers. `w=<int>` always includes the replica set primary (e.g. w=3 means write to the primary and wait until replicated to **two** secondaries). 
**Passing w=0 disables write acknowledgement and all other write concern options.** - `wtimeout` (optional): (integer) Used in conjunction with `w`. Specify a value in milliseconds to control how long to wait for write propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j` (optional): If ``True`` block until write operations have been committed to the journal. Ignored if the server is running without journaling. - `fsync` (optional): If ``True`` force the database to fsync all files before returning. When used with `j` the server awaits the next group commit before returning. :Returns: - The ``'_id'`` value (or list of '_id' values) of `doc_or_docs` or ``[None]`` if manipulate is ``False`` and the documents passed as `doc_or_docs` do not include an '_id' field. .. note:: `continue_on_error` requires server version **>= 1.9.1** .. versionadded:: 2.1 Support for continue_on_error. .. versionadded:: 1.8 Support for passing `getLastError` options as keyword arguments. .. versionchanged:: 1.1 Bulk insert works with an iterable sequence of documents. .. mongodoc:: insert """ client = self.database.connection # Batch inserts require us to know the connected primary's # max_bson_size, max_message_size, and max_write_batch_size. # We have to be connected to the primary to know that. client._ensure_connected(True) docs = doc_or_docs return_one = False if isinstance(docs, dict): return_one = True docs = [docs] ids = [] if manipulate: def gen(): db = self.__database for doc in docs: # Apply user-configured SON manipulators. This order of # operations is required for backwards compatibility, # see PYTHON-709. 
doc = db._apply_incoming_manipulators(doc, self) if '_id' not in doc: doc['_id'] = ObjectId() doc = db._apply_incoming_copying_manipulators(doc, self) ids.append(doc['_id']) yield doc else: def gen(): for doc in docs: ids.append(doc.get('_id')) yield doc safe, options = self._get_write_mode(safe, **kwargs) if client.max_wire_version > 1 and safe: # Insert command command = SON([('insert', self.name), ('ordered', not continue_on_error)]) if options: command['writeConcern'] = options results = message._do_batched_write_command( self.database.name + ".$cmd", _INSERT, command, gen(), check_keys, self.uuid_subtype, client) _check_write_command_response(results) else: # Legacy batched OP_INSERT message._do_batched_insert(self.__full_name, gen(), check_keys, safe, options, continue_on_error, self.uuid_subtype, client) if return_one: return ids[0] else: return ids def update(self, spec, document, upsert=False, manipulate=False, safe=None, multi=False, check_keys=True, **kwargs): """Update a document(s) in this collection. Raises :class:`TypeError` if either `spec` or `document` is not an instance of ``dict`` or `upsert` is not an instance of ``bool``. Write concern options can be passed as keyword arguments, overriding any global defaults. Valid options include w=<int/string>, wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below for a detailed explanation of these options. By default an acknowledgment is requested from the server that the update was successful, raising :class:`~pymongo.errors.OperationFailure` if an error occurred. **Passing w=0 disables write acknowledgement and all other write concern options.** There are many useful `update modifiers`_ which can be used when performing updates. For example, here we use the ``"$set"`` modifier to modify some fields in a matching document: .. 
doctest:: >>> db.test.insert({"x": "y", "a": "b"}) ObjectId('...') >>> list(db.test.find()) [{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}] >>> db.test.update({"x": "y"}, {"$set": {"a": "c"}}) {...} >>> list(db.test.find()) [{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}] :Parameters: - `spec`: a ``dict`` or :class:`~bson.son.SON` instance specifying elements which must be present for a document to be updated - `document`: a ``dict`` or :class:`~bson.son.SON` instance specifying the document to be used for the update or (in the case of an upsert) insert - see docs on MongoDB `update modifiers`_ - `upsert` (optional): perform an upsert if ``True`` - `manipulate` (optional): manipulate the document before updating? If ``True`` all instances of :mod:`~pymongo.son_manipulator.SONManipulator` added to this :class:`~pymongo.database.Database` will be applied to the document before performing the update. - `check_keys` (optional): check if keys in `document` start with '$' or contain '.', raising :class:`~pymongo.errors.InvalidName`. Only applies to document replacement, not modification through $ operators. - `safe` (optional): **DEPRECATED** - Use `w` instead. - `multi` (optional): update all documents that match `spec`, rather than just the first matching document. The default value for `multi` is currently ``False``, but this might eventually change to ``True``. It is recommended that you specify this argument explicitly for all update operations in order to prepare your code for that change. - `w` (optional): (integer or string) If this is a replica set, write operations will block until they have been replicated to the specified number or tagged set of servers. `w=<int>` always includes the replica set primary (e.g. w=3 means write to the primary and wait until replicated to **two** secondaries). **Passing w=0 disables write acknowledgement and all other write concern options.** - `wtimeout` (optional): (integer) Used in conjunction with `w`. 
Specify a value in milliseconds to control how long to wait for write propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j` (optional): If ``True`` block until write operations have been committed to the journal. Ignored if the server is running without journaling. - `fsync` (optional): If ``True`` force the database to fsync all files before returning. When used with `j` the server awaits the next group commit before returning. :Returns: - A document (dict) describing the effect of the update or ``None`` if write acknowledgement is disabled. .. versionadded:: 1.8 Support for passing `getLastError` options as keyword arguments. .. versionchanged:: 1.4 Return the response to *lastError* if `safe` is ``True``. .. versionadded:: 1.1.1 The `multi` parameter. .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating .. mongodoc:: update """ if not isinstance(spec, dict): raise TypeError("spec must be an instance of dict") if not isinstance(document, dict): raise TypeError("document must be an instance of dict") if not isinstance(upsert, bool): raise TypeError("upsert must be an instance of bool") client = self.database.connection # Need to connect to know the wire version, and may want to connect # before applying SON manipulators. client._ensure_connected(True) if manipulate: document = self.__database._fix_incoming(document, self) safe, options = self._get_write_mode(safe, **kwargs) if document: # If a top level key begins with '$' this is a modify operation # and we should skip key validation. It doesn't matter which key # we check here. Passing a document with a mix of top level keys # starting with and without a '$' is invalid and the server will # raise an appropriate exception. 
first = (document.iterkeys()).next() if first.startswith('$'): check_keys = False if client.max_wire_version > 1 and safe: # Update command command = SON([('update', self.name)]) if options: command['writeConcern'] = options docs = [SON([('q', spec), ('u', document), ('multi', multi), ('upsert', upsert)])] results = message._do_batched_write_command( self.database.name + '.$cmd', _UPDATE, command, docs, check_keys, self.uuid_subtype, client) _check_write_command_response(results) _, result = results[0] # Add the updatedExisting field for compatibility if result.get('n') and 'upserted' not in result: result['updatedExisting'] = True else: result['updatedExisting'] = False # MongoDB >= 2.6.0 returns the upsert _id in an array # element. Break it out for backward compatibility. if isinstance(result.get('upserted'), list): result['upserted'] = result['upserted'][0]['_id'] return result else: # Legacy OP_UPDATE return client._send_message( message.update(self.__full_name, upsert, multi, spec, document, safe, options, check_keys, self.uuid_subtype), safe) def drop(self): """Alias for :meth:`~pymongo.database.Database.drop_collection`. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") .. versionadded:: 1.8 """ self.__database.drop_collection(self.__name) def remove(self, spec_or_id=None, safe=None, multi=True, **kwargs): """Remove a document(s) from this collection. .. warning:: Calls to :meth:`remove` should be performed with care, as removed data cannot be restored. If `spec_or_id` is ``None``, all documents in this collection will be removed. This is not equivalent to calling :meth:`~pymongo.database.Database.drop_collection`, however, as indexes will not be removed. Write concern options can be passed as keyword arguments, overriding any global defaults. Valid options include w=<int/string>, wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below for a detailed explanation of these options. 
By default an acknowledgment is requested from the server that the remove was successful, raising :class:`~pymongo.errors.OperationFailure` if an error occurred. **Passing w=0 disables write acknowledgement and all other write concern options.** :Parameters: - `spec_or_id` (optional): a dictionary specifying the documents to be removed OR any other type specifying the value of ``"_id"`` for the document to be removed - `safe` (optional): **DEPRECATED** - Use `w` instead. - `multi` (optional): If ``True`` (the default) remove all documents matching `spec_or_id`, otherwise remove only the first matching document. - `w` (optional): (integer or string) If this is a replica set, write operations will block until they have been replicated to the specified number or tagged set of servers. `w=<int>` always includes the replica set primary (e.g. w=3 means write to the primary and wait until replicated to **two** secondaries). **Passing w=0 disables write acknowledgement and all other write concern options.** - `wtimeout` (optional): (integer) Used in conjunction with `w`. Specify a value in milliseconds to control how long to wait for write propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j` (optional): If ``True`` block until write operations have been committed to the journal. Ignored if the server is running without journaling. - `fsync` (optional): If ``True`` force the database to fsync all files before returning. When used with `j` the server awaits the next group commit before returning. :Returns: - A document (dict) describing the effect of the remove or ``None`` if write acknowledgement is disabled. .. versionadded:: 1.8 Support for passing `getLastError` options as keyword arguments. .. versionchanged:: 1.7 Accept any type other than a ``dict`` instance for removal by ``"_id"``, not just :class:`~bson.objectid.ObjectId` instances. .. 
versionchanged:: 1.4 Return the response to *lastError* if `safe` is ``True``. .. versionchanged:: 1.2 The `spec_or_id` parameter is now optional. If it is not specified *all* documents in the collection will be removed. .. versionadded:: 1.1 The `safe` parameter. .. mongodoc:: remove """ if spec_or_id is None: spec_or_id = {} if not isinstance(spec_or_id, dict): spec_or_id = {"_id": spec_or_id} safe, options = self._get_write_mode(safe, **kwargs) client = self.database.connection # Need to connect to know the wire version. client._ensure_connected(True) if client.max_wire_version > 1 and safe: # Delete command command = SON([('delete', self.name)]) if options: command['writeConcern'] = options docs = [SON([('q', spec_or_id), ('limit', int(not multi))])] results = message._do_batched_write_command( self.database.name + '.$cmd', _DELETE, command, docs, False, self.uuid_subtype, client) _check_write_command_response(results) _, result = results[0] return result else: # Legacy OP_DELETE return client._send_message( message.delete(self.__full_name, spec_or_id, safe, options, self.uuid_subtype, int(not multi)), safe) def find_one(self, spec_or_id=None, *args, **kwargs): """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for :meth:`find_one`, although any `limit` argument will be ignored. Returns a single document, or ``None`` if no matching document is found. :Parameters: - `spec_or_id` (optional): a dictionary specifying the query to be performed OR any other type to be used as the value for a query for ``"_id"``. - `*args` (optional): any additional positional arguments are the same as the arguments to :meth:`find`. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`find`. - `max_time_ms` (optional): a value for max_time_ms may be specified as part of `**kwargs`, e.g. >>> find_one(max_time_ms=100) .. 
versionchanged:: 1.7 Allow passing any of the arguments that are valid for :meth:`find`. .. versionchanged:: 1.7 Accept any type other than a ``dict`` instance as an ``"_id"`` query, not just :class:`~bson.objectid.ObjectId` instances. """ if spec_or_id is not None and not isinstance(spec_or_id, dict): spec_or_id = {"_id": spec_or_id} max_time_ms = kwargs.pop("max_time_ms", None) cursor = self.find(spec_or_id, *args, **kwargs).max_time_ms(max_time_ms) for result in cursor.limit(-1): return result return None def find(self, *args, **kwargs): """Query the database. The `spec` argument is a prototype document that all results must match. For example: >>> db.test.find({"hello": "world"}) only matches documents that have a key "hello" with value "world". Matches can have other keys *in addition* to "hello". The `fields` argument is used to specify a subset of fields that should be included in the result documents. By limiting results to a certain subset of fields you can cut down on network traffic and decoding time. Raises :class:`TypeError` if any of the arguments are of improper type. Returns an instance of :class:`~pymongo.cursor.Cursor` corresponding to this query. :Parameters: - `spec` (optional): a SON object specifying elements which must be present for a document to be included in the result set - `fields` (optional): a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If `fields` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. fields={'_id': False}). - `skip` (optional): the number of documents to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to return - `timeout` (optional): if True (the default), any returned cursor is closed by the server after 10 minutes of inactivity. If set to False, the returned cursor will never time out on the server. 
Care should be taken to ensure that cursors with timeout turned off are properly closed. - `snapshot` (optional): if True, snapshot mode will be used for this query. Snapshot mode assures no duplicates are returned, or objects missed, which were present at both the start and end of the query's execution. For details, see the `snapshot documentation <http://dochub.mongodb.org/core/snapshot>`_. - `tailable` (optional): the result of this find call will be a tailable cursor - tailable cursors aren't closed when the last data is retrieved but are kept open and the cursors location marks the final document's position. if more data is received iteration of the cursor will continue from the last document received. For details, see the `tailable cursor documentation <http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_. - `sort` (optional): a list of (key, direction) pairs specifying the sort order for this query. See :meth:`~pymongo.cursor.Cursor.sort` for details. - `max_scan` (optional): limit the number of documents examined when performing the query - `as_class` (optional): class to use for documents in the query result (default is :attr:`~pymongo.mongo_client.MongoClient.document_class`) - `slave_okay` (optional): if True, allows this query to be run against a replica secondary. - `await_data` (optional): if True, the server will block for some extra time before returning, waiting for more data to return. Ignored if `tailable` is False. - `partial` (optional): if True, mongos will return partial results if some shards are down instead of returning an error. - `manipulate`: (optional): If True (the default), apply any outgoing SON manipulators before returning. - `network_timeout` (optional): specify a timeout to use for this query, which will override the :class:`~pymongo.mongo_client.MongoClient`-level default - `read_preference` (optional): The read preference for this query. - `tag_sets` (optional): The tag sets for this query. 
- `secondary_acceptable_latency_ms` (optional): Any replica-set member whose ping time is within secondary_acceptable_latency_ms of the nearest member may accept reads. Default 15 milliseconds. **Ignored by mongos** and must be configured on the command line. See the localThreshold_ option for more information. - `compile_re` (optional): if ``False``, don't attempt to compile BSON regex objects into Python regexes. Return instances of :class:`~bson.regex.Regex` instead. - `exhaust` (optional): If ``True`` create an "exhaust" cursor. MongoDB will stream batched results to the client without waiting for the client to request each batch, reducing latency. .. note:: There are a number of caveats to using the `exhaust` parameter: 1. The `exhaust` and `limit` options are incompatible and can not be used together. 2. The `exhaust` option is not supported by mongos and can not be used with a sharded cluster. 3. A :class:`~pymongo.cursor.Cursor` instance created with the `exhaust` option requires an exclusive :class:`~socket.socket` connection to MongoDB. If the :class:`~pymongo.cursor.Cursor` is discarded without being completely iterated the underlying :class:`~socket.socket` connection will be closed and discarded without being returned to the connection pool. 4. A :class:`~pymongo.cursor.Cursor` instance created with the `exhaust` option in a :doc:`request </examples/requests>` **must** be completely iterated before executing any other operation. 5. The `network_timeout` option is ignored when using the `exhaust` option. .. note:: The `manipulate` and `compile_re` parameters may default to False in future releases. .. note:: The `max_scan` parameter requires server version **>= 1.5.1** .. versionadded:: 2.7 The ``compile_re`` parameter. .. versionadded:: 2.3 The `tag_sets` and `secondary_acceptable_latency_ms` parameters. .. versionadded:: 1.11+ The `await_data`, `partial`, and `manipulate` parameters. .. versionadded:: 1.8 The `network_timeout` parameter. .. 
versionadded:: 1.7 The `sort`, `max_scan` and `as_class` parameters. .. versionchanged:: 1.7 The `fields` parameter can now be a dict or any iterable in addition to a list. .. versionadded:: 1.1 The `tailable` parameter. .. mongodoc:: find .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold """ if not 'slave_okay' in kwargs: kwargs['slave_okay'] = self.slave_okay if not 'read_preference' in kwargs: kwargs['read_preference'] = self.read_preference if not 'tag_sets' in kwargs: kwargs['tag_sets'] = self.tag_sets if not 'secondary_acceptable_latency_ms' in kwargs: kwargs['secondary_acceptable_latency_ms'] = ( self.secondary_acceptable_latency_ms) return Cursor(self, *args, **kwargs) def parallel_scan(self, num_cursors, **kwargs): """Scan this entire collection in parallel. Returns a list of up to ``num_cursors`` cursors that can be iterated concurrently. As long as the collection is not modified during scanning, each document appears once in one of the cursors' result sets. For example, to process each document in a collection using some thread-safe ``process_document()`` function:: def process_cursor(cursor): for document in cursor: # Some thread-safe processing function: process_document(document) # Get up to 4 cursors. cursors = collection.parallel_scan(4) threads = [ threading.Thread(target=process_cursor, args=(cursor,)) for cursor in cursors] for thread in threads: thread.start() for thread in threads: thread.join() # All documents have now been processed. With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, if the `read_preference` attribute of this instance is not set to :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the (deprecated) `slave_okay` attribute of this instance is set to `True` the command will be sent to a secondary or slave. :Parameters: - `num_cursors`: the number of cursors to return .. 
note:: Requires server version **>= 2.5.5**. """ use_master = not self.slave_okay and not self.read_preference compile_re = kwargs.get('compile_re', False) command_kwargs = { 'numCursors': num_cursors, 'read_preference': self.read_preference, 'tag_sets': self.tag_sets, 'secondary_acceptable_latency_ms': ( self.secondary_acceptable_latency_ms), 'slave_okay': self.slave_okay, '_use_master': use_master} command_kwargs.update(kwargs) result, conn_id = self.__database._command( "parallelCollectionScan", self.__name, **command_kwargs) return [CommandCursor(self, cursor['cursor'], conn_id, compile_re) for cursor in result['cursors']] def count(self): """Get the number of documents in this collection. To get the number of documents matching a specific query use :meth:`pymongo.cursor.Cursor.count`. """ return self.find().count() def create_index(self, key_or_list, cache_for=300, **kwargs): """Creates an index on this collection. Takes either a single key or a list of (key, direction) pairs. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) should be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). To create a simple ascending index on the key ``'mike'`` we just use a string argument:: >>> my_collection.create_index("mike") For a compound index on ``'mike'`` descending and ``'eliot'`` ascending we need to use a list of tuples:: >>> my_collection.create_index([("mike", pymongo.DESCENDING), ... ("eliot", pymongo.ASCENDING)]) All optional index creation parameters should be passed as keyword arguments to this method. For example:: >>> my_collection.create_index([("mike", pymongo.DESCENDING)], ... 
background=True) Valid options include, but are not limited to: - `name`: custom name to use for this index - if none is given, a name will be generated - `unique`: if ``True`` creates a unique constraint on the index - `background`: if ``True`` this index should be created in the background - `sparse`: if ``True``, omit from the index any documents that lack the indexed field - `bucketSize` or `bucket_size`: for use with geoHaystack indexes. Number of documents to group together within a certain proximity to a given longitude and latitude. - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index - `expireAfterSeconds`: <int> Used to create an expiring (TTL) collection. MongoDB will automatically delete documents from this collection after <int> seconds. The indexed field must be a UTC datetime or the data will not expire. - `dropDups` or `drop_dups` (**deprecated**): if ``True`` duplicate values are dropped during index creation when creating a unique index See the MongoDB documentation for a full list of supported options by server version. .. warning:: `dropDups` / `drop_dups` is no longer supported by MongoDB starting with server version 2.7.5. The option is silently ignored by the server and unique index builds using the option will fail if a duplicate value is detected. .. note:: `expireAfterSeconds` requires server version **>= 2.1.2** :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the index to create - `cache_for` (optional): time window (in seconds) during which this index will be recognized by subsequent calls to :meth:`ensure_index` - see documentation for :meth:`ensure_index` for details - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword arguments - `ttl` (deprecated): Use `cache_for` instead. .. 
versionchanged:: 2.3 The `ttl` parameter has been deprecated to avoid confusion with TTL collections. Use `cache_for` instead. .. versionchanged:: 2.2 Removed deprecated argument: deprecated_unique .. versionchanged:: 1.5.1 Accept kwargs to support all index creation options. .. versionadded:: 1.5 The `name` parameter. .. seealso:: :meth:`ensure_index` .. mongodoc:: indexes """ if 'ttl' in kwargs: cache_for = kwargs.pop('ttl') warnings.warn("ttl is deprecated. Please use cache_for instead.", DeprecationWarning, stacklevel=2) # The types supported by datetime.timedelta. 2to3 removes long. if not isinstance(cache_for, (int, long, float)): raise TypeError("cache_for must be an integer or float.") keys = helpers._index_list(key_or_list) index_doc = helpers._index_document(keys) name = "name" in kwargs and kwargs["name"] or _gen_index_name(keys) index = {"key": index_doc, "name": name} if "drop_dups" in kwargs: kwargs["dropDups"] = kwargs.pop("drop_dups") if "bucket_size" in kwargs: kwargs["bucketSize"] = kwargs.pop("bucket_size") index.update(kwargs) try: self.__database.command('createIndexes', self.name, read_preference=ReadPreference.PRIMARY, indexes=[index]) except OperationFailure, exc: if exc.code in common.COMMAND_NOT_FOUND_CODES: index["ns"] = self.__full_name self.__database.system.indexes.insert(index, manipulate=False, check_keys=False, **self._get_wc_override()) else: raise self.__database.connection._cache_index(self.__database.name, self.__name, name, cache_for) return name def ensure_index(self, key_or_list, cache_for=300, **kwargs): """Ensures that an index exists on this collection. Takes either a single key or a list of (key, direction) pairs. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) should be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, :data:`pymongo.TEXT`). 
See :meth:`create_index` for detailed examples. Unlike :meth:`create_index`, which attempts to create an index unconditionally, :meth:`ensure_index` takes advantage of some caching within the driver such that it only attempts to create indexes that might not already exist. When an index is created (or ensured) by PyMongo it is "remembered" for `cache_for` seconds. Repeated calls to :meth:`ensure_index` within that time limit will be lightweight - they will not attempt to actually create the index. Care must be taken when the database is being accessed through multiple clients at once. If an index is created using this client and deleted using another, any call to :meth:`ensure_index` within the cache window will fail to re-create the missing index. Returns the specified or generated index name used if :meth:`ensure_index` attempts to create the index. Returns ``None`` if the index is already cached. All optional index creation parameters should be passed as keyword arguments to this method. Valid options include, but are not limited to: - `name`: custom name to use for this index - if none is given, a name will be generated - `unique`: if ``True`` creates a unique constraint on the index - `background`: if ``True`` this index should be created in the background - `sparse`: if ``True``, omit from the index any documents that lack the indexed field - `bucketSize` or `bucket_size`: for use with geoHaystack indexes. Number of documents to group together within a certain proximity to a given longitude and latitude. - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index - `expireAfterSeconds`: <int> Used to create an expiring (TTL) collection. MongoDB will automatically delete documents from this collection after <int> seconds. The indexed field must be a UTC datetime or the data will not expire. 
- `dropDups` or `drop_dups` (**deprecated**): if ``True`` duplicate values are dropped during index creation when creating a unique index See the MongoDB documentation for a full list of supported options by server version. .. warning:: `dropDups` / `drop_dups` is no longer supported by MongoDB starting with server version 2.7.5. The option is silently ignored by the server and unique index builds using the option will fail if a duplicate value is detected. .. note:: `expireAfterSeconds` requires server version **>= 2.1.2** :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the index to create - `cache_for` (optional): time window (in seconds) during which this index will be recognized by subsequent calls to :meth:`ensure_index` - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword arguments - `ttl` (deprecated): Use `cache_for` instead. .. versionchanged:: 2.3 The `ttl` parameter has been deprecated to avoid confusion with TTL collections. Use `cache_for` instead. .. versionchanged:: 2.2 Removed deprecated argument: deprecated_unique .. versionchanged:: 1.5.1 Accept kwargs to support all index creation options. .. versionadded:: 1.5 The `name` parameter. .. seealso:: :meth:`create_index` """ if "name" in kwargs: name = kwargs["name"] else: keys = helpers._index_list(key_or_list) name = kwargs["name"] = _gen_index_name(keys) if not self.__database.connection._cached(self.__database.name, self.__name, name): return self.create_index(key_or_list, cache_for, **kwargs) return None def drop_indexes(self): """Drops all indexes on this collection. Can be used on non-existant collections or collections with no indexes. Raises OperationFailure on an error. """ self.__database.connection._purge_index(self.__database.name, self.__name) self.drop_index(u"*") def drop_index(self, index_or_name): """Drops the specified index on this collection. 
Can be used on non-existant collections or collections with no indexes. Raises OperationFailure on an error (e.g. trying to drop an index that does not exist). `index_or_name` can be either an index name (as returned by `create_index`), or an index specifier (as passed to `create_index`). An index specifier should be a list of (key, direction) pairs. Raises TypeError if index is not an instance of (str, unicode, list). .. warning:: if a custom name was used on index creation (by passing the `name` parameter to :meth:`create_index` or :meth:`ensure_index`) the index **must** be dropped by name. :Parameters: - `index_or_name`: index (or name of index) to drop """ name = index_or_name if isinstance(index_or_name, list): name = _gen_index_name(index_or_name) if not isinstance(name, basestring): raise TypeError("index_or_name must be an index name or list") self.__database.connection._purge_index(self.__database.name, self.__name, name) self.__database.command("dropIndexes", self.__name, read_preference=ReadPreference.PRIMARY, index=name, allowable_errors=["ns not found"]) def reindex(self): """Rebuilds all indexes on this collection. .. warning:: reindex blocks all other operations (indexes are built in the foreground) and will be slow for large collections. .. versionadded:: 1.11+ """ return self.__database.command("reIndex", self.__name, read_preference=ReadPreference.PRIMARY) def index_information(self): """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as returned by create_index()) and the values are dictionaries containing information about each index. The dictionary is guaranteed to contain at least a single key, ``"key"`` which is a list of (key, direction) pairs specifying the index (as passed to create_index()). It will also contain any other metadata about the indexes, except for the ``"ns"`` and ``"name"`` keys, which are cleaned. 
Example output might look like this: >>> db.test.ensure_index("x", unique=True) u'x_1' >>> db.test.index_information() {u'_id_': {u'key': [(u'_id', 1)]}, u'x_1': {u'unique': True, u'key': [(u'x', 1)]}} .. versionchanged:: 1.7 The values in the resultant dictionary are now dictionaries themselves, whose ``"key"`` item contains the list that was the value in previous versions of PyMongo. """ client = self.database.connection client._ensure_connected(True) slave_okay = not client._rs_client and not client.is_mongos if client.max_wire_version > 2: res, addr = self.__database._command( "listIndexes", self.__name, as_class=SON, cursor={}, slave_okay=slave_okay, read_preference=ReadPreference.PRIMARY) # MongoDB 2.8rc2 if "indexes" in res: raw = res["indexes"] # >= MongoDB 2.8rc3 else: raw = CommandCursor(self, res["cursor"], addr) else: raw = self.__database.system.indexes.find({"ns": self.__full_name}, {"ns": 0}, as_class=SON, slave_okay=slave_okay, _must_use_master=True) info = {} for index in raw: index["key"] = index["key"].items() index = dict(index) info[index.pop("name")] = index return info def options(self): """Get the options set on this collection. Returns a dictionary of options and their values - see :meth:`~pymongo.database.Database.create_collection` for more information on the possible options. Returns an empty dictionary if the collection has not been created yet. 
""" client = self.database.connection client._ensure_connected(True) result = None slave_okay = not client._rs_client and not client.is_mongos if client.max_wire_version > 2: res, addr = self.__database._command( "listCollections", cursor={}, filter={"name": self.__name}, read_preference=ReadPreference.PRIMARY, slave_okay=slave_okay) # MongoDB 2.8rc2 if "collections" in res: results = res["collections"] # >= MongoDB 2.8rc3 else: results = CommandCursor(self, res["cursor"], addr) for doc in results: result = doc break else: result = self.__database.system.namespaces.find_one( {"name": self.__full_name}, slave_okay=slave_okay, _must_use_master=True) if not result: return {} options = result.get("options", {}) if "create" in options: del options["create"] return options def aggregate(self, pipeline, **kwargs): """Perform an aggregation using the aggregation framework on this collection. With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, if the `read_preference` attribute of this instance is not set to :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the (deprecated) `slave_okay` attribute of this instance is set to `True` the `aggregate command`_ will be sent to a secondary or slave. :Parameters: - `pipeline`: a single command or list of aggregation commands - `**kwargs`: send arbitrary parameters to the aggregate command .. note:: Requires server version **>= 2.1.0**. With server version **>= 2.5.1**, pass ``cursor={}`` to retrieve unlimited aggregation results with a :class:`~pymongo.command_cursor.CommandCursor`:: pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}] cursor = collection.aggregate(pipeline, cursor={}) for doc in cursor: print doc .. versionchanged:: 2.7 When the cursor option is used, return :class:`~pymongo.command_cursor.CommandCursor` instead of :class:`~pymongo.cursor.Cursor`. .. versionchanged:: 2.6 Added cursor support. .. 
versionadded:: 2.3 .. _aggregate command: http://docs.mongodb.org/manual/applications/aggregation """ if not isinstance(pipeline, (dict, list, tuple)): raise TypeError("pipeline must be a dict, list or tuple") if isinstance(pipeline, dict): pipeline = [pipeline] use_master = not self.slave_okay and not self.read_preference command_kwargs = { 'pipeline': pipeline, 'read_preference': self.read_preference, 'tag_sets': self.tag_sets, 'secondary_acceptable_latency_ms': ( self.secondary_acceptable_latency_ms), 'slave_okay': self.slave_okay, '_use_master': use_master} command_kwargs.update(kwargs) result, conn_id = self.__database._command( "aggregate", self.__name, **command_kwargs) if 'cursor' in result: return CommandCursor( self, result['cursor'], conn_id, command_kwargs.get('compile_re', True)) else: return result # TODO key and condition ought to be optional, but deprecation # could be painful as argument order would have to change. def group(self, key, condition, initial, reduce, finalize=None, **kwargs): """Perform a query similar to an SQL *group by* operation. Returns an array of grouped items. The `key` parameter can be: - ``None`` to use the entire document as a key. - A :class:`list` of keys (each a :class:`basestring` (:class:`str` in python 3)) to group by. - A :class:`basestring` (:class:`str` in python 3), or :class:`~bson.code.Code` instance containing a JavaScript function to be applied to each document, returning the key to group by. With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, if the `read_preference` attribute of this instance is not set to :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or the (deprecated) `slave_okay` attribute of this instance is set to `True`, the group command will be sent to a secondary or slave. 
:Parameters: - `key`: fields to group by (see above description) - `condition`: specification of rows to be considered (as a :meth:`find` query specification) - `initial`: initial value of the aggregation counter object - `reduce`: aggregation function as a JavaScript string - `finalize`: function to be called on each object in output list. .. versionchanged:: 2.2 Removed deprecated argument: command .. versionchanged:: 1.4 The `key` argument can now be ``None`` or a JavaScript function, in addition to a :class:`list` of keys. .. versionchanged:: 1.3 The `command` argument now defaults to ``True`` and is deprecated. """ group = {} if isinstance(key, basestring): group["$keyf"] = Code(key) elif key is not None: group = {"key": helpers._fields_list_to_dict(key)} group["ns"] = self.__name group["$reduce"] = Code(reduce) group["cond"] = condition group["initial"] = initial if finalize is not None: group["finalize"] = Code(finalize) use_master = not self.slave_okay and not self.read_preference return self.__database.command("group", group, uuid_subtype=self.uuid_subtype, read_preference=self.read_preference, tag_sets=self.tag_sets, secondary_acceptable_latency_ms=( self.secondary_acceptable_latency_ms), slave_okay=self.slave_okay, _use_master=use_master, **kwargs)["retval"] def rename(self, new_name, **kwargs): """Rename this collection. If operating in auth mode, client must be authorized as an admin to perform this operation. Raises :class:`TypeError` if `new_name` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` if `new_name` is not a valid collection name. :Parameters: - `new_name`: new name for this collection - `**kwargs` (optional): any additional rename options should be passed as keyword arguments (i.e. ``dropTarget=True``) .. 
versionadded:: 1.7 support for accepting keyword arguments for rename options """ if not isinstance(new_name, basestring): raise TypeError("new_name must be an instance " "of %s" % (basestring.__name__,)) if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") if new_name[0] == "." or new_name[-1] == ".": raise InvalidName("collecion names must not start or end with '.'") if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") new_name = "%s.%s" % (self.__database.name, new_name) client = self.__database.connection client.admin.command("renameCollection", self.__full_name, read_preference=ReadPreference.PRIMARY, to=new_name, **kwargs) def distinct(self, key): """Get a list of distinct values for `key` among all documents in this collection. Raises :class:`TypeError` if `key` is not an instance of :class:`basestring` (:class:`str` in python 3). To get the distinct values for a key in the result set of a query use :meth:`~pymongo.cursor.Cursor.distinct`. :Parameters: - `key`: name of key for which we want to get the distinct values .. note:: Requires server version **>= 1.1.0** .. versionadded:: 1.1.1 """ return self.find().distinct(key) def map_reduce(self, map, reduce, out, full_response=False, **kwargs): """Perform a map/reduce operation on this collection. If `full_response` is ``False`` (default) returns a :class:`~pymongo.collection.Collection` instance containing the results of the operation. Otherwise, returns the full response from the server to the `map reduce command`_. :Parameters: - `map`: map function (as a JavaScript string) - `reduce`: reduce function (as a JavaScript string) - `out`: output collection name or `out object` (dict). See the `map reduce command`_ documentation for available options. Note: `out` options are order sensitive. :class:`~bson.son.SON` can be used to specify multiple options. e.g. 
SON([('replace', <collection name>), ('db', <database name>)]) - `full_response` (optional): if ``True``, return full response to this command - otherwise just return the result collection - `**kwargs` (optional): additional arguments to the `map reduce command`_ may be passed as keyword arguments to this helper method, e.g.:: >>> db.test.map_reduce(map, reduce, "myresults", limit=2) .. note:: Requires server version **>= 1.1.1** .. seealso:: :doc:`/examples/aggregation` .. versionchanged:: 2.2 Removed deprecated arguments: merge_output and reduce_output .. versionchanged:: 1.11+ DEPRECATED The merge_output and reduce_output parameters. .. versionadded:: 1.2 .. _map reduce command: http://www.mongodb.org/display/DOCS/MapReduce .. mongodoc:: mapreduce """ if not isinstance(out, (basestring, dict)): raise TypeError("'out' must be an instance of " "%s or dict" % (basestring.__name__,)) if isinstance(out, dict) and out.get('inline'): must_use_master = False else: must_use_master = True response = self.__database.command("mapreduce", self.__name, uuid_subtype=self.uuid_subtype, map=map, reduce=reduce, read_preference=self.read_preference, tag_sets=self.tag_sets, secondary_acceptable_latency_ms=( self.secondary_acceptable_latency_ms), out=out, _use_master=must_use_master, **kwargs) if full_response or not response.get('result'): return response elif isinstance(response['result'], dict): dbase = response['result']['db'] coll = response['result']['collection'] return self.__database.connection[dbase][coll] else: return self.__database[response["result"]] def inline_map_reduce(self, map, reduce, full_response=False, **kwargs): """Perform an inline map/reduce operation on this collection. Perform the map/reduce operation on the server in RAM. A result collection is not created. The result set is returned as a list of documents. If `full_response` is ``False`` (default) returns the result documents in a list. 
Otherwise, returns the full response from the server to the `map reduce command`_. With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, if the `read_preference` attribute of this instance is not set to :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or the (deprecated) `slave_okay` attribute of this instance is set to `True`, the inline map reduce will be run on a secondary or slave. :Parameters: - `map`: map function (as a JavaScript string) - `reduce`: reduce function (as a JavaScript string) - `full_response` (optional): if ``True``, return full response to this command - otherwise just return the result collection - `**kwargs` (optional): additional arguments to the `map reduce command`_ may be passed as keyword arguments to this helper method, e.g.:: >>> db.test.inline_map_reduce(map, reduce, limit=2) .. note:: Requires server version **>= 1.7.4** .. versionadded:: 1.10 """ use_master = not self.slave_okay and not self.read_preference res = self.__database.command("mapreduce", self.__name, uuid_subtype=self.uuid_subtype, read_preference=self.read_preference, tag_sets=self.tag_sets, secondary_acceptable_latency_ms=( self.secondary_acceptable_latency_ms), slave_okay=self.slave_okay, _use_master=use_master, map=map, reduce=reduce, out={"inline": 1}, **kwargs) if full_response: return res else: return res.get("results") def find_and_modify(self, query={}, update=None, upsert=False, sort=None, full_response=False, manipulate=False, **kwargs): """Update and return an object. This is a thin wrapper around the findAndModify_ command. The positional arguments are designed to match the first three arguments to :meth:`update` however most options should be passed as named parameters. Either `update` or `remove` arguments are required, all others are optional. 
Returns either the object before or after modification based on `new` parameter. If no objects match the `query` and `upsert` is false, returns ``None``. If upserting and `new` is false, returns ``{}``. If the full_response parameter is ``True``, the return value will be the entire response object from the server, including the 'ok' and 'lastErrorObject' fields, rather than just the modified object. This is useful mainly because the 'lastErrorObject' document holds information about the command's execution. :Parameters: - `query`: filter for the update (default ``{}``) - `update`: see second argument to :meth:`update` (no default) - `upsert`: insert if object doesn't exist (default ``False``) - `sort`: a list of (key, direction) pairs specifying the sort order for this query. See :meth:`~pymongo.cursor.Cursor.sort` for details. - `full_response`: return the entire response object from the server (default ``False``) - `remove`: remove rather than updating (default ``False``) - `new`: return updated rather than original object (default ``False``) - `fields`: see second argument to :meth:`find` (default all) - `manipulate`: (optional): If ``True``, apply any outgoing SON manipulators before returning. Ignored when `full_response` is set to True. Defaults to ``False``. - `**kwargs`: any other options the findAndModify_ command supports can be passed here. .. mongodoc:: findAndModify .. _findAndModify: http://dochub.mongodb.org/core/findAndModify .. note:: Requires server version **>= 1.3.0** .. versionchanged:: 2.8 Added the optional manipulate parameter .. versionchanged:: 2.5 Added the optional full_response parameter .. versionchanged:: 2.4 Deprecated the use of mapping types for the sort parameter .. 
versionadded:: 1.10 """ if (not update and not kwargs.get('remove', None)): raise ValueError("Must either update or remove") if (update and kwargs.get('remove', None)): raise ValueError("Can't do both update and remove") # No need to include empty args if query: kwargs['query'] = query if update: kwargs['update'] = update if upsert: kwargs['upsert'] = upsert if sort: # Accept a list of tuples to match Cursor's sort parameter. if isinstance(sort, list): kwargs['sort'] = helpers._index_document(sort) # Accept OrderedDict, SON, and dict with len == 1 so we # don't break existing code already using find_and_modify. elif (isinstance(sort, ordered_types) or isinstance(sort, dict) and len(sort) == 1): warnings.warn("Passing mapping types for `sort` is deprecated," " use a list of (key, direction) pairs instead", DeprecationWarning, stacklevel=2) kwargs['sort'] = sort else: raise TypeError("sort must be a list of (key, direction) " "pairs, a dict of len 1, or an instance of " "SON or OrderedDict") no_obj_error = "No matching object found" out = self.__database.command("findAndModify", self.__name, allowable_errors=[no_obj_error], read_preference=ReadPreference.PRIMARY, uuid_subtype=self.uuid_subtype, **kwargs) if not out['ok']: if out["errmsg"] == no_obj_error: return None else: # Should never get here b/c of allowable_errors raise ValueError("Unexpected Error: %s" % (out,)) if full_response: return out else: document = out.get('value') if manipulate: document = self.__database._fix_outgoing(document, self) return document def __iter__(self): return self def next(self): raise TypeError("'Collection' object is not iterable") def __call__(self, *args, **kwargs): """This is only here so that some API misusages are easier to debug. """ if "." not in self.__name: raise TypeError("'Collection' object is not callable. If you " "meant to call the '%s' method on a 'Database' " "object it is failing because no such method " "exists." 
% self.__name) raise TypeError("'Collection' object is not callable. If you meant to " "call the '%s' method on a 'Collection' object it is " "failing because no such method exists." % self.__name.split(".")[-1])
mitodl/open-discussions
refs/heads/master
widgets/serializers/url.py
1
"""URL widget""" from widgets.serializers.widget_instance import ( WidgetConfigSerializer, WidgetInstanceSerializer, ) from widgets.serializers.react_fields import ReactURLField, ReactCharField class URLWidgetConfigSerializer(WidgetConfigSerializer): """Serializer for URLWidget config""" url = ReactURLField( help_text="Enter URL", label="URL", under_text="Paste url from YouTube, New York Times, Instragram and more than 400 content providers. Or any other web url", show_embed=True, required=False, allow_null=True, ) custom_html = ReactCharField( help_text="For more specific embeds, enter the embed code here", under_text="For security reasons, we only allow embed code from Twitter. If you have something else in mind, contact us.", default=None, required=False, allow_null=True, ) class URLWidgetSerializer(WidgetInstanceSerializer): """A basic url widget""" configuration_serializer_class = URLWidgetConfigSerializer name = "URL" description = "Embedded URL"
frituurpan/Solar-Turbulence
refs/heads/master
swost/transmissionmodel.py
1
import copy __author__ = 'Administrator' class TransmissionModel: raw_data = '' timestamp = '' def __init__(self, data): self.raw_data = data def get_total_kwh(self): return self.get_day_kwh() + self.get_night_kwh() def get_day_kwh(self): key = '1-0:1.8.1' val = self.get_row_by_key(key) val = str.replace(val, '*kWh)', '') val = str.replace(val, key + '(', '') return float(val) def get_night_kwh(self): key = '1-0:1.8.2' val = self.get_row_by_key(key) val = str.replace(val, '*kWh)', '') val = str.replace(val, key + '(', '') return float(val) def get_current_watts(self): """ The current watts row has no key :return: """ key = '1-0:1.7.0' val = self.get_row_by_key(key) val = str.replace(str.replace(val, key + '(', ''), '*kW)', '') return float(val) def get_gas_m3(self): val = self.get_next_row_by_key_of_previous_row('0-1:24.3.0') val = str.replace(str.replace(val, '(', ''), ')', '') return float(val) def get_row_by_key(self, key): for line in self.raw_data: if key in line: return line def get_next_row_by_key_of_previous_row(self, key): for (index, line) in enumerate(self.raw_data): if key in line: return self.raw_data[1 + index] def set_timestamp(self, timestamp): self.timestamp = copy.deepcopy(timestamp) def get_timestamp(self): return self.timestamp @staticmethod def get_value_from_row(row): return row[row.index('(') + 1:row.index(')')] @staticmethod def convert_value(val): return int(float(val) * 1000)
zahanm/foodpedia
refs/heads/master
django/core/handlers/modpython.py
189
import os
from pprint import pformat
import sys
from warnings import warn

from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str, iri_to_uri
from django.utils.log import getLogger

logger = getLogger('django.request')

# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).


class ModPythonRequest(http.HttpRequest):
    """HttpRequest subclass that adapts a raw mod_python request object."""

    def __init__(self, req):
        # req is the mod_python request object this instance wraps.
        self._req = req
        # FIXME: This isn't ideal. The request URI may be encoded (it's
        # non-normalized) slightly differently to the "real" SCRIPT_NAME
        # and PATH_INFO values. This causes problems when we compute path_info,
        # below. For now, don't use script names that will be subject to
        # encoding/decoding.
        self.path = force_unicode(req.uri)
        root = req.get_options().get('django.root', '')
        self.django_root = root
        # req.path_info isn't necessarily computed correctly in all
        # circumstances (it's out of mod_python's control a bit), so we use
        # req.uri and some string manipulations to get the right value.
        if root and req.uri.startswith(root):
            self.path_info = force_unicode(req.uri[len(root):])
        else:
            self.path_info = self.path
        if not self.path_info:
            # Django prefers empty paths to be '/', rather than '', to give us
            # a common start character for URL patterns. So this is a little
            # naughty, but also pretty harmless.
            self.path_info = u'/'
        self._post_parse_error = False
        self._stream = self._req
        self._read_started = False

    def __repr__(self):
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input. The bare excepts below
        # are deliberate best-effort behaviour for that reason.
        try:
            get = pformat(self.GET)
        except:
            get = '<could not parse>'
        if self._post_parse_error:
            post = '<could not parse>'
        else:
            try:
                post = pformat(self.POST)
            except:
                post = '<could not parse>'
        try:
            cookies = pformat(self.COOKIES)
        except:
            cookies = '<could not parse>'
        try:
            meta = pformat(self.META)
        except:
            meta = '<could not parse>'
        return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                         (self.path, unicode(get), unicode(post), unicode(cookies), unicode(meta)))

    def get_full_path(self):
        # RFC 3986 requires self._req.args to be in the ASCII range, but this
        # doesn't always happen, so rather than crash, we defensively encode it.
        return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '')

    def is_secure(self):
        try:
            return self._req.is_https()
        except AttributeError:
            # mod_python < 3.2.10 doesn't have req.is_https().
            return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')

    def _get_request(self):
        # Lazily build the combined POST+GET view (POST wins on key clash).
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request

    def _get_get(self):
        if not hasattr(self, '_get'):
            self._get = http.QueryDict(self._req.args, encoding=self._encoding)
        return self._get

    def _set_get(self, get):
        self._get = get

    def _get_post(self):
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    def _get_cookies(self):
        if not hasattr(self, '_cookies'):
            self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
        return self._cookies

    def _set_cookies(self, cookies):
        self._cookies = cookies

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    def _get_meta(self):
        "Lazy loader that returns self.META dictionary"
        if not hasattr(self, '_meta'):
            self._meta = {
                'AUTH_TYPE':         self._req.ap_auth_type,
                'CONTENT_LENGTH':    self._req.headers_in.get('content-length', 0),
                'CONTENT_TYPE':      self._req.headers_in.get('content-type'),
                'GATEWAY_INTERFACE': 'CGI/1.1',
                'PATH_INFO':         self.path_info,
                'PATH_TRANSLATED':   None,  # Not supported
                'QUERY_STRING':      self._req.args,
                'REMOTE_ADDR':       self._req.connection.remote_ip,
                'REMOTE_HOST':       None,  # DNS lookups not supported
                'REMOTE_IDENT':      self._req.connection.remote_logname,
                'REMOTE_USER':       self._req.user,
                'REQUEST_METHOD':    self._req.method,
                'SCRIPT_NAME':       self.django_root,
                'SERVER_NAME':       self._req.server.server_hostname,
                'SERVER_PORT':       self._req.connection.local_addr[1],
                'SERVER_PROTOCOL':   self._req.protocol,
                'SERVER_SOFTWARE':   'mod_python'
            }
            # Expose every incoming HTTP header as HTTP_<UPPER_SNAKE> too.
            for key, value in self._req.headers_in.items():
                key = 'HTTP_' + key.upper().replace('-', '_')
                self._meta[key] = value
        return self._meta

    def _get_method(self):
        return self.META['REQUEST_METHOD'].upper()

    GET = property(_get_get, _set_get)
    POST = property(_get_post, _set_post)
    COOKIES = property(_get_cookies, _set_cookies)
    FILES = property(_get_files)
    META = property(_get_meta)
    REQUEST = property(_get_request)
    method = property(_get_method)


class ModPythonHandler(BaseHandler):
    """Entry point that turns a mod_python request into a Django response."""

    request_class = ModPythonRequest

    def __call__(self, req):
        warn(('The mod_python handler is deprecated; use a WSGI or FastCGI server instead.'),
             PendingDeprecationWarning)

        # mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
        os.environ.update(req.subprocess_env)

        # now that the environ works we can see the correct settings, so imports
        # that use settings now can work
        from django.conf import settings

        # if we need to set up middleware, now that settings works we can do it now.
        if self._request_middleware is None:
            self.load_middleware()

        set_script_prefix(req.get_options().get('django.root', ''))
        signals.request_started.send(sender=self.__class__)
        try:
            try:
                request = self.request_class(req)
            except UnicodeDecodeError:
                # Bug fix: the original referenced ``request.path`` and
                # extra={'request': request} here, but ``request`` is never
                # bound when request_class(req) raises, so the error handler
                # itself raised a NameError. Log the raw mod_python URI
                # instead and omit the unbound reference.
                logger.warning('Bad Request (UnicodeDecodeError): %s' % req.uri,
                    exc_info=sys.exc_info(),
                    extra={
                        'status_code': 400,
                    }
                )
                response = http.HttpResponseBadRequest()
            else:
                response = self.get_response(request)
        finally:
            # Always fire request_finished, even if response generation failed.
            signals.request_finished.send(sender=self.__class__)

        # Convert our custom HttpResponse object back into the mod_python req.
        req.content_type = response['Content-Type']
        for key, value in response.items():
            if key != 'content-type':
                req.headers_out[str(key)] = str(value)
        for c in response.cookies.values():
            req.headers_out.add('Set-Cookie', c.output(header=''))
        req.status = response.status_code
        try:
            for chunk in response:
                req.write(chunk)
        finally:
            response.close()

        return 0  # mod_python.apache.OK


def handler(req):
    # mod_python hooks into this function.
    return ModPythonHandler()(req)
1st/django
refs/heads/master
tests/template_tests/filter_tests/test_timeuntil.py
207
"""
Tests for the ``timeuntil`` template filter / ``timeuntil_filter`` function,
which render the human-readable time span from now (or a given reference
datetime) until a target datetime.
"""
from __future__ import unicode_literals

from datetime import datetime, timedelta

from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support

from ..utils import setup
from .timezone_utils import TimezoneTestCase


class TimeuntilTests(TimezoneTestCase):
    # Template-level tests. self.now / self.today / self.now_tz / self.now_tz_i
    # come from TimezoneTestCase; the '\xa0' in every expected string is the
    # non-breaking space the filter emits between number and unit.

    # Default compare with datetime.now()
    @setup({'timeuntil01': '{{ a|timeuntil }}'})
    def test_timeuntil01(self):
        output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
        self.assertEqual(output, '2\xa0minutes')

    @setup({'timeuntil02': '{{ a|timeuntil }}'})
    def test_timeuntil02(self):
        output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil03': '{{ a|timeuntil }}'})
    def test_timeuntil03(self):
        output = self.engine.render_to_string('timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))})
        self.assertEqual(output, '8\xa0hours, 10\xa0minutes')

    # Compare to a given parameter
    @setup({'timeuntil04': '{{ a|timeuntil:b }}'})
    def test_timeuntil04(self):
        output = self.engine.render_to_string(
            'timeuntil04',
            {'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
        )
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil05': '{{ a|timeuntil:b }}'})
    def test_timeuntil05(self):
        output = self.engine.render_to_string(
            'timeuntil05',
            {'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
        )
        self.assertEqual(output, '1\xa0minute')

    # Regression for #7443: a time in the past renders as '0 minutes',
    # not a negative span.
    @setup({'timeuntil06': '{{ earlier|timeuntil }}'})
    def test_timeuntil06(self):
        output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
    def test_timeuntil07(self):
        output = self.engine.render_to_string('timeuntil07', {'now': self.now,
                                                             'earlier': self.now - timedelta(days=7)})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil08': '{{ later|timeuntil }}'})
    def test_timeuntil08(self):
        output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
        self.assertEqual(output, '1\xa0week')

    @setup({'timeuntil09': '{{ later|timeuntil:now }}'})
    def test_timeuntil09(self):
        output = self.engine.render_to_string('timeuntil09', {'now': self.now,
                                                              'later': self.now + timedelta(days=7)})
        self.assertEqual(output, '1\xa0week')

    # Ensures that differing timezones are calculated correctly.
    @requires_tz_support
    @setup({'timeuntil10': '{{ a|timeuntil }}'})
    def test_timeuntil10(self):
        output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    @requires_tz_support
    @setup({'timeuntil11': '{{ a|timeuntil }}'})
    def test_timeuntil11(self):
        output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil12': '{{ a|timeuntil:b }}'})
    def test_timeuntil12(self):
        output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    # Regression for #9065 (two date objects).
    @setup({'timeuntil13': '{{ a|timeuntil:b }}'})
    def test_timeuntil13(self):
        output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil14': '{{ a|timeuntil:b }}'})
    def test_timeuntil14(self):
        output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
        self.assertEqual(output, '1\xa0day')


class FunctionTests(SimpleTestCase):
    # Direct calls to the underlying filter function.

    def test_until_now(self):
        self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')

    def test_explicit_date(self):
        self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
mrumsky/congress-legislators
refs/heads/master
scripts/bioguide.py
10
#!/usr/bin/env python

# gets fundamental information for every member with a bioguide ID:
# first name, nickname, middle name, last name, name suffix
# birthday

# options:
#  --cache: load from cache if present on disk (default: true)
#  --current: do *only* current legislators (default: true)
#  --historical: do *only* historical legislators (default: false)
#  --bioguide: do *only* a single legislator
#  --relationships: Get familial relationships to other members of congress past and present, when applicable

import lxml.html, io
import datetime
import re
import utils
from utils import download, load_data, save_data


def run():
    """Scrape bioguide.congress.gov for each legislator and update the
    YAML data files with birthdays (and, optionally, family relations)."""

    def update_birthday(bioguide, person, main):
        # Parse a birthday out of the biography text and store it as
        # YYYY-MM-DD under person["bio"]["birthday"]. Records failures in
        # the enclosing ``warnings`` list.
        birthday = birthday_for(main)
        if not birthday:
            print("[%s] NO BIRTHDAY :(\n\n%s" % (bioguide, main.encode("utf8")))
            warnings.append(bioguide)
            return
        if birthday == "UNKNOWN":
            return
        try:
            birthday = datetime.datetime.strptime(birthday.replace(",", ""), "%B %d %Y")
        except ValueError:
            print("[%s] BAD BIRTHDAY :(\n\n%s" % (bioguide, main.encode("utf8")))
            warnings.append(bioguide)
            return
        birthday = "%04d-%02d-%02d" % (birthday.year, birthday.month, birthday.day)
        person.setdefault("bio", {})["birthday"] = birthday

    def birthday_for(string):
        # Return the "Month D, YYYY" birthday substring, "UNKNOWN" when the
        # bio explicitly lacks a full date, or None on a parse failure.

        # exceptions for not-nicely-placed semicolons
        string = string.replace("born in Cresskill, Bergen County, N. J.; April", "born April")
        string = string.replace("FOSTER, A. Lawrence, a Representative from New York; September 17, 1802;", "born September 17, 1802")
        string = string.replace("CAO, Anh (Joseph), a Representative from Louisiana; born in Ho Chi Minh City, Vietnam; March 13, 1967", "born March 13, 1967")
        string = string.replace("CRITZ, Mark S., a Representative from Pennsylvania; born in Irwin, Westmoreland County, Pa.; January 5, 1962;", "born January 5, 1962")
        string = string.replace("SCHIFF, Steven Harvey, a Representative from New Mexico; born in Chicago, Ill.; March 18, 1947", "born March 18, 1947")
        string = string.replace('KRATOVIL, Frank, M. Jr., a Representative from Maryland; born in Lanham, Prince George\u2019s County, Md.; May 29, 1968', "born May 29, 1968")

        # look for a full "born ... Month D, YYYY" date
        pattern = r"born [^;]*?((?:January|February|March|April|May|June|July|August|September|October|November|December),? \d{1,2},? \d{4})"
        match = re.search(pattern, string, re.I)
        if not match or not match.group(1):
            # specifically detect cases that we can't handle to avoid unnecessary warnings
            if re.search(r"birth dates? unknown|date of birth is unknown", string, re.I):
                return "UNKNOWN"
            if re.search(r"born [^;]*?(?:in|about|before )?(?:(?:January|February|March|April|May|June|July|August|September|October|November|December) )?\d{4}", string, re.I):
                return "UNKNOWN"
            return None
        return match.group(1).strip()

    def relationships_of(string):
        # relationship data is stored in a parenthetical immediately after the
        # end of the </font> tag in the bio, e.g. "(son of Joseph Patrick
        # Kennedy, II, and great-nephew of Edward Moore Kennedy and John
        # Fitzgerald Kennedy)". Returns a list of {"relation", "name"} dicts.
        pattern = r"^\((.*?)\)"
        match = re.search(pattern, string, re.I)

        relationships = []

        if match and len(match.groups()) > 0:
            # NOTE(review): under Python 3 .encode() yields bytes, which
            # re.split below would reject with a str pattern — confirm which
            # interpreter/nltk version this path actually runs on.
            relationship_text = match.group(1).encode("ascii", "replace")

            # since some relationships refer to multiple people--great-nephew
            # of Edward Moore Kennedy AND John Fitzgerald Kennedy--we need a
            # special grammar
            from nltk import tree, pos_tag, RegexpParser
            tokens = re.split(r"[ ,;]+|-(?![0-9])", relationship_text)
            pos = pos_tag(tokens)

            grammar = r"""
                NAME: {<NNP>+}
                NAMES: { <IN><NAME>(?:<CC><NAME>)* }
                RELATIONSHIP: { <JJ|NN|RB|VB|VBD|VBN|IN|PRP\$>+ }
                MATCH: { <RELATIONSHIP><NAMES> }
                """
            cp = RegexpParser(grammar)
            chunks = cp.parse(pos)

            # iterate through the Relationship/Names pairs
            for n in chunks:
                if isinstance(n, tree.Tree) and n.node == "MATCH":
                    people = []
                    relationship = None
                    for piece in n:
                        if piece.node == "RELATIONSHIP":
                            relationship = " ".join([x[0] for x in piece])
                        elif piece.node == "NAMES":
                            for name in [x for x in piece if isinstance(x, tree.Tree)]:
                                people.append(" ".join([x[0] for x in name]))
                    for person in people:
                        relationships.append({"relation": relationship, "name": person})
        return relationships

    # default to caching
    cache = utils.flags().get('cache', True)
    force = not cache

    # pick either current or historical
    # order is important here, since current defaults to true
    if utils.flags().get('historical', False):
        filename = "legislators-historical.yaml"
    elif utils.flags().get('current', True):
        filename = "legislators-current.yaml"
    else:
        print("No legislators selected.")
        exit(0)

    print("Loading %s..." % filename)
    legislators = load_data(filename)

    # reoriented cache to access by bioguide ID
    by_bioguide = { }
    for m in legislators:
        if "bioguide" in m["id"]:
            by_bioguide[m["id"]["bioguide"]] = m

    # optionally focus on one legislator
    bioguide = utils.flags().get('bioguide', None)
    if bioguide:
        bioguides = [bioguide]
    else:
        bioguides = list(by_bioguide.keys())

    warnings = []
    missing = []
    count = 0
    families = 0

    for bioguide in bioguides:
        # Download & parse the HTML of the bioguide page.
        try:
            dom = fetch_bioguide_page(bioguide, force)
        except Exception as e:
            print(e)
            missing.append(bioguide)
            continue

        # Extract the member's name and the biography paragraph (main).
        try:
            name = dom.cssselect("p font")[0]
            main = dom.cssselect("p")[0]
        except IndexError:
            print("[%s] Missing name or content!" % bioguide)
            exit(0)

        name = name.text_content().strip()
        main = main.text_content().strip().replace("\n", " ").replace("\r", " ")
        main = re.sub(r"\s+", " ", main)

        # Extract the member's birthday.
        update_birthday(bioguide, by_bioguide[bioguide], main)

        # Extract relationships with other Members of Congress.
        if utils.flags().get("relationships", False):
            # relationship information, if present, is in a parenthetical
            # immediately after the name. should always be present if we
            # passed the IndexError catch above.
            after_name = dom.cssselect("p font")[0].tail.strip()
            relationships = relationships_of(after_name)
            if len(relationships):
                families = families + 1
                by_bioguide[bioguide]["family"] = relationships

        count = count + 1

    print()
    if warnings:
        print("Missed %d birthdays: %s" % (len(warnings), str.join(", ", warnings)))
    if missing:
        print("Missing a page for %d bioguides: %s" % (len(missing), str.join(", ", missing)))

    print("Saving data to %s..." % filename)
    save_data(legislators, filename)

    print("Saved %d legislators to %s" % (count, filename))
    if utils.flags().get("relationships", False):
        print("Found family members for %d of those legislators" % families)

    # (A large block of commented-out sample biographies used for manually
    # testing birthday_for() was removed here — dead code.)


def fetch_bioguide_page(bioguide, force):
    """Download (or read from the on-disk cache) and parse the bioguide HTML
    page for one legislator; raises on parse failure or a missing page."""
    url = "http://bioguide.congress.gov/scripts/biodisplay.pl?index=%s" % bioguide
    cache = "legislators/bioguide/%s.html" % bioguide
    try:
        body = download(url, cache, force)

        # Fix a problem?
        body = body.replace("&Aacute;\xc2\x81", "&Aacute;")

        # Entities like &#146; are in Windows-1252 encoding. Normally lxml
        # handles that for us, but we're also parsing HTML. The lxml.html.HTMLParser
        # doesn't support specifying an encoding, and the lxml.etree.HTMLParser doesn't
        # provide a cssselect method on element objects. So we'll just decode ourselves.
        body = utils.unescape(body, "Windows-1252")
        dom = lxml.html.parse(io.StringIO(body)).getroot()
    except lxml.etree.XMLSyntaxError:
        raise Exception("Error parsing: " + url)

    # Sanity check.
    if len(dom.cssselect("title")) == 0:
        raise Exception("No page for bioguide %s!" % bioguide)

    return dom


if __name__ == '__main__':
    run()
clovett/MissionPlanner
refs/heads/master
Lib/audiodev.py
61
"""Classes for manipulating audio devices (currently only for Sun and SGI)""" from warnings import warnpy3k warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2) del warnpy3k __all__ = ["error","AudioDev"] class error(Exception): pass class Play_Audio_sgi: # Private instance variables ## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \ ## params, config, inited_outrate, inited_width, \ ## inited_nchannels, port, converter, classinited: private classinited = 0 frameratelist = nchannelslist = sampwidthlist = None def initclass(self): import AL self.frameratelist = [ (48000, AL.RATE_48000), (44100, AL.RATE_44100), (32000, AL.RATE_32000), (22050, AL.RATE_22050), (16000, AL.RATE_16000), (11025, AL.RATE_11025), ( 8000, AL.RATE_8000), ] self.nchannelslist = [ (1, AL.MONO), (2, AL.STEREO), (4, AL.QUADRO), ] self.sampwidthlist = [ (1, AL.SAMPLE_8), (2, AL.SAMPLE_16), (3, AL.SAMPLE_24), ] self.classinited = 1 def __init__(self): import al, AL if not self.classinited: self.initclass() self.oldparams = [] self.params = [AL.OUTPUT_RATE, 0] self.config = al.newconfig() self.inited_outrate = 0 self.inited_width = 0 self.inited_nchannels = 0 self.converter = None self.port = None return def __del__(self): if self.port: self.stop() if self.oldparams: import al, AL al.setparams(AL.DEFAULT_DEVICE, self.oldparams) self.oldparams = [] def wait(self): if not self.port: return import time while self.port.getfilled() > 0: time.sleep(0.1) self.stop() def stop(self): if self.port: self.port.closeport() self.port = None if self.oldparams: import al, AL al.setparams(AL.DEFAULT_DEVICE, self.oldparams) self.oldparams = [] def setoutrate(self, rate): for (raw, cooked) in self.frameratelist: if rate == raw: self.params[1] = cooked self.inited_outrate = 1 break else: raise error, 'bad output rate' def setsampwidth(self, width): for (raw, cooked) in self.sampwidthlist: if width == raw: self.config.setwidth(cooked) self.inited_width = 1 break else: if 
width == 0: import AL self.inited_width = 0 self.config.setwidth(AL.SAMPLE_16) self.converter = self.ulaw2lin else: raise error, 'bad sample width' def setnchannels(self, nchannels): for (raw, cooked) in self.nchannelslist: if nchannels == raw: self.config.setchannels(cooked) self.inited_nchannels = 1 break else: raise error, 'bad # of channels' def writeframes(self, data): if not (self.inited_outrate and self.inited_nchannels): raise error, 'params not specified' if not self.port: import al, AL self.port = al.openport('Python', 'w', self.config) self.oldparams = self.params[:] al.getparams(AL.DEFAULT_DEVICE, self.oldparams) al.setparams(AL.DEFAULT_DEVICE, self.params) if self.converter: data = self.converter(data) self.port.writesamps(data) def getfilled(self): if self.port: return self.port.getfilled() else: return 0 def getfillable(self): if self.port: return self.port.getfillable() else: return self.config.getqueuesize() # private methods ## if 0: access *: private def ulaw2lin(self, data): import audioop return audioop.ulaw2lin(data, 2) class Play_Audio_sun: ## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \ ## inited_nchannels, converter: private def __init__(self): self.outrate = 0 self.sampwidth = 0 self.nchannels = 0 self.inited_outrate = 0 self.inited_width = 0 self.inited_nchannels = 0 self.converter = None self.port = None return def __del__(self): self.stop() def setoutrate(self, rate): self.outrate = rate self.inited_outrate = 1 def setsampwidth(self, width): self.sampwidth = width self.inited_width = 1 def setnchannels(self, nchannels): self.nchannels = nchannels self.inited_nchannels = 1 def writeframes(self, data): if not (self.inited_outrate and self.inited_width and self.inited_nchannels): raise error, 'params not specified' if not self.port: import sunaudiodev, SUNAUDIODEV self.port = sunaudiodev.open('w') info = self.port.getinfo() info.o_sample_rate = self.outrate info.o_channels = self.nchannels if self.sampwidth 
== 0: info.o_precision = 8 self.o_encoding = SUNAUDIODEV.ENCODING_ULAW # XXX Hack, hack -- leave defaults else: info.o_precision = 8 * self.sampwidth info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR self.port.setinfo(info) if self.converter: data = self.converter(data) self.port.write(data) def wait(self): if not self.port: return self.port.drain() self.stop() def stop(self): if self.port: self.port.flush() self.port.close() self.port = None def getfilled(self): if self.port: return self.port.obufcount() else: return 0 ## # Nobody remembers what this method does, and it's broken. :-( ## def getfillable(self): ## return BUFFERSIZE - self.getfilled() def AudioDev(): # Dynamically try to import and use a platform specific module. try: import al except ImportError: try: import sunaudiodev return Play_Audio_sun() except ImportError: try: import Audio_mac except ImportError: raise error, 'no audio device' else: return Audio_mac.Play_Audio_mac() else: return Play_Audio_sgi() def test(fn = None): import sys if sys.argv[1:]: fn = sys.argv[1] else: fn = 'f:just samples:just.aif' import aifc af = aifc.open(fn, 'r') print fn, af.getparams() p = AudioDev() p.setoutrate(af.getframerate()) p.setsampwidth(af.getsampwidth()) p.setnchannels(af.getnchannels()) BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels() while 1: data = af.readframes(BUFSIZ) if not data: break print len(data) p.writeframes(data) p.wait() if __name__ == '__main__': test()
ywcui1990/nupic.research
refs/heads/master
setup.py
9
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-15, Numenta, Inc.  Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import platform
import sys

from setuptools import find_packages, setup


def findRequirements():
  """
  Read the requirements.txt file and parse into requirements for setup's
  install_requirements option.
  """
  # Use a context manager so the file handle is closed deterministically
  # (the original leaked the handle returned by open()).
  with open("requirements.txt") as requirementsFile:
    return [
      line.strip()
      for line in requirementsFile
      if not line.startswith("#")
    ]


depLinks = []

# NOTE(review): platform.linux_distribution() was removed in Python 3.8;
# this branch only works on older interpreters — confirm the supported
# Python versions before modernizing.
if "linux" in sys.platform and platform.linux_distribution()[0] == "CentOS":
  depLinks = [
    "https://pypi.numenta.com/pypi/nupic",
    "https://pypi.numenta.com/pypi/nupic.bindings"
  ]


setup(name="htmresearch",
      version="0.0.1",
      description="Numenta's HTM research code",
      author="Subutai Ahmad",
      author_email="sahmad@numenta.com",
      url="https://github.com/numenta/nupic.research",
      packages=find_packages(),
      install_requires=findRequirements(),
      dependency_links = depLinks,
)
artnez/faceoff
refs/heads/master
faceoff/helpers/decorators.py
1
""" Copyright: (c) 2012-2014 Artem Nezvigin <artem@artnez.com> License: MIT, see LICENSE for details """ from functools import wraps from flask import g, request, session, render_template, url_for, redirect from faceoff.models.user import find_user def templated(template_name=None): """ Automatically renders a template named after the current endpoint. Will also render the name provided if given. """ def closure(f): @wraps(f) def decorator(*args, **kwargs): template = template_name response = f(*args, **kwargs) if response is None: response = {} elif not isinstance(response, dict): return response if template is None: template = '%s.html' % request.endpoint return render_template(template, **response) return decorator return closure def authenticated(f): """ Asserts that an existing logged-in user session is active. If not, redirects to the authenticate gate. """ @wraps(f) def decorator(*args, **kwargs): user_id = session.get('user_id') if user_id is None: return redirect(url_for('gate')) user = find_user(id=user_id) if user is None: return redirect(url_for('gate')) g.current_user = user return f(*args, **kwargs) return decorator
hottwaj/django
refs/heads/master
django/contrib/admin/templatetags/admin_urls.py
553
from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse

register = template.Library()


@register.filter
def admin_urlname(value, arg):
    """Build the namespaced admin URL name 'admin:<app>_<model>_<arg>'."""
    return 'admin:%s_%s_%s' % (value.app_label, value.model_name, arg)


@register.filter
def admin_urlquote(value):
    """Quote a value so it is safe to embed in an admin URL."""
    return quote(value)


@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
    """
    Append preserved changelist filters (plus optional popup / to-field
    markers) to *url*'s query string.

    The preserved filters come from the template context and are only
    merged when *url* resolves back to the changelist of the current
    model, so filters don't leak onto unrelated views.  Query parameters
    already present in *url* take precedence over preserved ones.
    """
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = {}  # idiomatic literal (was dict())

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label,
                                                         opts.model_name)
            # Only the raw '_changelist_filters' payload applies on the
            # changelist itself; elsewhere the wrapper dict is kept.
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field

    # Explicit parameters on the incoming URL win over preserved ones.
    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
kmod/icbd
refs/heads/master
stdlib/python2.5/distutils/command/register.py
81
"""distutils.command.register Implements the Distutils 'register' command (register with the repository). """ # created 2002/10/21, Richard Jones __revision__ = "$Id: register.py 56542 2007-07-25 16:24:08Z martin.v.loewis $" import sys, os, string, urllib2, getpass, urlparse import StringIO, ConfigParser from distutils.core import Command from distutils.errors import * class register(Command): description = ("register the distribution with the Python package index") DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi' user_options = [ ('repository=', 'r', "url of repository [default: %s]"%DEFAULT_REPOSITORY), ('list-classifiers', None, 'list the valid Trove classifiers'), ('show-response', None, 'display full response text from server'), ] boolean_options = ['verify', 'show-response', 'list-classifiers'] def initialize_options(self): self.repository = None self.show_response = 0 self.list_classifiers = 0 def finalize_options(self): if self.repository is None: self.repository = self.DEFAULT_REPOSITORY def run(self): self.check_metadata() if self.dry_run: self.verify_metadata() elif self.list_classifiers: self.classifiers() else: self.send_metadata() def check_metadata(self): """Ensure that all required elements of meta-data (name, version, URL, (author and author_email) or (maintainer and maintainer_email)) are supplied by the Distribution object; warn if any are missing. 
""" metadata = self.distribution.metadata missing = [] for attr in ('name', 'version', 'url'): if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: self.warn("missing required meta-data: " + string.join(missing, ", ")) if metadata.author: if not metadata.author_email: self.warn("missing meta-data: if 'author' supplied, " + "'author_email' must be supplied too") elif metadata.maintainer: if not metadata.maintainer_email: self.warn("missing meta-data: if 'maintainer' supplied, " + "'maintainer_email' must be supplied too") else: self.warn("missing meta-data: either (author and author_email) " + "or (maintainer and maintainer_email) " + "must be supplied") def classifiers(self): ''' Fetch the list of classifiers from the server. ''' response = urllib2.urlopen(self.repository+'?:action=list_classifiers') print response.read() def verify_metadata(self): ''' Send the metadata to the package index server to be checked. ''' # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) print 'Server response (%s): %s'%(code, result) def send_metadata(self): ''' Send the metadata to the package index server. Well, do the following: 1. figure who the user is, and then 2. send the data as a Basic auth'ed POST. First we try to read the username/password from $HOME/.pypirc, which is a ConfigParser-formatted file with a section [server-login] containing username and password entries (both in clear text). Eg: [server-login] username: fred password: sekrit Otherwise, to figure who the user is, we offer the user three choices: 1. use existing login, 2. register as a new user, or 3. set the password to a random string and email the user. 
''' choice = 'x' username = password = '' # see if we can short-cut and get the username/password from the # config config = None if os.environ.has_key('HOME'): rc = os.path.join(os.environ['HOME'], '.pypirc') if os.path.exists(rc): print 'Using PyPI login from %s'%rc config = ConfigParser.ConfigParser() config.read(rc) username = config.get('server-login', 'username') password = config.get('server-login', 'password') choice = '1' # get the user's login info choices = '1 2 3 4'.split() while choice not in choices: print '''We need to know who you are, so please choose either: 1. use your existing login, 2. register as a new user, 3. have the server generate a new password for you (and email it to you), or 4. quit Your selection [default 1]: ''', choice = raw_input() if not choice: choice = '1' elif choice not in choices: print 'Please choose one of the four options!' if choice == '1': # get the username and password while not username: username = raw_input('Username: ') while not password: password = getpass.getpass('Password: ') # set up the authentication auth = urllib2.HTTPPasswordMgr() host = urlparse.urlparse(self.repository)[1] auth.add_password('pypi', host, username, password) # send the info to the server and report the result code, result = self.post_to_server(self.build_post_data('submit'), auth) print 'Server response (%s): %s'%(code, result) # possibly save the login if os.environ.has_key('HOME') and config is None and code == 200: rc = os.path.join(os.environ['HOME'], '.pypirc') print 'I can store your PyPI login so future submissions will be faster.' 
print '(the login will be stored in %s)'%rc choice = 'X' while choice.lower() not in 'yn': choice = raw_input('Save your login (y/N)?') if not choice: choice = 'n' if choice.lower() == 'y': f = open(rc, 'w') f.write('[server-login]\nusername:%s\npassword:%s\n'%( username, password)) f.close() try: os.chmod(rc, 0600) except: pass elif choice == '2': data = {':action': 'user'} data['name'] = data['password'] = data['email'] = '' data['confirm'] = None while not data['name']: data['name'] = raw_input('Username: ') while data['password'] != data['confirm']: while not data['password']: data['password'] = getpass.getpass('Password: ') while not data['confirm']: data['confirm'] = getpass.getpass(' Confirm: ') if data['password'] != data['confirm']: data['password'] = '' data['confirm'] = None print "Password and confirm don't match!" while not data['email']: data['email'] = raw_input(' EMail: ') code, result = self.post_to_server(data) if code != 200: print 'Server response (%s): %s'%(code, result) else: print 'You will receive an email shortly.' print 'Follow the instructions in it to complete registration.' 
elif choice == '3': data = {':action': 'password_reset'} data['email'] = '' while not data['email']: data['email'] = raw_input('Your email address: ') code, result = self.post_to_server(data) print 'Server response (%s): %s'%(code, result) def build_post_data(self, action): # figure the data to send - the metadata plus some additional # information used by the package server meta = self.distribution.metadata data = { ':action': action, 'metadata_version' : '1.0', 'name': meta.get_name(), 'version': meta.get_version(), 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } if data['provides'] or data['requires'] or data['obsoletes']: data['metadata_version'] = '1.1' return data def post_to_server(self, data, auth=None): ''' Post a query to the server, and return a string response. 
''' # Build up the MIME payload for the urllib2 POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary end_boundary = sep_boundary + '--' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same name if type(value) not in (type([]), type( () )): value = [value] for value in value: value = unicode(value).encode("utf-8") body.write(sep_boundary) body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write("\n\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body.write("\n") body = body.getvalue() # build the Request headers = { 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, 'Content-length': str(len(body)) } req = urllib2.Request(self.repository, body, headers) # handle HTTP and include the Basic Auth handler opener = urllib2.build_opener( urllib2.HTTPBasicAuthHandler(password_mgr=auth) ) data = '' try: result = opener.open(req) except urllib2.HTTPError, e: if self.show_response: data = e.fp.read() result = e.code, e.msg except urllib2.URLError, e: result = 500, str(e) else: if self.show_response: data = result.read() result = 200, 'OK' if self.show_response: print '-'*75, data, '-'*75 return result
juliankiedaisch/braunanlage
refs/heads/testing
brausteuerung/server/sensor_test.py
2
print "running..." import ow import time ow.init( 'localhost:4304' ) # We're accessing the 1-wire bus directly from python but # if you want to use owserver: # ow.init( 'localhost:3030' ) # /opt/owfs/bin/owserver -p 3030 -u -r sensors = ow.Sensor("/").sensorList() # We're only interested in temperature sensors so remove # any 1-wire devices which aren't temperature sensors for sensor in sensors[:]: if sensor.type != 'DS18B20': sensors.remove( sensor ) print sensors # Print column headers for sensor in sensors: print sensor.r_address + "\t", print "\n", # Print temperatures while 1==1: print int(time.time()), "\t", for sensor in sensors: print sensor.temperature, "\t", print "\n", time.sleep(10)
n3wb13/OpenNfrGui-5.0-1
refs/heads/master
lib/python/Plugins/Extensions/TuxboxPlugins/__init__.py
12133432
xasopheno/audio_visual
refs/heads/master
audio/Training/csv/rnn.py
12133432
PatrickLeonard/superlists
refs/heads/master
accounts/migrations/__init__.py
12133432
sebastien-forestier/CogSci2017
refs/heads/master
cogsci2017/dmp/__init__.py
12133432
siosio/intellij-community
refs/heads/master
python/testData/refactoring/move/starImportWithUsages/after/src/zzz.py
12133432
davidbgk/croquemort
refs/heads/master
tests/__init__.py
12133432
erikr/django
refs/heads/master
tests/migrations/migrations_test_apps/lookuperror_a/migrations/0002_a2.py
381
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Second migration of lookuperror_a: introduces the bare A2 model."""

    dependencies = [
        ('lookuperror_a', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='A2',
            fields=[
                # Auto-created integer primary key only; no data fields.
                ('id', models.AutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name='ID',
                )),
            ],
        ),
    ]
alhashash/odoo
refs/heads/master
addons/website_forum_doc/__openerp__.py
322
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Documentation', 'category': 'Website', 'summary': 'Forum, Documentation', 'version': '1.0', 'description': """ Documentation based on question and pertinent answers of Forum """, 'author': 'OpenERP SA', 'depends': [ 'website_forum' ], 'data': [ 'data/doc_data.xml', 'security/ir.model.access.csv', 'views/doc.xml', 'views/website_doc.xml', ], 'demo': [ 'data/doc_demo.xml', ], 'installable': True, }
bbiskup/docker-python-devenv
refs/heads/dev
pkg1/mod1.py
1
# Minimal demo module (Python 2); imports zmq only to prove the
# dependency is installed in the container image.
import zmq


def f1():
    """Print a marker message and return the constant 2."""
    print "f1() called"
    return 2


if __name__ == '__main__':
    f1()
tafaRU/account-financial-tools
refs/heads/8.0
__unported__/l10n_fr_siret/company.py
11
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 Numérigraphe SARL. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm class res_company(orm.Model): """Replace the company's fields for SIRET/RC with the partner's""" _inherit = 'res.company' def _get_partner_change(self, cr, uid, ids, context=None): return self.pool['res.company'].search( cr, uid, [('partner_id', 'in', ids)], context=context) _columns = { 'siret': fields.related( 'partner_id', 'siret', type='char', string='SIRET', store={ 'res.partner': (_get_partner_change, ['siren', 'nic'], 20), 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['partner_id'], 20), }), 'company_registry': fields.related( 'partner_id', 'company_registry', type='char', string='Company Registry', store={ 'res.partner': (_get_partner_change, ['company_registry'], 20), 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['partner_id'], 20), }) }
samdowd/drumm-farm
refs/heads/master
drumm_env/lib/python2.7/site-packages/phonenumbers/data/region_VU.py
2
"""Auto-generated file, do not edit by hand. VU metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_VU = PhoneMetadata(id='VU', country_code=678, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[2-57-9]\\d{4,6}', possible_length=(5, 7)), fixed_line=PhoneNumberDesc(national_number_pattern='(?:2[02-9]\\d|3(?:[5-7]\\d|8[0-8])|48[4-9]|88\\d)\\d{2}', example_number='22123', possible_length=(5,)), mobile=PhoneNumberDesc(national_number_pattern='(?:5(?:7[2-5]|[0-689]\\d)|7[013-7]\\d)\\d{4}', example_number='5912345', possible_length=(7,)), uan=PhoneNumberDesc(national_number_pattern='3[03]\\d{3}|900\\d{4}', example_number='30123', possible_length=(5, 7)), number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[579]'])])
Kleptobismol/scikit-bio
refs/heads/master
skbio/parse/sequences/tests/test_fasta.py
2
#!/usr/bin/env python # ----------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import tempfile from unittest import TestCase, main import numpy.testing as npt from skbio import parse_fasta, parse_qual from skbio.io import RecordError FASTA_PARSERS_DATA = { 'labels': '>abc\n>def\n>ghi\n', 'oneseq': '>abc\nUCAG\n', 'multiline': '>xyz\nUUUU\nCC\nAAAAA\nG', 'threeseq': '>123\na\n> \t abc \t \ncag\ngac\n>456\nc\ng', 'twogood': '>123\n\n> \t abc \t \ncag\ngac\n>456\nc\ng', 'oneX': '>123\nX\n> \t abc \t \ncag\ngac\n>456\nc\ng', 'nolabels': 'GJ>DSJGSJDF\nSFHKLDFS>jkfs\n', 'empty': '', 'qualscores': '>x\n5 10 5\n12\n>y foo bar\n30 40\n>a \n5 10 5\n12\n' '>b baz\n30 40', 'invalidqual': '>x\n5 10 5\n12\n>y\n30 40\n>a\n5 10 5\n12 brofist 42' } class IterableData(object): """Store fasta data as lists of strings.""" def setUp(self): for attr, val in FASTA_PARSERS_DATA.items(): setattr(self, attr, val.split('\n')) class FileData(object): """Store fasta data as file names pointing to the data.""" def setUp(self): tmp_files = [] for attr, val in FASTA_PARSERS_DATA.items(): tmp_file = tempfile.NamedTemporaryFile('r+') tmp_file.write(val) tmp_file.flush() tmp_file.seek(0) setattr(self, attr, tmp_file.name) tmp_files.append(tmp_file) self._tmp_files = tmp_files def tearDown(self): for tmp_file in self._tmp_files: tmp_file.close() class ParseFastaTests(object): """Tests of parse_fasta: returns (label, seq) tuples.""" def test_empty(self): """parse_fasta should return empty list from 'file' w/o labels """ self.assertEqual(list(parse_fasta(self.empty)), []) self.assertEqual(list(parse_fasta(self.nolabels, strict=False)), []) 
self.assertRaises(RecordError, list, parse_fasta(self.nolabels)) def test_no_labels(self): """parse_fasta should return empty list from file w/o seqs""" # should fail if strict (the default) self.assertRaises(RecordError, list, parse_fasta(self.labels, strict=True)) # if not strict, should skip the records self.assertEqual(list(parse_fasta(self.labels, strict=False)), []) def test_single(self): """parse_fasta should read single record as (label, seq) tuple """ f = list(parse_fasta(self.oneseq)) self.assertEqual(len(f), 1) a = f[0] self.assertEqual(a, ('abc', 'UCAG')) f = list(parse_fasta(self.multiline)) self.assertEqual(len(f), 1) a = f[0] self.assertEqual(a, ('xyz', 'UUUUCCAAAAAG')) def test_gt_bracket_in_seq(self): """parse_fasta handles alternate finder function this test also illustrates how to use the parse_fasta to handle "sequences" that start with a > symbol, which can happen when we abuse the parse_fasta to parse fasta-like sequence quality files. """ oneseq_w_gt = '>abc\n>CAG\n'.split('\n') def get_two_line_records(infile): line1 = None for line in infile: if line1 is None: line1 = line else: yield (line1, line) line1 = None f = list(parse_fasta(oneseq_w_gt, finder=get_two_line_records)) self.assertEqual(len(f), 1) a = f[0] self.assertEqual(a, ('abc', '>CAG')) def test_parse_fasta_ignore_comment(self): """parse_fasta correct ignores label comments when requested """ in_ = '>1\nCAG\n>2 some other info\nCCAG\n>3 \nA'.split('\n') # ignore_comment = False actual = list(parse_fasta(in_)) expected = [('1', 'CAG'), ('2 some other info', 'CCAG'), ('3', 'A')] self.assertEqual(actual, expected) # ignore_comment = True actual = list(parse_fasta(in_, ignore_comment=True)) expected = [('1', 'CAG'), ('2', 'CCAG'), ('3', 'A')] self.assertEqual(actual, expected) def test_parse_fasta_label_to_name(self): exp = [('brofist', 'a'), ('brofist', 'caggac'), ('brofist', 'cg')] # the most powerful fasta label converter known to mankind obs = list(parse_fasta(self.threeseq, 
label_to_name=lambda _: 'brofist')) self.assertEqual(obs, exp) def test_multiple(self): """parse_fasta should read multiline records correctly""" f = list(parse_fasta(self.threeseq)) self.assertEqual(len(f), 3) a, b, c = f self.assertEqual(a, ('123', 'a')) self.assertEqual(b, ('abc', 'caggac')) self.assertEqual(c, ('456', 'cg')) def test_multiple_bad_strict(self): with self.assertRaises(RecordError): list(parse_fasta(self.twogood)) def test_multiple_bad_not_strict(self): f = list(parse_fasta(self.twogood, strict=False)) self.assertEqual(len(f), 2) a, b = f self.assertEqual(a, ('abc', 'caggac')) def test_parse_qual(self): exp = [('x', [5, 10, 5, 12]), ('y', [30, 40]), ('a', [5, 10, 5, 12]), ('b', [30, 40])] obs = parse_qual(self.qualscores) for o, e in zip(obs, exp): npt.assert_equal(o, e) def test_parse_qual_invalid_qual_file(self): with self.assertRaises(RecordError): list(parse_qual(self.invalidqual)) def test_parse_qual_full_header(self): exp = [('x', [5, 10, 5, 12]), ('y foo bar', [30, 40]), ('a', [5, 10, 5, 12]), ('b baz', [30, 40])] obs = parse_qual(self.qualscores, full_header=True) for o, e in zip(obs, exp): npt.assert_equal(o, e) class ParseFastaTestsInputIsIterable(IterableData, ParseFastaTests, TestCase): """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets lists of strings. """ pass class ParseFastaTestsInputIsFileNames(FileData, ParseFastaTests, TestCase): """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets a file name. """ pass if __name__ == "__main__": main()
calatre/epidemics_network
refs/heads/master
plt/SIR 1 plot.py
1
# Universidade de Aveiro - Physics Department # 2016/2017 Project - Andre Calatre, 73207 # "Simulation of an epidemic" - 16/5/2017 # Plotting Multiple Simulations of a SIR Epidemic Model #import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import rcParams rcParams['lines.linewidth'] = 0.2 rcParams['axes.linewidth'] = 0.1 #set the value globally #Choosing the values for c and r to study cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1] rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1] #xlab = range(0,501,10) i = 0 #box = {'facecolor':'yellow', 'alpha':0.9, 'pad':2} #plt.figure(figsize = (20,30)) for cvar in cvalues: for rvar in rvalues: i += 1 print('Working...') tblnm = 'c='+str(cvar)+'|r='+ str(rvar) data = pd.read_excel('data/SIR_ns_data_shift.xlsx', sheetname = tblnm) print('plotting...............................'+str(tblnm)) #print(data['S_Avg']) plt.subplot(14,14,i) y1 = data['Susceptible'] y2 = data['Infected'] y3 = data['Removed'] #e1 = data['nS_StD'] #e2 = data['nI_StD'] #e3 = data['nR_StD'] #ind = y1.index.values plt.plot(y1,'g-') #plt.fill_between(ind, y1-e1, y1+e1, linewidth=0, #facecolor = 'g', alpha = 0.3, antialiased = True) plt.plot(y2,'r-') #plt.fill_between(ind, y2-e2, y2+e2, linewidth=0, #facecolor = 'r', alpha = 0.3, antialiased = True) plt.plot(y3,'b-') # plt.fill_between(ind, y3-e3, y3+e3, linewidth=0, #facecolor = 'b', alpha = 0.2, antialiased = True) plt.axis([0,250,0,10000]) #plt.text(300,9500,tblnm, bbox= box) plt.title('c*p='+str(cvar)+'|r='+ str(rvar), size=4, y=0.75)#, loc='right') plt.subplots_adjust(bottom=0.01, right=0.99, top=0.97, left=0.01, hspace=.4, wspace=.08) #plt.xticks([]) #plt.yticks([]) plt.tick_params(labelbottom='off', labelleft='off', width = 0.05) plt.grid(True, linewidth = 0.05) #plt.tight_layout() #plt.show() plt.savefig('img/test.png', format='png', dpi=1200, figsize=(40,30))
pirate42/xhtml2pdf
refs/heads/master
xhtml2pdf/pdf.py
56
# -*- coding: utf-8 -*-

# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from xhtml2pdf.util import pisaTempFile, getFile, PyPDF2

import logging
log = logging.getLogger("xhtml2pdf")


class pisaPDF:
    """Collects PDF inputs from several sources and joins them into one."""

    def __init__(self, capacity=-1):
        # capacity is forwarded to the pisaTempFile buffers used below.
        self.capacity = capacity
        self.files = []

    def addFromURI(self, url, basepath=None):
        """Fetch a PDF from *url* (resolved against *basepath*) and queue it."""
        obj = getFile(url, basepath)
        if obj and (not obj.notFound()):
            self.files.append(obj.getFile())

    addFromFileName = addFromURI

    def addFromFile(self, f):
        """Queue *f*: file-like objects directly, anything else as a URI.

        BUGFIX: the original appended file-like objects AND then always
        called addFromURI(f), so file-like inputs were queued twice (and
        a file object was passed where a URI was expected).  The URI path
        is now the else-branch only.
        """
        if hasattr(f, "read"):
            self.files.append(f)
        else:
            self.addFromURI(f)

    def addFromString(self, data):
        """Queue raw PDF *data* wrapped in a temp-file buffer."""
        self.files.append(pisaTempFile(data, capacity=self.capacity))

    def addDocument(self, doc):
        """Queue a pisa document's destination buffer, if it is file-like."""
        if hasattr(doc.dest, "read"):
            self.files.append(doc.dest)

    def join(self, file=None):
        """Concatenate every queued PDF, page by page, in queue order.

        Writes into *file* and returns it when given; otherwise returns
        the joined document as a byte string.
        """
        output = PyPDF2.PdfFileWriter()
        for pdffile in self.files:
            # Renamed from 'input' to avoid shadowing the builtin.
            reader = PyPDF2.PdfFileReader(pdffile)
            for pageNumber in xrange(reader.getNumPages()):
                output.addPage(reader.getPage(pageNumber))

        if file is not None:
            output.write(file)
            return file

        out = pisaTempFile(capacity=self.capacity)
        output.write(out)
        return out.getvalue()

    getvalue = join
    __str__ = join
mail-apps/translate
refs/heads/master
translate/convert/tiki2po.py
25
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2008 Mozilla Corporation, Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Convert TikiWiki's language.php files to GetText PO files. See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/tiki2po.html for examples and usage instructions. """ import sys from translate.storage import po, tiki class tiki2po: def __init__(self, includeunused=False): """ :param includeunused: On conversion, should the "unused" section be preserved? Default: False """ self.includeunused = includeunused def convertstore(self, thetikifile): """Converts a given (parsed) tiki file to a po file. :param thetikifile: a tikifile pre-loaded with input data """ thetargetfile = po.pofile() # For each lang unit, make the new po unit accordingly for unit in thetikifile.units: if not self.includeunused and "unused" in unit.getlocations(): continue newunit = po.pounit() newunit.source = unit.source newunit.settarget(unit.target) locations = unit.getlocations() if locations: newunit.addlocations(locations) thetargetfile.addunit(newunit) return thetargetfile def converttiki(inputfile, outputfile, template=None, includeunused=False): """Converts from tiki file format to po. 
:param inputfile: file handle of the source :param outputfile: file handle to write to :param template: unused :param includeunused: Include the "usused" section of the tiki file? Default: False """ convertor = tiki2po(includeunused=includeunused) inputstore = tiki.TikiStore(inputfile) outputstore = convertor.convertstore(inputstore) if outputstore.isempty(): return False outputfile.write(str(outputstore)) return True def main(argv=None): """Converts tiki .php files to .po.""" from translate.convert import convert from translate.misc import stdiotell sys.stdout = stdiotell.StdIOWrapper(sys.stdout) formats = {"php": ("po", converttiki)} parser = convert.ConvertOptionParser(formats, description=__doc__) parser.add_option("", "--include-unused", dest="includeunused", action="store_true", default=False, help="Include strings in the unused section") parser.passthrough.append("includeunused") parser.run(argv) if __name__ == '__main__': main()
CopeX/odoo
refs/heads/8.0
addons/sales_team/res_config.py
366
# -*- coding: utf-8 -*- from openerp.osv import fields, osv class sales_team_configuration(osv.TransientModel): _name = 'sale.config.settings' _inherit = ['sale.config.settings'] def set_group_multi_salesteams(self, cr, uid, ids, context=None): """ This method is automatically called by res_config as it begins with set. It is used to implement the 'one group or another' behavior. We have to perform some group manipulation by hand because in res_config.execute(), set_* methods are called after group_*; therefore writing on an hidden res_config file could not work. If group_multi_salesteams is checked: remove group_mono_salesteams from group_user, remove the users. Otherwise, just add group_mono_salesteams in group_user. The inverse logic about group_multi_salesteams is managed by the normal behavior of 'group_multi_salesteams' field. """ def ref(xml_id): mod, xml = xml_id.split('.', 1) return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context) for obj in self.browse(cr, uid, ids, context=context): config_group = ref('base.group_mono_salesteams') base_group = ref('base.group_user') if obj.group_multi_salesteams: base_group.write({'implied_ids': [(3, config_group.id)]}) config_group.write({'users': [(3, u.id) for u in base_group.users]}) else: base_group.write({'implied_ids': [(4, config_group.id)]}) return True _columns = { 'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams", implied_group='base.group_multi_salesteams', help="""Allows you to use Sales Teams to manage your leads and opportunities."""), }
jdemel/gnuradio
refs/heads/master
gr-digital/python/digital/qamlike.py
1
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
This file contains constellations that are similar to QAM, but are not
perfect squares.
"""
from __future__ import absolute_import
from __future__ import unicode_literals

from . import digital_python
from .qam import large_ampls_to_corners_mapping


def qam32_holeinside_constellation(large_ampls_to_corners=False):
    """
    Build a 32-point QAM-like constellation where each quadrant's 'hole'
    sits on the side rather than the corner (the corner point helps
    frequency locking).

    Args:
        large_ampls_to_corners: when True, use an explicit rectangular
            decision map that sends out-of-range amplitudes to the corner
            sectors; otherwise use the plain rectangular slicer.

    Returns:
        A constellation object with 4-fold rotational symmetry.
    """
    # First make constellation for one quadrant.
    #     0   1   2
    # 2 - 010 111 110
    # 1 - 011 101 100
    # 0 - 000 001
    # Have put hole in the side rather than corner.
    # Corner point is helpful for frequency locking.
    # It has an attempt at some gray-coding, but not a very good one.
    # Indices are (horizontal, vertical).
    indices_and_numbers = (
        ((0, 0), 0b000),
        ((0, 1), 0b011),
        ((0, 2), 0b010),
        ((1, 0), 0b001),
        ((1, 1), 0b101),
        ((1, 2), 0b111),
        ((2, 1), 0b100),
        ((2, 2), 0b110),
    )
    points = [None] * 32
    for indices, number in indices_and_numbers:
        p_in_quadrant = 0.5 + indices[0] + 1j * (0.5 + indices[1])
        # Replicate the quadrant into all four via 90-degree rotations;
        # the symbol number gains 8 per quadrant.
        for quadrant in range(4):
            index = number + 8 * quadrant
            rotation = pow(1j, quadrant)
            points[index] = p_in_quadrant * rotation
    # Use twice as many decision boxes per side, each half as wide, so
    # points landing in the 'hole' get assigned correctly.
    # (Cleanup: removed the dead side=6 / width=1 assignments that the
    # original immediately overwrote with these values.)
    side = 12
    width = 0.5
    pre_diff_code = []
    if not large_ampls_to_corners:
        constellation = digital_python.constellation_rect(
            points, pre_diff_code, 4, side, side, width, width)
    else:
        sector_values = large_ampls_to_corners_mapping(side, points, width)
        constellation = digital_python.constellation_expl_rect(
            points, pre_diff_code, 4, side, side, width, width,
            sector_values)
    return constellation
hellsgate1001/bookit
refs/heads/master
docs/env/Lib/site-packages/django/conf/locale/__init__.py
111
# -*- encoding: utf-8 -*- from __future__ import unicode_literals # About name_local: capitalize it as if your language name was appearing # inside a sentence in your language. LANG_INFO = { 'af': { 'bidi': False, 'code': 'af', 'name': 'Afrikaans', 'name_local': 'Afrikaans', }, 'ar': { 'bidi': True, 'code': 'ar', 'name': 'Arabic', 'name_local': 'العربيّة', }, 'az': { 'bidi': True, 'code': 'az', 'name': 'Azerbaijani', 'name_local': 'azərbaycan dili', }, 'be': { 'bidi': False, 'code': 'be', 'name': 'Belarusian', 'name_local': 'беларуская', }, 'bg': { 'bidi': False, 'code': 'bg', 'name': 'Bulgarian', 'name_local': 'български', }, 'bn': { 'bidi': False, 'code': 'bn', 'name': 'Bengali', 'name_local': 'বাংলা', }, 'br': { 'bidi': False, 'code': 'br', 'name': 'Breton', 'name_local': 'brezhoneg', }, 'bs': { 'bidi': False, 'code': 'bs', 'name': 'Bosnian', 'name_local': 'bosanski', }, 'ca': { 'bidi': False, 'code': 'ca', 'name': 'Catalan', 'name_local': 'català', }, 'cs': { 'bidi': False, 'code': 'cs', 'name': 'Czech', 'name_local': 'česky', }, 'cy': { 'bidi': False, 'code': 'cy', 'name': 'Welsh', 'name_local': 'Cymraeg', }, 'da': { 'bidi': False, 'code': 'da', 'name': 'Danish', 'name_local': 'dansk', }, 'de': { 'bidi': False, 'code': 'de', 'name': 'German', 'name_local': 'Deutsch', }, 'el': { 'bidi': False, 'code': 'el', 'name': 'Greek', 'name_local': 'Ελληνικά', }, 'en': { 'bidi': False, 'code': 'en', 'name': 'English', 'name_local': 'English', }, 'en-gb': { 'bidi': False, 'code': 'en-gb', 'name': 'British English', 'name_local': 'British English', }, 'eo': { 'bidi': False, 'code': 'eo', 'name': 'Esperanto', 'name_local': 'Esperanto', }, 'es': { 'bidi': False, 'code': 'es', 'name': 'Spanish', 'name_local': 'español', }, 'es-ar': { 'bidi': False, 'code': 'es-ar', 'name': 'Argentinian Spanish', 'name_local': 'español de Argentina', }, 'es-mx': { 'bidi': False, 'code': 'es-mx', 'name': 'Mexican Spanish', 'name_local': 'español de Mexico', }, 'es-ni': { 'bidi': False, 'code': 
'es-ni', 'name': 'Nicaraguan Spanish', 'name_local': 'español de Nicaragua', }, 'es-ve': { 'bidi': False, 'code': 'es-ve', 'name': 'Venezuelan Spanish', 'name_local': 'español de Venezuela', }, 'et': { 'bidi': False, 'code': 'et', 'name': 'Estonian', 'name_local': 'eesti', }, 'eu': { 'bidi': False, 'code': 'eu', 'name': 'Basque', 'name_local': 'Basque', }, 'fa': { 'bidi': True, 'code': 'fa', 'name': 'Persian', 'name_local': 'فارسی', }, 'fi': { 'bidi': False, 'code': 'fi', 'name': 'Finnish', 'name_local': 'suomi', }, 'fr': { 'bidi': False, 'code': 'fr', 'name': 'French', 'name_local': 'français', }, 'fy-nl': { 'bidi': False, 'code': 'fy-nl', 'name': 'Frisian', 'name_local': 'Frisian', }, 'ga': { 'bidi': False, 'code': 'ga', 'name': 'Irish', 'name_local': 'Gaeilge', }, 'gl': { 'bidi': False, 'code': 'gl', 'name': 'Galician', 'name_local': 'galego', }, 'he': { 'bidi': True, 'code': 'he', 'name': 'Hebrew', 'name_local': 'עברית', }, 'hi': { 'bidi': False, 'code': 'hi', 'name': 'Hindi', 'name_local': 'Hindi', }, 'hr': { 'bidi': False, 'code': 'hr', 'name': 'Croatian', 'name_local': 'Hrvatski', }, 'hu': { 'bidi': False, 'code': 'hu', 'name': 'Hungarian', 'name_local': 'Magyar', }, 'ia': { 'bidi': False, 'code': 'ia', 'name': 'Interlingua', 'name_local': 'Interlingua', }, 'id': { 'bidi': False, 'code': 'id', 'name': 'Indonesian', 'name_local': 'Bahasa Indonesia', }, 'is': { 'bidi': False, 'code': 'is', 'name': 'Icelandic', 'name_local': 'Íslenska', }, 'it': { 'bidi': False, 'code': 'it', 'name': 'Italian', 'name_local': 'italiano', }, 'ja': { 'bidi': False, 'code': 'ja', 'name': 'Japanese', 'name_local': '日本語', }, 'ka': { 'bidi': False, 'code': 'ka', 'name': 'Georgian', 'name_local': 'ქართული', }, 'kk': { 'bidi': False, 'code': 'kk', 'name': 'Kazakh', 'name_local': 'Қазақ', }, 'km': { 'bidi': False, 'code': 'km', 'name': 'Khmer', 'name_local': 'Khmer', }, 'kn': { 'bidi': False, 'code': 'kn', 'name': 'Kannada', 'name_local': 'Kannada', }, 'ko': { 'bidi': False, 'code': 
'ko', 'name': 'Korean', 'name_local': '한국어', }, 'lb': { 'bidi': False, 'code': 'lb', 'name': 'Luxembourgish', 'name_local': 'Lëtzebuergesch', }, 'lt': { 'bidi': False, 'code': 'lt', 'name': 'Lithuanian', 'name_local': 'Lietuviškai', }, 'lv': { 'bidi': False, 'code': 'lv', 'name': 'Latvian', 'name_local': 'latviešu', }, 'mk': { 'bidi': False, 'code': 'mk', 'name': 'Macedonian', 'name_local': 'Македонски', }, 'ml': { 'bidi': False, 'code': 'ml', 'name': 'Malayalam', 'name_local': 'Malayalam', }, 'mn': { 'bidi': False, 'code': 'mn', 'name': 'Mongolian', 'name_local': 'Mongolian', }, 'my': { 'bidi': False, 'code': 'my', 'name': 'Burmese', 'name_local': 'မြန်မာဘာသာ', }, 'nb': { 'bidi': False, 'code': 'nb', 'name': 'Norwegian Bokmal', 'name_local': 'norsk (bokmål)', }, 'ne': { 'bidi': False, 'code': 'ne', 'name': 'Nepali', 'name_local': 'नेपाली', }, 'nl': { 'bidi': False, 'code': 'nl', 'name': 'Dutch', 'name_local': 'Nederlands', }, 'nn': { 'bidi': False, 'code': 'nn', 'name': 'Norwegian Nynorsk', 'name_local': 'norsk (nynorsk)', }, 'no': { 'bidi': False, 'code': 'no', 'name': 'Norwegian', 'name_local': 'norsk', }, 'os': { 'bidi': False, 'code': 'os', 'name': 'Ossetic', 'name_local': 'Ирон', }, 'pa': { 'bidi': False, 'code': 'pa', 'name': 'Punjabi', 'name_local': 'Punjabi', }, 'pl': { 'bidi': False, 'code': 'pl', 'name': 'Polish', 'name_local': 'polski', }, 'pt': { 'bidi': False, 'code': 'pt', 'name': 'Portuguese', 'name_local': 'Português', }, 'pt-br': { 'bidi': False, 'code': 'pt-br', 'name': 'Brazilian Portuguese', 'name_local': 'Português Brasileiro', }, 'ro': { 'bidi': False, 'code': 'ro', 'name': 'Romanian', 'name_local': 'Română', }, 'ru': { 'bidi': False, 'code': 'ru', 'name': 'Russian', 'name_local': 'Русский', }, 'sk': { 'bidi': False, 'code': 'sk', 'name': 'Slovak', 'name_local': 'slovenský', }, 'sl': { 'bidi': False, 'code': 'sl', 'name': 'Slovenian', 'name_local': 'Slovenščina', }, 'sq': { 'bidi': False, 'code': 'sq', 'name': 'Albanian', 'name_local': 
'shqip', }, 'sr': { 'bidi': False, 'code': 'sr', 'name': 'Serbian', 'name_local': 'српски', }, 'sr-latn': { 'bidi': False, 'code': 'sr-latn', 'name': 'Serbian Latin', 'name_local': 'srpski (latinica)', }, 'sv': { 'bidi': False, 'code': 'sv', 'name': 'Swedish', 'name_local': 'svenska', }, 'sw': { 'bidi': False, 'code': 'sw', 'name': 'Swahili', 'name_local': 'Kiswahili', }, 'ta': { 'bidi': False, 'code': 'ta', 'name': 'Tamil', 'name_local': 'தமிழ்', }, 'te': { 'bidi': False, 'code': 'te', 'name': 'Telugu', 'name_local': 'తెలుగు', }, 'th': { 'bidi': False, 'code': 'th', 'name': 'Thai', 'name_local': 'ภาษาไทย', }, 'tr': { 'bidi': False, 'code': 'tr', 'name': 'Turkish', 'name_local': 'Türkçe', }, 'tt': { 'bidi': False, 'code': 'tt', 'name': 'Tatar', 'name_local': 'Татарча', }, 'udm': { 'bidi': False, 'code': 'udm', 'name': 'Udmurt', 'name_local': 'Удмурт', }, 'uk': { 'bidi': False, 'code': 'uk', 'name': 'Ukrainian', 'name_local': 'Українська', }, 'ur': { 'bidi': True, 'code': 'ur', 'name': 'Urdu', 'name_local': 'اردو', }, 'vi': { 'bidi': False, 'code': 'vi', 'name': 'Vietnamese', 'name_local': 'Tiếng Việt', }, 'zh-cn': { 'bidi': False, 'code': 'zh-cn', 'name': 'Simplified Chinese', 'name_local': '简体中文', }, 'zh-tw': { 'bidi': False, 'code': 'zh-tw', 'name': 'Traditional Chinese', 'name_local': '繁體中文', } }
Fl0rianFischer/sme_odoo
refs/heads/9.0
addons/website_hr/models/__init__.py
439
import hr
bop/foundation
refs/heads/master
lib/python2.7/site-packages/django/views/generic/create_update.py
87
from django.forms.models import ModelFormMetaclass, ModelForm from django.template import RequestContext, loader from django.http import Http404, HttpResponse, HttpResponseRedirect from django.core.xheaders import populate_xheaders from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured from django.utils.translation import ugettext from django.contrib.auth.views import redirect_to_login from django.views.generic import GenericViewError from django.contrib import messages import warnings warnings.warn( 'Function-based generic views have been deprecated; use class-based views instead.', DeprecationWarning ) def apply_extra_context(extra_context, context): """ Adds items from extra_context dict to context. If a value in extra_context is callable, then it is called and the result is added to context. """ for key, value in extra_context.iteritems(): if callable(value): context[key] = value() else: context[key] = value def get_model_and_form_class(model, form_class): """ Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``. """ if form_class: return form_class._meta.model, form_class if model: # The inner Meta class fails if model = model is used for some reason. tmp_model = model # TODO: we should be able to construct a ModelForm without creating # and passing in a temporary inner class. class Meta: model = tmp_model class_name = model.__name__ + 'Form' form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta}) return model, form_class raise GenericViewError("Generic view must be called with either a model or" " form_class argument.") def redirect(post_save_redirect, obj): """ Returns a HttpResponseRedirect to ``post_save_redirect``. 
``post_save_redirect`` should be a string, and can contain named string- substitution place holders of ``obj`` field names. If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned by ``get_absolute_url()``. If ``obj`` has no ``get_absolute_url`` method, then raise ImproperlyConfigured. This function is meant to handle the post_save_redirect parameter to the ``create_object`` and ``update_object`` views. """ if post_save_redirect: return HttpResponseRedirect(post_save_redirect % obj.__dict__) elif hasattr(obj, 'get_absolute_url'): return HttpResponseRedirect(obj.get_absolute_url()) else: raise ImproperlyConfigured( "No URL to redirect to. Either pass a post_save_redirect" " parameter to the generic view or define a get_absolute_url" " method on the Model.") def lookup_object(model, object_id, slug, slug_field): """ Return the ``model`` object with the passed ``object_id``. If ``object_id`` is None, then return the object whose ``slug_field`` equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed, then raise Http404 exception. """ lookup_kwargs = {} if object_id: lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id elif slug and slug_field: lookup_kwargs['%s__exact' % slug_field] = slug else: raise GenericViewError( "Generic view must be called with either an object_id or a" " slug/slug_field.") try: return model.objects.get(**lookup_kwargs) except ObjectDoesNotExist: raise Http404("No %s found for %s" % (model._meta.verbose_name, lookup_kwargs)) def create_object(request, model=None, template_name=None, template_loader=loader, extra_context=None, post_save_redirect=None, login_required=False, context_processors=None, form_class=None): """ Generic object-creation function. 
Templates: ``<app_label>/<model_name>_form.html`` Context: form the form for the object """ if extra_context is None: extra_context = {} if login_required and not request.user.is_authenticated(): return redirect_to_login(request.path) model, form_class = get_model_and_form_class(model, form_class) if request.method == 'POST': form = form_class(request.POST, request.FILES) if form.is_valid(): new_object = form.save() msg = ugettext("The %(verbose_name)s was created successfully.") %\ {"verbose_name": model._meta.verbose_name} messages.success(request, msg, fail_silently=True) return redirect(post_save_redirect, new_object) else: form = form_class() # Create the template, context, response if not template_name: template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { 'form': form, }, context_processors) apply_extra_context(extra_context, c) return HttpResponse(t.render(c)) def update_object(request, model=None, object_id=None, slug=None, slug_field='slug', template_name=None, template_loader=loader, extra_context=None, post_save_redirect=None, login_required=False, context_processors=None, template_object_name='object', form_class=None): """ Generic object-update function. 
Templates: ``<app_label>/<model_name>_form.html`` Context: form the form for the object object the original object being edited """ if extra_context is None: extra_context = {} if login_required and not request.user.is_authenticated(): return redirect_to_login(request.path) model, form_class = get_model_and_form_class(model, form_class) obj = lookup_object(model, object_id, slug, slug_field) if request.method == 'POST': form = form_class(request.POST, request.FILES, instance=obj) if form.is_valid(): obj = form.save() msg = ugettext("The %(verbose_name)s was updated successfully.") %\ {"verbose_name": model._meta.verbose_name} messages.success(request, msg, fail_silently=True) return redirect(post_save_redirect, obj) else: form = form_class(instance=obj) if not template_name: template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { 'form': form, template_object_name: obj, }, context_processors) apply_extra_context(extra_context, c) response = HttpResponse(t.render(c)) populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname)) return response def delete_object(request, model, post_delete_redirect, object_id=None, slug=None, slug_field='slug', template_name=None, template_loader=loader, extra_context=None, login_required=False, context_processors=None, template_object_name='object'): """ Generic object-delete function. The given template will be used to confirm deletetion if this view is fetched using GET; for safty, deletion will only be performed if this view is POSTed. 
Templates: ``<app_label>/<model_name>_confirm_delete.html`` Context: object the original object being deleted """ if extra_context is None: extra_context = {} if login_required and not request.user.is_authenticated(): return redirect_to_login(request.path) obj = lookup_object(model, object_id, slug, slug_field) if request.method == 'POST': obj.delete() msg = ugettext("The %(verbose_name)s was deleted.") %\ {"verbose_name": model._meta.verbose_name} messages.success(request, msg, fail_silently=True) return HttpResponseRedirect(post_delete_redirect) else: if not template_name: template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { template_object_name: obj, }, context_processors) apply_extra_context(extra_context, c) response = HttpResponse(t.render(c)) populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname)) return response
rishabhmalhotra/FireSync
refs/heads/master
PyObjC/Message/__init__.py
1
''' Python mapping for the Message framework. This module does not contain docstrings for the wrapped code, check Apple's documentation for details on how to use these functions and classes. ''' import sys import objc import Foundation from Message import _metadata sys.modules['Message'] = mod = objc.ObjCLazyModule('Message', "com.apple.MessageFramework", objc.pathForFramework("/System/Library/Frameworks/Message.framework"), _metadata.__dict__, None, { '__doc__': __doc__, '__path__': __path__, 'objc': objc, }, ( Foundation,))
CharlelieMichaud/paparazzi
refs/heads/master
sw/ground_segment/python/atc/atc.py
22
#!/usr/bin/env python # # Copyright (C) 2016 TUDelft # # This file is part of paparazzi. # # paparazzi is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # paparazzi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with paparazzi. If not, see <http://www.gnu.org/licenses/>. # import wx import atc_frame class Atc(wx.App): def OnInit(self): self.main = atc_frame.AtcFrame() self.main.Show() self.SetTopWindow(self.main) return True def main(): application = Atc(0) application.MainLoop() if __name__ == '__main__': main()
htwenhe/DJOA
refs/heads/master
env/Lib/site-packages/django/conf/locale/en_GB/__init__.py
12133432
agaffney/ansible
refs/heads/devel
test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py
12133432
utlco/tcnc
refs/heads/master
tcnc/svg/geomsvg.py
1
#----------------------------------------------------------------------------- # Copyright 2012-2016 Claude Zervas # email: claude@utlco.com #----------------------------------------------------------------------------- """ Methods for converting SVG shape elements to geometry objects. """ # Python 3 compatibility boilerplate from __future__ import (absolute_import, division, print_function, unicode_literals) from future_builtins import * import math import logging import geom from geom import transform2d from geom import bezier from . import svg logger = logging.getLogger(__name__) def svg_to_geometry(svg_elements, parent_transform=None): """Convert the SVG shape elements to Line, Arc, and/or CubicBezier segments, and apply node/parent transforms. The coordinates of the segments will be absolute with respect to the parent container. Args: svg_elements: An iterable collection of 2-tuples consisting of SVG Element node and transform matrix. parent_transform: An optional parent transform to apply to all nodes. Default is None. Returns: A list of paths, where a path is a list of one or more segments made of Line, Arc, or CubicBezier objects. """ path_list = [] for element, element_transform in svg_elements: transformed_paths = svg_element_to_geometry(element, element_transform, parent_transform) if transformed_paths: path_list.extend(transformed_paths) return path_list def svg_element_to_geometry(element, element_transform=None, parent_transform=None): """Convert the SVG shape element to a list of one or more Line, Arc, and/or CubicBezier segments, and apply node/parent transforms. The coordinates of the segments will be absolute with respect to the parent container. Args: element: An SVG Element shape node. element_transform: An optional transform to apply to the element. Default is None. parent_transform: An optional parent transform to apply to the element. Default is None. Returns: A list of zero or more paths. 
A path being a list of zero or more Line, Arc, EllipticalArc, or CubicBezier objects. """ # Convert the element to a list of subpaths subpath_list = [] tag = svg.strip_ns(element.tag) # tag stripped of namespace part if tag == 'path': d = element.get('d') if d is not None and d: subpath_list = parse_path_geom(d, ellipse_to_bezier=True) else: subpath = [] if tag == 'line': subpath = convert_line(element) elif tag == 'ellipse': ellipse = convert_ellipse(element) subpath = bezier.bezier_ellipse(ellipse) elif tag == 'rect': subpath = convert_rect(element) elif tag == 'circle': subpath = convert_circle(element) elif tag == 'polyline': subpath = convert_polyline(element) elif tag == 'polygon': subpath = convert_polygon(element) if subpath: subpath_list = [subpath, ] if subpath_list: # Create a transform matrix that is composed of the # parent transform and the element transform # so that control points are in absolute coordinates. if parent_transform is not None: element_transform = transform2d.compose_transform(parent_transform, element_transform) if element_transform is not None: x_subpath_list = [] for subpath in subpath_list: x_subpath = [] for segment in subpath: # Skip zero-length segments. if not segment.p1 == segment.p2: segment = segment.transform(element_transform) x_subpath.append(segment) x_subpath_list.append(x_subpath) return x_subpath_list return subpath_list def parse_path_geom(path_data, ellipse_to_bezier=False): """ Parse SVG path data and convert to geometry objects. Args: path_data: The `d` attribute value of an SVG path element. ellipse_to_bezier: Convert elliptical arcs to bezier curves if True. Default is False. Returns: A list of zero or more subpaths. A subpath being a list of zero or more Line, Arc, EllipticalArc, or CubicBezier objects. 
""" subpath = [] subpath_list = [] p1 = (0.0, 0.0) for cmd, params in svg.parse_path(path_data): p2 = (params[-2], params[-1]) if cmd == 'M': # Start of path or sub-path if subpath: subpath_list.append(subpath) subpath = [] elif cmd == 'L': subpath.append(geom.Line(p1, p2)) elif cmd == 'A': rx = params[0] ry = params[1] phi = params[2] large_arc = params[3] sweep_flag = params[4] elliptical_arc = geom.ellipse.EllipticalArc.from_endpoints( p1, p2, rx, ry, large_arc, sweep_flag, phi) if elliptical_arc is None: # Parameters must be degenerate... # Try just making a line logger = logging.getLogger(__name__) logger.debug('Degenerate arc...') subpath.append(geom.Line(p1, p2)) elif geom.float_eq(rx, ry): # If it's a circular arc then create one using # the previously computed ellipse parameters. segment = geom.Arc(p1, p2, rx, elliptical_arc.sweep_angle, elliptical_arc.center) subpath.append(segment) elif ellipse_to_bezier: # Convert the elliptical arc to cubic Beziers subpath.extend(bezier.bezier_ellipse(elliptical_arc)) else: subpath.append(elliptical_arc) elif cmd == 'C': c1 = (params[0], params[1]) c2 = (params[2], params[3]) subpath.append(bezier.CubicBezier(p1, c1, c2, p2)) elif cmd == 'Q': c1 = (params[0], params[1]) subpath.append(bezier.CubicBezier.from_quadratic(p1, c1, p2)) p1 = p2 if subpath: subpath_list.append(subpath) return subpath_list def convert_rect(element): """Convert an SVG rect shape element to four geom.Line segments. Args: element: An SVG 'rect' element of the form <rect x='X' y='Y' width='W' height='H'/> Returns: A clockwise wound polygon as a list of geom.Line segments. 
""" # Convert to a clockwise wound polygon x1 = float(element.get('x', 0)) y1 = float(element.get('y', 0)) x2 = x1 + float(element.get('width', 0)) y2 = y1 + float(element.get('height', 0)) p1 = (x1, y1) p2 = (x1, y2) p3 = (x2, y2) p4 = (x2, y1) return [geom.Line(p1, p2), geom.Line(p2, p3), geom.Line(p3, p4), geom.Line(p4, p1)] def convert_line(element): """Convert an SVG line shape element to a geom.Line. Args: element: An SVG 'line' element of the form: <line x1='X1' y1='Y1' x2='X2' y2='Y2/> Returns: A line segment: geom.Line((x1, y1), (x2, y2)) """ x1 = float(element.get('x1', 0)) y1 = float(element.get('y1', 0)) x2 = float(element.get('x2', 0)) y2 = float(element.get('y2', 0)) return geom.Line((x1, y1), (x2, y2)) def convert_circle(element): """Convert an SVG circle shape element to four circular arc segments. Args: element: An SVG 'circle' element of the form: <circle r='RX' cx='X' cy='Y'/> Returns: A counter-clockwise wound list of four circular geom.Arc segments. """ # Convert to four arcs. CCW winding. r = abs(float(element.get('r', 0))) cx = float(element.get('cx', 0)) cy = float(element.get('cy', 0)) center = (cx, cy) p1 = (cx + r, cy) p2 = (cx, cy + r) p3 = (cx - r, cy) p4 = (cx, cy - r) a1 = geom.Arc(p1, p2, r, math.pi / 2, center) a2 = geom.Arc(p2, p3, r, math.pi / 2, center) a3 = geom.Arc(p3, p4, r, math.pi / 2, center) a4 = geom.Arc(p4, p1, r, math.pi / 2, center) return [a1, a2, a3, a4] def convert_ellipse(element): """Convert an SVG ellipse shape element to a geom.Ellipse. Args: element: An SVG 'ellipse' element of the form: <ellipse rx='RX' ry='RY' cx='X' cy='Y'/> Returns: A geom.Ellipse. """ rx = float(element.get('rx', 0)) ry = float(element.get('ry', 0)) cx = float(element.get('cx', 0)) cy = float(element.get('cy', 0)) return geom.ellipse.Ellipse((cx, cy), rx, ry) def convert_polyline(element): """Convert an SVG `polyline` shape element to a list of line segments. 
Args: element: An SVG 'polyline' element of the form: <polyline points='x1,y1 x2,y2 x3,y3 [...]'/> Returns: A list of geom.Line segments. """ segments = [] points = element.get('points', '').split() sx, sy = points[0].split(',') start_p = geom.P(float(sx), float(sy)) prev_p = start_p for point in points[1:]: sx, sy = point.split(',') p = geom.P(float(sx), float(sy)) segments.append(geom.Line(prev_p, p)) prev_p = p return segments def convert_polygon(element): """Convert an SVG `polygon` shape element to a list line segments. Args: element: An SVG 'polygon' element of the form: <polygon points='x1,y1 x2,y2 x3,y3 [...]'/> Returns: A list of geom.Line segments. The polygon will be closed. """ segments = convert_polyline(element) # Close the polygon if not already so if len(segments) > 1 and segments[-1] != segments[0]: segments.append(geom.Line(segments[-1], segments[0])) return segments
Max-Vader/namebench
refs/heads/master
nb_third_party/dns/renderer.py
248
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """Help for building DNS wire format messages""" import cStringIO import struct import random import time import dns.exception import dns.tsig QUESTION = 0 ANSWER = 1 AUTHORITY = 2 ADDITIONAL = 3 class Renderer(object): """Helper class for building DNS wire-format messages. Most applications can use the higher-level L{dns.message.Message} class and its to_wire() method to generate wire-format messages. This class is for those applications which need finer control over the generation of messages. 
Typical use:: r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512) r.add_question(qname, qtype, qclass) r.add_rrset(dns.renderer.ANSWER, rrset_1) r.add_rrset(dns.renderer.ANSWER, rrset_2) r.add_rrset(dns.renderer.AUTHORITY, ns_rrset) r.add_edns(0, 0, 4096) r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_1) r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_2) r.write_header() r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) wire = r.get_wire() @ivar output: where rendering is written @type output: cStringIO.StringIO object @ivar id: the message id @type id: int @ivar flags: the message flags @type flags: int @ivar max_size: the maximum size of the message @type max_size: int @ivar origin: the origin to use when rendering relative names @type origin: dns.name.Name object @ivar compress: the compression table @type compress: dict @ivar section: the section currently being rendered @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER, dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL) @ivar counts: list of the number of RRs in each section @type counts: int list of length 4 @ivar mac: the MAC of the rendered message (if TSIG was used) @type mac: string """ def __init__(self, id=None, flags=0, max_size=65535, origin=None): """Initialize a new renderer. @param id: the message id @type id: int @param flags: the DNS message flags @type flags: int @param max_size: the maximum message size; the default is 65535. If rendering results in a message greater than I{max_size}, then L{dns.exception.TooBig} will be raised. @type max_size: int @param origin: the origin to use when rendering relative names @type origin: dns.name.Namem or None. 
""" self.output = cStringIO.StringIO() if id is None: self.id = random.randint(0, 65535) else: self.id = id self.flags = flags self.max_size = max_size self.origin = origin self.compress = {} self.section = QUESTION self.counts = [0, 0, 0, 0] self.output.write('\x00' * 12) self.mac = '' def _rollback(self, where): """Truncate the output buffer at offset I{where}, and remove any compression table entries that pointed beyond the truncation point. @param where: the offset @type where: int """ self.output.seek(where) self.output.truncate() keys_to_delete = [] for k, v in self.compress.iteritems(): if v >= where: keys_to_delete.append(k) for k in keys_to_delete: del self.compress[k] def _set_section(self, section): """Set the renderer's current section. Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, ADDITIONAL. Sections may be empty. @param section: the section @type section: int @raises dns.exception.FormError: an attempt was made to set a section value less than the current section. """ if self.section != section: if self.section > section: raise dns.exception.FormError self.section = section def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN): """Add a question to the message. @param qname: the question name @type qname: dns.name.Name @param rdtype: the question rdata type @type rdtype: int @param rdclass: the question rdata class @type rdclass: int """ self._set_section(QUESTION) before = self.output.tell() qname.to_wire(self.output, self.compress, self.origin) self.output.write(struct.pack("!HH", rdtype, rdclass)) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns.exception.TooBig self.counts[QUESTION] += 1 def add_rrset(self, section, rrset, **kw): """Add the rrset to the specified section. Any keyword arguments are passed on to the rdataset's to_wire() routine. 
        @param section: the section
        @type section: int
        @param rrset: the rrset
        @type rrset: dns.rrset.RRset object
        """

        self._set_section(section)
        # Snapshot the write position so the addition can be rolled back
        # if it would push the rendered message past max_size.
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.  Any keyword arguments are passed on to
        the rdataset's to_wire() routine.

        @param section: the section
        @type section: int
        @param name: the owner name
        @type name: dns.name.Name object
        @param rdataset: the rdataset
        @type rdataset: dns.rdataset.Rdataset object
        """

        self._set_section(section)
        # Snapshot the write position so the addition can be rolled back
        # if it would push the rendered message past max_size.
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message.

        @param edns: The EDNS level to use.
        @type edns: int
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the
        maximum size of UDP datagram the sender can handle.
        @type payload: int
        @param options: The EDNS options list
        @type options: list of dns.edns.Option instances
        @see: RFC 2671
        """

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= 0xFF00FFFFL
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        # OPT pseudo-RR header: root owner name (single 0 byte), type OPT;
        # the class field carries the sender's payload size, the TTL field
        # carries the extended flags, and RDLENGTH starts as 0 (patched
        # below once the options have been rendered).
        self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if not options is None:
            lstart = self.output.tell()
            for opt in options:
                # Write the option code with a zero length placeholder...
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                # ...then seek back and patch in the real option length,
                # and return to the end of the buffer.
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            # Patch the OPT RR's RDLENGTH now that the total rendered
            # options length is known.
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the message.

        @param keyname: the TSIG key name
        @type keyname: dns.name.Name object
        @param secret: the secret to use
        @type secret: string
        @param fudge: TSIG time fudge
        @type fudge: int
        @param id: the message id to encode in the tsig signature
        @type id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @type request_mac: string
        @param algorithm: the TSIG algorithm to use
        @type algorithm: string
        """

        self._set_section(ADDITIONAL)
        before = self.output.tell()
        # Sign everything rendered so far.
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        # TSIG RR header: type TSIG, class ANY, TTL 0, RDLENGTH placeholder 0
        # (patched below once the rdata has been written).
        self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
                                      dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        # Patch in the real RDLENGTH.
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        # Update the ARCOUNT field (byte offset 10 in the header) in place,
        # since the header has already been written.
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.

        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message.

        @rtype: string
        """

        return self.output.getvalue()
savoirfairelinux/OpenUpgrade
refs/heads/master
addons/l10n_lu/__init__.py
376
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
wzbozon/statsmodels
refs/heads/master
statsmodels/graphics/tests/test_factorplots.py
27
import numpy as np
from nose import SkipTest
from pandas import Series

from statsmodels.graphics.factorplots import interaction_plot

try:
    import matplotlib.pyplot as plt
    import matplotlib
    have_matplotlib = True
except ImportError:
    have_matplotlib = False


class TestInteractionPlot(object):
    """Smoke tests for interaction_plot; every test closes its figure."""

    @classmethod
    def setupClass(cls):
        # Skip the whole class when matplotlib is not installed.
        if not have_matplotlib:
            raise SkipTest('matplotlib not available')
        # Fixed seed keeps the fixture data reproducible across runs.
        np.random.seed(12345)
        cls.weight = np.random.randint(1, 4, size=60)
        cls.duration = np.random.randint(1, 3, size=60)
        cls.days = np.log(np.random.randint(1, 30, size=60))

    def test_plot_both(self):
        # Explicit colors and markers together.
        fig = interaction_plot(self.weight, self.duration, self.days,
                               colors=['red', 'blue'],
                               markers=['D', '^'],
                               ms=10)
        plt.close(fig)

    def test_plot_rainbow(self):
        # No colors given: the rainbow colormap default is exercised.
        fig = interaction_plot(self.weight, self.duration, self.days,
                               markers=['D', '^'],
                               ms=10)
        plt.close(fig)

    def test_plot_pandas(self):
        # Named pandas Series should drive axis labels and the legend title.
        weight = Series(self.weight, name='Weight')
        duration = Series(self.duration, name='Duration')
        days = Series(self.days, name='Days')
        fig = interaction_plot(weight, duration, days,
                               markers=['D', '^'],
                               ms=10)
        ax = fig.axes[0]
        legend_title = ax.get_legend().get_title().get_text()
        assert legend_title == 'Duration'
        assert ax.get_ylabel() == 'mean of Days'
        assert ax.get_xlabel() == 'Weight'
        plt.close(fig)
smessmer/cryfs
refs/heads/develop
vendor/googletest/gtest/googlemock/scripts/generator/gmock_gen.py
19
#!/usr/bin/env python # # Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Driver for starting up Google Mock class generator.""" import os import sys if __name__ == '__main__': # Add the directory of this script to the path so we can import gmock_class. sys.path.append(os.path.dirname(__file__)) from cpp import gmock_class # Fix the docstring in case they require the usage. gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__) gmock_class.main()
popazerty/obh-sh4
refs/heads/master
lib/python/Tools/Profile.py
47
# the implementation here is a bit crappy. import time from Directories import resolveFilename, SCOPE_CONFIG PERCENTAGE_START = 0 PERCENTAGE_END = 100 profile_start = time.time() profile_data = {} total_time = 1 profile_file = None try: profile_old = open(resolveFilename(SCOPE_CONFIG, "profile"), "r").readlines() t = None for line in profile_old: (t, id) = line[:-1].split('\t') t = float(t) total_time = t profile_data[id] = t except: print "no profile data available" try: profile_file = open(resolveFilename(SCOPE_CONFIG, "profile"), "w") except IOError: print "WARNING: couldn't open profile file!" def profile(id): now = time.time() - profile_start if profile_file: profile_file.write("%7.3f\t%s\n" % (now, id)) if id in profile_data: t = profile_data[id] if total_time: perc = t * (PERCENTAGE_END - PERCENTAGE_START) / total_time + PERCENTAGE_START else: perc = PERCENTAGE_START try: open("/proc/progress", "w").write("%d \n" % perc) except IOError: pass def profile_final(): global profile_file if profile_file is not None: profile_file.close() profile_file = None
lightbase/LBApp
refs/heads/master
lbapp/views/__init__.py
12133432
bencesomogyi/pyCFD
refs/heads/master
pyCFD_geometric_tools/cython_boost_win32/__init__.py
12133432
manashmndl/deeppy
refs/heads/master
deeppy/siamese/__init__.py
12133432
nkhuyu/commons
refs/heads/master
src/python/twitter/common/python/installer.py
14
from __future__ import absolute_import

# Compatibility shim: re-export the installer API from the standalone ``pex``
# package so existing ``twitter.common.python.installer`` imports keep
# working (presumably this module's code was extracted into pex — confirm
# against project history before removing).
from pex.installer import *
drmrd/ansible
refs/heads/devel
test/units/modules/cloud/amazon/__init__.py
12133432
dims/cinder
refs/heads/master
cinder/tests/unit/scheduler/__init__.py
12133432
mihail911/nupic
refs/heads/master
nupic/research/monitor_mixin/__init__.py
12133432
rohitwaghchaure/erpnext_smart
refs/heads/develop
erpnext/hr/doctype/employee_leave_approver/__init__.py
12133432
hardikamal/foursquared.eclair
refs/heads/master
mock_server/playfoursquare.py
127
#!/usr/bin/python2.6 # # Simple http server to emulate api.playfoursquare.com import logging import shutil import urlparse import SimpleHTTPServer import BaseHTTPServer class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handle playfoursquare.com requests, for testing.""" def do_GET(self): logging.warn('do_GET: %s, %s', self.command, self.path) url = urlparse.urlparse(self.path) logging.warn('do_GET: %s', url) query = urlparse.parse_qs(url.query) query_keys = [pair[0] for pair in query] response = self.handle_url(url) if response != None: self.send_200() shutil.copyfileobj(response, self.wfile) self.wfile.close() do_POST = do_GET def handle_url(self, url): path = None if url.path == '/v1/venue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/venues': path = '../captures/api/v1/venues.xml' elif url.path == '/v1/user': path = '../captures/api/v1/user.xml' elif url.path == '/v1/checkcity': path = '../captures/api/v1/checkcity.xml' elif url.path == '/v1/checkins': path = '../captures/api/v1/checkins.xml' elif url.path == '/v1/cities': path = '../captures/api/v1/cities.xml' elif url.path == '/v1/switchcity': path = '../captures/api/v1/switchcity.xml' elif url.path == '/v1/tips': path = '../captures/api/v1/tips.xml' elif url.path == '/v1/checkin': path = '../captures/api/v1/checkin.xml' if path is None: self.send_error(404) else: logging.warn('Using: %s' % path) return open(path) def send_200(self): self.send_response(200) self.send_header('Content-type', 'text/xml') self.end_headers() def main(): server_address = ('0.0.0.0', 8080) httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler) sa = httpd.socket.getsockname() print "Serving HTTP on", sa[0], "port", sa[1], "..." httpd.serve_forever() if __name__ == '__main__': main()
codilime/cloudify-openstack-plugin
refs/heads/master
neutron_plugin/subnet.py
2
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  * See the License for the specific language governing permissions and
#  * limitations under the License.

from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from openstack_plugin_common import (
    with_neutron_client,
    transform_resource_name,
    get_resource_id,
    get_openstack_id_of_single_connected_node_by_openstack_type,
    delete_resource_and_runtime_properties,
    delete_runtime_properties,
    use_external_resource,
    validate_resource,
    validate_ip_or_range_syntax,
    OPENSTACK_ID_PROPERTY,
    OPENSTACK_TYPE_PROPERTY,
    OPENSTACK_NAME_PROPERTY,
    COMMON_RUNTIME_PROPERTIES_KEYS
)

from neutron_plugin.network import NETWORK_OPENSTACK_TYPE

# OpenStack resource type handled by this module.
SUBNET_OPENSTACK_TYPE = 'subnet'

# Runtime properties
RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS


@operation
@with_neutron_client
def create(neutron_client, **kwargs):
    """Create a subnet, or adopt an externally managed one.

    For an external resource, only verify (when a network node is
    connected) that the existing subnet actually belongs to that network;
    on any failure the runtime properties set by use_external_resource are
    cleaned up before re-raising.
    """
    if use_external_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE):
        try:
            net_id = \
                get_openstack_id_of_single_connected_node_by_openstack_type(
                    ctx, NETWORK_OPENSTACK_TYPE, True)

            if net_id:
                subnet_id = \
                    ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]

                # Sanity check: the external subnet must be attached to the
                # external network this node is connected to.
                if neutron_client.show_subnet(
                        subnet_id)['subnet']['network_id'] != net_id:
                    raise NonRecoverableError(
                        'Expected external resources subnet {0} and network'
                        ' {1} to be connected'.format(subnet_id, net_id))
            return
        except Exception:
            delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
            raise

    net_id = \
        get_openstack_id_of_single_connected_node_by_openstack_type(
            ctx, NETWORK_OPENSTACK_TYPE)
    # Node properties may override the generated name/network id.
    subnet = {
        'name': get_resource_id(ctx, SUBNET_OPENSTACK_TYPE),
        'network_id': net_id,
    }
    subnet.update(ctx.node.properties['subnet'])
    transform_resource_name(ctx, subnet)

    s = neutron_client.create_subnet({'subnet': subnet})['subnet']
    # Record the created resource in runtime properties for later operations.
    ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s['id']
    ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
        SUBNET_OPENSTACK_TYPE
    ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = subnet['name']


@operation
@with_neutron_client
def delete(neutron_client, **kwargs):
    """Delete the subnet (or release it, if externally managed)."""
    delete_resource_and_runtime_properties(ctx, neutron_client,
                                           RUNTIME_PROPERTIES_KEYS)


@operation
@with_neutron_client
def creation_validation(neutron_client, **kwargs):
    """Validate the node before deployment: resource and a sane 'cidr'."""
    validate_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE)

    if 'cidr' not in ctx.node.properties['subnet']:
        err = '"cidr" property must appear under the "subnet" property of a ' \
              'subnet node'
        ctx.logger.error('VALIDATION ERROR: ' + err)
        raise NonRecoverableError(err)
    validate_ip_or_range_syntax(ctx, ctx.node.properties['subnet']['cidr'])
zx8/youtube-dl
refs/heads/master
youtube_dl/extractor/iprima.py
96
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re
from random import random
from math import floor

from .common import InfoExtractor
from ..compat import (
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    remove_end,
)


class IPrimaIE(InfoExtractor):
    # Extractor for iprima.cz's "play" site (RTMP streams).
    _VALID_URL = r'https?://play\.iprima\.cz/(?:[^/]+/)*(?P<id>[^?#]+)'

    _TESTS = [{
        'url': 'http://play.iprima.cz/particka/particka-92',
        'info_dict': {
            'id': '39152',
            'ext': 'flv',
            'title': 'Partička (92)',
            'description': 'md5:74e9617e51bca67c3ecfb2c6f9766f45',
            'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {
        'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
        'info_dict': {
            'id': '9718337',
            'ext': 'flv',
            'title': 'Tchibo Partička - Jarní móda',
            'thumbnail': 're:^http:.*\.jpg$',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {
        'url': 'http://play.iprima.cz/zpravy-ftv-prima-2752015',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # The site shows a Czech "you don't have permission" page for
        # blocked content; surface that as an expected extractor error.
        if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
            raise ExtractorError(
                '%s said: You do not have permission to access this page' % self.IE_NAME,
                expected=True)

        # The player JS is fetched with random cache-buster tokens and the
        # original page as the Referer.
        player_url = (
            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
            (floor(random() * 1073741824), floor(random() * 1073741824))
        )

        req = compat_urllib_request.Request(player_url)
        req.add_header('Referer', url)
        playerpage = self._download_webpage(req, video_id)

        # Reassemble the stream base URL from the pieces embedded in the
        # player page (second match group set is the one used).
        base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])

        # A non-zero geo zone selects a zone-specific token endpoint.
        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
        if zoneGEO != '0':
            base_url = base_url.replace('token', 'token_' + zoneGEO)

        formats = []
        for format_id in ['lq', 'hq', 'hd']:
            filename = self._html_search_regex(
                r'"%s_id":(.+?),' % format_id, webpage, 'filename')
            if filename == 'null':
                continue

            real_id = self._search_regex(
                r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
                filename, 'real video id')

            if format_id == 'lq':
                quality = 0
            elif format_id == 'hq':
                quality = 1
            elif format_id == 'hd':
                quality = 2
                # HD variants live under the hq/ path prefix.
                filename = 'hq/' + filename

            formats.append({
                'format_id': format_id,
                'url': base_url,
                'quality': quality,
                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                'rtmp_live': True,
                'ext': 'flv',
            })

        self._sort_formats(formats)

        # NOTE(review): real_id is only assigned inside the loop above; if
        # every format id is 'null' this raises NameError — confirm whether
        # that can occur for real pages.
        return {
            'id': real_id,
            'title': remove_end(self._og_search_title(webpage), ' | Prima PLAY'),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
            'description': self._search_regex(
                r'<p[^>]+itemprop="description"[^>]*>([^<]+)',
                webpage, 'description', default=None),
        }
puremourning/ycmd-1
refs/heads/master
ycmd/completers/general_completer.py
4
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd.  If not, see <http://www.gnu.org/licenses/>.

from ycmd.completers.completer import Completer


class GeneralCompleter( Completer ):
  """
  A base class for General completers in YCM. A general completer is used in
  all filetypes. Because this is a subclass of Completer class, you should
  refer to the Completer class documentation.

  Do NOT use this class for semantic completers! Subclass Completer directly.
  """
  # The previous __init__ override only delegated to Completer.__init__ with
  # an identical signature, so it has been removed as redundant.

  def SupportedFiletypes( self ):
    """General completers apply to every filetype, so no restriction."""
    return set()
lotaku/fzz
refs/heads/master
server/opccode.py
1
#encoding: utf8 import player handler={ 1:player.c2gsEnterWorld, 2:player.c2gsPlayerMove, } def handlePacket(player,packet): handler[packet.id](player,packet)
mythmon/airmozilla
refs/heads/master
airmozilla/manage/widgets.py
12
import cgi

from django.forms import widgets
from django.utils.safestring import mark_safe

from funfactory.urlresolvers import reverse

from airmozilla.main.models import Picture
from airmozilla.main.helpers import thumbnail


class PictureWidget(widgets.Select):
    """Form widget showing the event's current picture as a thumbnail,
    with a link to the picture gallery to pick another one.
    """

    def __init__(self, instance, attrs=None, **kwargs):
        super(PictureWidget, self).__init__(attrs)
        # The event instance, needed to link back to its gallery.
        self.instance = instance
        # Whether the "Pick another" link is offered.
        self.editable = kwargs.get('editable', True)

    def render(self, name, value, attrs=None, **__):
        """Render a hidden input carrying the picture id plus the
        thumbnail/gallery links; with no current picture, just a link to
        pick one from the gallery.
        """
        if value:
            picture = Picture.objects.get(id=value)
            thumb = thumbnail(picture.file, '96x54', crop='center')
            img = (
                '<img src="%s" width="%d" height="%d" alt="%s">' % (
                    thumb.url,
                    thumb.width,
                    thumb.height,
                    # Escape user-entered notes for the alt attribute
                    # (was an `and/or` idiom; ternary is clearer and safe).
                    cgi.escape(picture.notes) if picture.notes else ''
                )
            )
            html = (
                '<input type="hidden" name="%s" value="%d">'
                '<a href="%s" title="Current picture">%s</a> ' % (
                    name,
                    picture.id,
                    reverse('manage:picture_edit', args=(picture.id,)),
                    img,
                )
            )
            if self.editable:
                html += (
                    '<a href="%s?event=%d" '
                    'title="This will leave the editing without saving"'
                    '>Pick another</a>' % (
                        reverse('manage:picturegallery'),
                        self.instance.id
                    )
                )
            else:
                html += (
                    'You can pick a different picture later'
                )
            return mark_safe(html)
        else:
            html = (
                '<a href="%s?event=%d" '
                'title="This will leave the editing without saving">'
                'Pick a picture from the gallery</a>' % (
                    reverse('manage:picturegallery'),
                    self.instance.id,
                )
            )
            return mark_safe(html)
hefen1/chromium
refs/heads/master
third_party/cython/src/Tools/cython-epydoc.py
125
#! /usr/bin/env python # -------------------------------------------------------------------- import re from epydoc import docstringparser as dsp CYTHON_SIGNATURE_RE = re.compile( # Class name (for builtin methods) r'^\s*((?P<class>\w+)\.)?' + # The function name r'(?P<func>\w+)' + # The parameters r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' + # The return value (optional) r'(\s*(->)\s*(?P<return>\w+(?:\s*\w+)))?' + # The end marker r'\s*(?:\n|$)') parse_signature = dsp.parse_function_signature def parse_function_signature(func_doc, doc_source, docformat, parse_errors): PYTHON_SIGNATURE_RE = dsp._SIGNATURE_RE assert PYTHON_SIGNATURE_RE is not CYTHON_SIGNATURE_RE try: dsp._SIGNATURE_RE = CYTHON_SIGNATURE_RE found = parse_signature(func_doc, doc_source, docformat, parse_errors) dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE if not found: found = parse_signature(func_doc, doc_source, docformat, parse_errors) return found finally: dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE dsp.parse_function_signature = parse_function_signature # -------------------------------------------------------------------- from epydoc.cli import cli cli() # --------------------------------------------------------------------
vergl4s/instarecon
refs/heads/master
scripts/instarecon.py
1
#!/usr/bin/env python
import argparse
import csv
import logging
import os
import sys

import ipaddress as ipa  # https://docs.python.org/3/library/ipaddress.html
import dns.resolver

from src.ip import IP
from src.host import Host
from src.network import Network
from src import lookup
from src._version import __version__


class InstaRecon(object):
    """
    Holds all Host entries and manages scans, interpret user input, threads and outputs.

    Keyword arguments:
    nameserver -- Str DNS server to be used for lookups (consumed by dns.resolver module).
    targets -- Set of Hosts or Networks that will be scanned.
    bad_targets -- Set of user inputs that could not be understood or resolved.
    verbose -- Bool flag for verbose output printing. Passed to logs.
    shodan_key -- Str key used for Shodan lookups. Passed to lookups.
    """
    entry_banner = "# InstaRecon v" + __version__ + " - by Luis Teixeira (teix.co)"
    exit_banner = "# Done"

    def __init__(self, nameserver=None, timeout=None, shodan_key=None, verbose=0):
        self.targets = set()
        self.bad_targets = set()

        # Configure the shared lookup module (module-level state).
        if nameserver:
            lookup.dns_resolver.nameservers = [nameserver]
        if timeout:
            lookup.dns_resolver.timeout = timeout
            lookup.dns_resolver.lifetime = timeout
        if shodan_key:
            lookup.shodan_key = shodan_key

        # Map -v count to logging levels.
        # https://docs.python.org/2/library/logging.html#logging-levels
        logging_level = 40  # ERROR
        log_format = "[-] %(levelname)s: %(message)s"
        if verbose == 1:
            logging_level = 30  # WARNING
        elif verbose == 2:
            logging_level = 20  # INFO
        elif verbose > 2:
            logging_level = 10  # DEBUG
            log_format = "[-] %(levelname)s:%(module)s:%(funcName)s:%(lineno)d: %(message)s"
        logging.basicConfig(format=log_format, level=logging_level)

    def populate(self, user_supplied_list):
        """Parse each user-supplied string into a Host/Network target."""
        for user_supplied in user_supplied_list:
            self.add_host(user_supplied)

        if not self.targets:
            print("# No hosts to scan")
        else:
            print(
                "# Scanning {}/{} hosts".format(
                    str(len(self.targets)), str(len(user_supplied_list))
                )
            )
            if not lookup.shodan_key:
                print("# No Shodan key provided")

    def add_host(self, user_supplied):
        """
        Add string passed by user to self.targets as proper Host/Network objects

        For this, it attempts to create these objects and moves on if got a ValueError.
        """
        # Test if user_supplied is an IP?
        try:
            self.targets.add(Host(ips=[user_supplied]))
            return
        except ValueError:
            pass

        # Test if user_supplied is a CIDR network range?
        try:
            self.targets.add(Network(user_supplied))
            return
        except ValueError:
            pass

        # Test if user_supplied is a valid DNS? Needs strict flag, otherwise no ValueError will be raise by Host
        try:
            self.targets.add(Host(domain=user_supplied, strict=False))
            return
        except ValueError:
            logging.critical("Couldn't resolve or understand " + user_supplied)
            pass

        # Nothing parsed: remember the bad input.
        self.bad_targets.add(user_supplied)

    def scan_targets(self):
        """Run the appropriate scan for each parsed target."""
        for target in self.targets:
            if isinstance(target, Host):
                self.scan_host(target)
            elif isinstance(target, Network):
                self.scan_network(target)

    def scan_host(self, host):
        """Run all per-host scans (DNS, whois, Shodan, Google)."""
        print("")
        print("# ____________________ Scanning {} ____________________ #".format(str(host)))
        print("")

        # flags_default = not (args.dns or args.whois or args.shodan or args.google)
        # if self.scan_flags['dns'] or flags_default:
        #     self.scan_host_dns(host)
        # if self.scan_flags['whois'] or flags_default:
        #     self.scan_host_whois(host)
        # if self.scan_flags['shodan'] or flags_default:
        #     self.scan_host_shodan(host)
        # if self.scan_flags['google'] or flags_default:
        #     self.scan_host_google(host)

        self.scan_host_dns(host)
        self.scan_host_whois(host)
        self.scan_host_shodan(host)
        self.scan_host_google(host)

    def scan_host_dns(self, host):
        """Forward/reverse DNS, NS and MX lookups, printed as found."""
        # DNS and Whois lookups
        host.lookup_dns()

        if host.domain:
            print("[*] Domain: " + host.domain)

        # IPs and reverse domains
        if host.ips:
            print("")
            print("[*] IPs & reverse DNS:")
            print(host.print_all_ips())

        host.lookup_dns_ns()
        # NS records
        if host.ns:
            print("")
            print("[*] NS records:")
            print(host.print_all_ns())

        host.lookup_dns_mx()
        # MX records
        if host.mx:
            print("")
            print("[*] MX records:")
            print(host.print_all_mx())

        print("")

    def scan_host_whois(self, host):
        """Whois lookups for the domain and each IP, plus related CIDRs."""
        # Domain whois
        host.lookup_whois_domain()
        if host.whois_domain:
            print("[*] Whois domain:")
            # NOTE(review): .decode() implies whois_domain is bytes here —
            # confirm this matches the Host implementation under Python 3.
            print(host.whois_domain.decode())

        # IP whois
        host.lookup_whois_ip_all()
        m = host.print_all_whois_ip()
        if m:
            for result in m:
                print("")
                print("[*] Whois IP for " + result)

        # CIDRs
        if host.cidrs:
            print("")
            print("[*] Related CIDR:\n{}".format(host.print_all_cidrs()))

        print("")

    def scan_host_shodan(self, host):
        """Shodan port/service lookup — only runs when a key is configured."""
        # Shodan
        if lookup.shodan_key:
            print("# Querying Shodan for open ports")
            host.lookup_shodan_all()
            m = host.print_all_shodan()
            if m:
                print("[*] Shodan:")
                print(m)
            else:
                logging.error("No Shodan entries found")
        else:
            print(
                "# Can't do Shodan lookups without a key (pass one with -s or with unix environment variable SHODAN_KEY)"
            )
        print("")

    def scan_host_google(self, host):
        """Google lookups for subdomains and a possible LinkedIn page."""
        # Google subdomains lookup
        if host.domain:
            print("# Querying Google for subdomains and Linkedin pages, this might take a while")
            host.google_lookups()

            if host.linkedin_page:
                print("[*] Possible LinkedIn page: " + host.linkedin_page)

            if host.google_subdomains:
                print("[*] Subdomains:" + "\n" + host.print_google_subdomains())
            else:
                logging.error(
                    "No subdomains found in Google. If you are scanning a lot, Google might be blocking your requests."
                )
        print("")

    def scan_network(self, network):
        """Scan a network object"""
        print("")
        print("# _____________ Reverse DNS lookups on {} _____________ #".format(str(network)))
        self.reverse_dns_on_cidr(network)

    @staticmethod
    def reverse_dns_on_cidr(target):
        """Does reverse dns lookups on a target, and saves results to target using target.add_related_host"""
        if not isinstance(target, Network):
            raise ValueError
        cidr = target.cidr
        for ip, reverse_domains in lookup.rev_dns_on_cidr(cidr):
            new_host = Host(ips=[ip], reverse_domains=reverse_domains)
            target.add_related_host(new_host)
            print(new_host.print_all_ips())
        if not target.related_hosts:
            print("# No results for this range")

    def test_output_csv(self, filename=None):
        """Test if file is writable before running any scan"""
        if filename:
            with open(filename, "wb") as f:
                # If file isn't writable this raises an IOError, which is caught in main
                pass

    def write_output_csv(self, filename=None):
        """Writes output for each target as csv in filename"""
        if filename:
            filename = os.path.expanduser(filename)
            print("# Saving output csv file")
            output_as_lines = []

            for host in self.targets:
                for line in host.print_as_csv_lines():
                    output_as_lines.append(line)
                # Blank-line separator between hosts.
                output_as_lines.append(["\n"])

            # NOTE(review): csv.writer over a file opened in "wb" is Python 2
            # style; under Python 3 this raises TypeError (text mode with
            # newline='' is required) — confirm the intended interpreter.
            with open(filename, "wb") as f:
                writer = csv.writer(f)
                for line in output_as_lines:
                    writer.writerow(line)
                # NOTE(review): output_written is assigned but never used.
                output_written = True


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=InstaRecon.entry_banner,
        usage="%(prog)s [options] target1 [target2 ... targetN]",
        epilog=argparse.SUPPRESS,
    )
    parser.add_argument(
        "targets",
        nargs="+",
        help="targets to be scanned - can be a domain (google.com), an IP (8.8.8.8) or a network range (8.8.8.0/24)",
    )
    parser.add_argument("-o", "--output", required=False, nargs="?", help="output filename as csv")
    parser.add_argument(
        "-n", "--nameserver", required=False, nargs="?", help="alternative DNS server to query"
    )
    parser.add_argument(
        "-s",
        "--shodan_key",
        required=False,
        nargs="?",
        help="shodan key for automated port/service information (SHODAN_KEY environment variable also works for this)",
    )
    parser.add_argument(
        "-t",
        "--timeout",
        required=False,
        nargs="?",
        type=float,
        help="timeout for DNS lookups (default is 2s)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="verbose errors (-vv or -vvv for extra verbosity)",
    )
    args = parser.parse_args()

    # De-duplicate targets; sorted for deterministic output order.
    targets = sorted(set(args.targets))

    # Command-line key wins over the SHODAN_KEY environment variable.
    if args.shodan_key:
        shodan_key = args.shodan_key
    else:
        shodan_key = os.getenv("SHODAN_KEY")

    scan = InstaRecon(
        nameserver=args.nameserver,
        shodan_key=shodan_key,
        timeout=args.timeout,
        verbose=args.verbose,
    )

    try:
        print(scan.entry_banner)
        # Verify the output file is writable BEFORE doing any slow scanning.
        scan.test_output_csv(args.output)
        scan.populate(targets)
        scan.scan_targets()
    except KeyboardInterrupt:
        logging.warning("Scan interrupted")
    except lookup.NoInternetAccess:
        logging.critical("Something went wrong. Sure you got internet connection?")
        sys.exit()
    except IOError:
        logging.critical("Can't write to file.. Better not start scanning anything, right?")
        sys.exit()

    scan.write_output_csv(args.output)
    print(scan.exit_banner)
JetBrains/intellij-community
refs/heads/master
python/testData/console/ipython/psi/shell4.py
14
!!dir
maniteja123/scipy
refs/heads/master
scipy/sparse/linalg/isolve/setup.py
108
#!/usr/bin/env python from __future__ import division, print_function, absolute_import from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.system_info import get_info, NotFoundError from numpy.distutils.misc_util import Configuration from scipy._build_utils import get_g77_abi_wrappers config = Configuration('isolve',parent_package,top_path) lapack_opt = get_info('lapack_opt') if not lapack_opt: raise NotFoundError('no lapack/blas resources found') # iterative methods methods = ['BiCGREVCOM.f.src', 'BiCGSTABREVCOM.f.src', 'CGREVCOM.f.src', 'CGSREVCOM.f.src', # 'ChebyREVCOM.f.src', 'GMRESREVCOM.f.src', # 'JacobiREVCOM.f.src', 'QMRREVCOM.f.src', # 'SORREVCOM.f.src' ] Util = ['STOPTEST2.f.src','getbreak.f.src'] sources = Util + methods + ['_iterative.pyf.src'] sources = [join('iterative', x) for x in sources] sources += get_g77_abi_wrappers(lapack_opt) config.add_extension('_iterative', sources=sources, extra_info=lapack_opt) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
onitake/ansible
refs/heads/devel
lib/ansible/modules/cloud/rackspace/rax_dns.py
118
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: rax_dns
short_description: Manage domains on Rackspace Cloud DNS
description:
     - Manage domains on Rackspace Cloud DNS
version_added: 1.5
options:
  comment:
    description:
      - Brief description of the domain. Maximum length of 160 characters
  email:
    description:
      - Email address of the domain administrator
  name:
    description:
      - Domain name to create
  state:
    description:
      - Indicate desired state of the resource
    choices:
      - present
      - absent
    default: present
  ttl:
    description:
      - Time to live of domain in seconds
    default: 3600
notes:
  - "It is recommended that plays utilizing this module be run with
    C(serial: 1) to avoid exceeding the API request limit imposed by
    the Rackspace CloudDNS API"
author: "Matt Martz (@sivel)"
extends_documentation_fragment:
  - rackspace
  - rackspace.openstack
'''

EXAMPLES = '''
- name: Create domain
  hosts: all
  gather_facts: False
  tasks:
    - name: Domain create request
      local_action:
        module: rax_dns
        credentials: ~/.raxpub
        name: example.org
        email: admin@example.org
      register: rax_dns
'''

try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (rax_argument_spec,
                                      rax_required_together,
                                      rax_to_dict,
                                      setup_rax_module,
                                      )


def rax_dns(module, comment, email, name, state, ttl):
    """Ensure the domain is present/absent and exit via module.exit_json."""
    changed = False

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not email:
            module.fail_json(msg='An "email" attribute is required for '
                                 'creating a domain')

        # Find the existing domain, or create it if there is none.
        try:
            domain = dns.find(name=name)
        except pyrax.exceptions.NoUniqueMatch as e:
            module.fail_json(msg='%s' % e.message)
        except pyrax.exceptions.NotFound:
            try:
                domain = dns.create(name=name, emailAddress=email, ttl=ttl,
                                    comment=comment)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

        # Collect only attributes that actually differ, to keep the
        # update call (and `changed`) minimal.
        update = {}
        if comment != getattr(domain, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(domain, 'ttl', None):
            update['ttl'] = ttl
        if email != getattr(domain, 'emailAddress', None):
            update['emailAddress'] = email

        if update:
            try:
                domain.update(**update)
                changed = True
                # Refresh the local object after the update.
                domain.get()
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

    elif state == 'absent':
        # A missing domain is already the desired state.
        try:
            domain = dns.find(name=name)
        except pyrax.exceptions.NotFound:
            domain = {}
        except Exception as e:
            module.fail_json(msg='%s' % e.message)

        if domain:
            try:
                domain.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

    module.exit_json(changed=changed, domain=rax_to_dict(domain))


def main():
    """Ansible module entry point: parse params and run rax_dns()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            email=dict(),
            name=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    comment = module.params.get('comment')
    email = module.params.get('email')
    name = module.params.get('name')
    state = module.params.get('state')
    ttl = module.params.get('ttl')

    setup_rax_module(module, pyrax, False)

    rax_dns(module, comment, email, name, state, ttl)


if __name__ == '__main__':
    main()
MythicApps/MythicAppsSite
refs/heads/master
General/tests.py
24123
from django.test import TestCase # Create your tests here.
dana-i2cat/felix
refs/heads/master
modules/resource/orchestrator/src/extensions/geni/util/cred_util.py
2
#----------------------------------------------------------------------
# Copyright (c) 2010 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Credential creation and verification utilities.
'''

# NOTE(review): this module is Python 2 only (bare `print`, `except X, e`
# syntax, `xmlrpclib`); keep that in mind before reuse.

import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil

import extensions.sfa.trust.credential as cred
import extensions.sfa.trust.gid as gid
import extensions.sfa.trust.rights as rights
from extensions.sfa.util.xrn import hrn_authfor_hrn


def naiveUTC(dt):
    """Converts dt to a naive datetime in UTC.

    if 'dt' has a timezone then
    convert to UTC
    strip off timezone (make it "naive" in Python parlance)
    """
    if dt.tzinfo:
        tz_utc = dateutil.tz.tzutc()
        dt = dt.astimezone(tz_utc)
        dt = dt.replace(tzinfo=None)
    return dt


class CredentialVerifier(object):
    """Utilities to verify signed credentials from a given set of
    root certificates.
    Will compare target and source URNs, and privileges.
    See verify and verify_from_strings methods in particular."""

    # Filename of the concatenated CA-certificates file produced by
    # getCAsFileFromDir; skipped when scanning a root-cert directory.
    CATEDCERTSFNAME = 'CATedCACerts.pem'

    # root_cert_fileordir is a trusted root cert file or directory of
    # trusted roots for verifying credentials
    def __init__(self, root_cert_fileordir):
        self.logger = logging.getLogger('cred-verifier')
        if root_cert_fileordir is None:
            raise Exception("Missing Root certs argument")
        elif os.path.isdir(root_cert_fileordir):
            # Directory of root certs: collect every file except the
            # previously-concatenated combo file.
            files = os.listdir(root_cert_fileordir)
            self.root_cert_files = []
            for file in files:
                # FIXME: exclude files that aren't cert files? The combo cert file?
                if file == CredentialVerifier.CATEDCERTSFNAME:
                    continue
                self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
            self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
        elif os.path.isfile(root_cert_fileordir):
            self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
            self.root_cert_files = [root_cert_fileordir]
        else:
            raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)

    @classmethod
    def getCAsFileFromDir(cls, caCerts):
        '''Take a directory of CA certificates and concatenate them into a single
        file suitable for use by the Python SSL library to validate client
        credentials. Existing file is replaced.'''
        if caCerts is None:
            raise Exception ('Missing caCerts argument')
        # If given a single file, use it directly — nothing to concatenate.
        if os.path.isfile(os.path.expanduser(caCerts)):
            return caCerts
        if not os.path.isdir(os.path.expanduser(caCerts)):
            raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)

        logger = logging.getLogger('cred-verifier')

        # Now we have a dir of caCerts files
        # For each file in the dir (isfile), concatenate them into a new file
        comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
        caFiles = os.listdir(caCerts)
        #logger.debug('Got %d potential caCert files in the dir', len(caFiles))
        outfile = open(comboFullPath, "w")
        okFileCount = 0
        for filename in caFiles:
            filepath = os.path.join(caCerts, filename)
            # Confirm it's a CA file?
            # if not file.endswith('.pem'):
            #     continue
            if not os.path.isfile(os.path.expanduser(filepath)):
                logger.debug('Skipping non file %s', filepath)
                continue
            if filename == CredentialVerifier.CATEDCERTSFNAME:
                # logger.debug('Skipping previous cated certs file')
                continue
            okFileCount += 1
            logger.info("Adding trusted cert file %s", filename)
            certfile = open(filepath)
            for line in certfile:
                outfile.write(line)
            certfile.close()
        outfile.close()
        if okFileCount == 0:
            # NOTE(review): sys.exit on an empty dir terminates the whole
            # process rather than raising — intentional hard stop, apparently.
            sys.exit('Found NO trusted certs in %s!' % caCerts)
        else:
            logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
        return comboFullPath

    def verify_from_strings(self, gid_string, cred_strings, target_urn,
                            privileges):
        '''Create Credential and GID objects from the given strings,
        and then verify the GID has the right privileges according
        to the given credentials on the given target.'''
        # NOTE(review): a None gid_string silently returns None (no
        # verification performed) — callers must handle that.
        if gid_string is None:
            return
        def make_cred(cred_string):
            return cred.Credential(string=cred_string)
        return self.verify(gid.GID(string=gid_string),
                           map(make_cred, cred_strings),
                           target_urn,
                           privileges)

    def verify_source(self, source_gid, credential):
        '''Ensure the credential is giving privileges to the caller/client.
        Return True iff the given source (client) GID's URN
        is == the given credential's Caller (Owner) URN'''
        source_urn = source_gid.get_urn()
        cred_source_urn = credential.get_gid_caller().get_urn()
        #self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
        #                  source_urn, cred_source_urn, credential.get_gid_object().get_urn())
        result = (cred_source_urn == source_urn)
        if result:
            # self.logger.debug('Source URNs match')
            pass
        else:
            self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r',
                              source_urn, cred_source_urn)
        return result

    def verify_target(self, target_urn, credential):
        '''Ensure the credential is giving privileges on the right subject/target.
        Return True if no target is specified, or the target URN
        matches the credential's Object's (target's) URN, else return False.
        No target is required, for example, to ListResources.'''
        if not target_urn:
            # self.logger.debug('No target specified, considering it a match.')
            return True
        else:
            cred_target_urn = credential.get_gid_object().get_urn()
            # self.logger.debug('Verifying target %r against credential target %r',
            #                   target_urn, cred_target_urn)
            result = target_urn == cred_target_urn
            if result:
                # self.logger.debug('Target URNs match.')
                pass
            else:
                self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r',
                                  target_urn, cred_target_urn)
            return result

    def verify_privileges(self, privileges, credential):
        ''' Return True iff the given credential gives the privilege
        to perform ALL of the privileges (actions) in the given list.
        In particular, the given list of 'privileges' is really a list
        of names of operations. The privileges in credentials are
        each turned in to Rights objects (see sfa/trust/rights.py).
        And the SFA rights table is used to map from names of privileges
        as specified in credentials, to names of operations.'''
        result = True
        privs = credential.get_privileges()
        for priv in privileges:
            if not privs.can_perform(priv):
                self.logger.debug('Privilege %s not found on credential %s of %s',
                                  priv, credential.get_gid_object().get_urn(),
                                  credential.get_gid_caller().get_urn())
                # Keep checking so every missing privilege gets logged.
                result = False
        return result

    def verify(self, gid, credentials, target_urn, privileges):
        '''Verify that the given Source GID supplied at least one credential
        in the given list of credentials that has all the privileges required
        in the privileges list on the given target.
        IE if any of the supplied credentials has a caller that matches gid
        and a target that matches target_urn, and has all the privileges in
        the given list, then return the list of credentials that were ok.
        Throw an Exception if we fail to verify any credential.'''

        # Note that here we treat a list of credentials as being options
        # Alternatively could accumulate privileges for example
        # The semantics of the list of credentials is under specified.

        self.logger.debug('Verifying privileges')
        result = list()
        failure = ""
        tried_creds = ""
        for cred in credentials:
            # Accumulate a readable list of caller URNs tried, for the
            # error message raised below if nothing verifies.
            if tried_creds != "":
                tried_creds = "%s, %s" % (tried_creds, cred.get_gid_caller().get_urn())
            else:
                tried_creds = cred.get_gid_caller().get_urn()

            if not self.verify_source(gid, cred):
                failure = "Cred %s fails: Source URNs dont match" % cred.get_gid_caller().get_urn()
                continue
            if not self.verify_target(target_urn, cred):
                failure = "Cred %s on %s fails: Target URNs dont match" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
                continue
            if not self.verify_privileges(privileges, cred):
                failure = "Cert %s doesn't have sufficient privileges" % cred.get_gid_caller().get_urn()
                continue

            # NOTE(review): this bare `print` emits a blank line on stdout —
            # looks like leftover debug output; confirm before removing.
            print
            try:
                # Only after the cheap URN/privilege checks do we pay for
                # cryptographic verification against the trusted roots.
                if not cred.verify(self.root_cert_files):
                    failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
                    continue
            except Exception, exc:
                failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
                self.logger.info(failure)
                continue
            # If got here it verified
            result.append(cred)

        if result and result != list():
            # At least one credential verified ok and was added to the list
            # return that list
            return result
        else:
            # We did not find any credential with sufficient privileges
            # Raise an exception.
            fault_code = 'Insufficient privileges'
            fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
            self.logger.error(fault_string)
            raise xmlrpclib.Fault(fault_code, fault_string)


def create_credential(caller_gid, object_gid, expiration, typename,
                      issuer_keyfile, issuer_certfile,
                      trusted_roots, delegatable=False):
    '''Create and Return a Credential object issued by given key/cert for the given caller
    and object GID objects, given life in seconds, and given type.
    Privileges are determined by type per sfa/trust/rights.py
    Privileges are delegatable if requested.'''

    # FIXME: Validate args: my gids, >0 life,
    # type of cred one I can issue
    # and readable key and cert files
    if caller_gid is None:
        raise ValueError("Missing Caller GID")
    if object_gid is None:
        raise ValueError("Missing Object GID")
    if expiration is None:
        raise ValueError("Missing expiration")
    naive_expiration = naiveUTC(expiration)
    duration = naive_expiration - datetime.datetime.utcnow()
    life_secs = duration.seconds + duration.days * 24 * 3600
    if life_secs < 1:
        raise ValueError("Credential expiration is in the past")
    if trusted_roots is None:
        raise ValueError("Missing list of trusted roots")

    if typename is None or typename.strip() == '':
        raise ValueError("Missing credential type")
    typename = typename.strip().lower()
    if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
        raise ValueError("Unknown credential type %s" % typename)

    if not os.path.isfile(issuer_keyfile):
        raise ValueError("Cant read issuer key file %s" % issuer_keyfile)
    if not os.path.isfile(issuer_certfile):
        raise ValueError("Cant read issuer cert file %s" % issuer_certfile)

    issuer_gid = gid.GID(filename=issuer_certfile)

    # The issuer may sign for itself, or for targets whose HRN it is an
    # authority over (see hrn_authfor_hrn).
    if not (object_gid.get_urn() == issuer_gid.get_urn() or
            (issuer_gid.get_type().find('authority') == 0 and
             hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
        raise ValueError("Issuer not authorized to issue credential: Issuer=%s Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))

    ucred = cred.Credential()
    # FIXME: Validate the caller_gid and object_gid
    # are my user and slice
    # Do get_issuer and compare to the issuer cert?
    # Or do gid.is_signed_by_cert(issuer_certfile)?
    ucred.set_gid_caller(caller_gid)
    ucred.set_gid_object(object_gid)
    ucred.set_expiration(expiration)
    # Use sfa/trust/rights.py to figure out what privileges
    # the credential should have.
    # user means refresh, resolve, info
    # per the privilege_table that lets users do
    # remove, update, resolve, list, getcredential,
    # listslices, listnodes, getpolicy
    # Note that it does not allow manipulating slivers

    # And every right is delegatable if any are delegatable (default False)
    privileges = rights.determine_rights(typename, None)
    privileges.delegate_all_privileges(delegatable)
    ucred.set_privileges(privileges)
    ucred.encode()
    ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
    ucred.sign()

    try:
        # Sanity check: the credential we just issued must verify against
        # the supplied trusted roots before we hand it out.
        ucred.verify(trusted_roots)
    except Exception, exc:
        raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)

    return ucred
AndyDiamondstein/vitess
refs/heads/master
test/custom_sharding.py
2
#!/usr/bin/env python
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.

# End-to-end integration test of a custom-sharded Vitess keyspace:
# brings up real mysqld/vttablet/vtgate processes, so it requires the
# full Vitess test environment (not runnable standalone).

import base64
import unittest

from vtproto import topodata_pb2

from vtdb import vtgate_client

import environment
import tablet
import utils

# shards need at least 1 replica for semi-sync ACK, and 1 rdonly for SplitQuery.
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()

shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()

all_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly,
               shard_1_master, shard_1_replica, shard_1_rdonly]


def setUpModule():
  # Start topo server, vtctld, and one mysqld per tablet; on any failure,
  # tear everything down before re-raising so no processes leak.
  try:
    environment.topo_server().setup()
    setup_procs = [t.init_mysql() for t in all_tablets]
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
  except:
    tearDownModule()
    raise


def tearDownModule():
  # Shut down all spawned processes and remove temp state, unless the
  # test runner asked to keep the environment alive for debugging.
  utils.required_teardown()
  if utils.options.skip_teardown:
    return

  teardown_procs = [t.teardown_mysql() for t in all_tablets]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()


class TestCustomSharding(unittest.TestCase):
  """Test a custom-shared keyspace."""

  def _vtdb_conn(self):
    # Open a fresh vtgate client connection (30s timeout).
    protocol, addr = utils.vtgate.rpc_endpoint(python=True)
    return vtgate_client.connect(protocol, addr, 30.0)

  def _insert_data(self, shard, start, count, table='data'):
    # Insert rows [start, start+count) as (id, 'row <id>') into the given
    # shard, one transaction per row.
    sql = 'insert into ' + table + '(id, name) values (:id, :name)'
    conn = self._vtdb_conn()
    cursor = conn.cursor(
        tablet_type='master', keyspace='test_keyspace',
        shards=[shard],
        writable=True)
    for x in xrange(count):
      bindvars = {
          'id': start+x,
          'name': 'row %d' % (start+x),
      }
      conn.begin()
      cursor.execute(sql, bindvars)
      conn.commit()
    conn.close()

  def _check_data(self, shard, start, count, table='data'):
    # Verify each row written by _insert_data is readable on that shard.
    sql = 'select name from ' + table + ' where id=:id'
    conn = self._vtdb_conn()
    cursor = conn.cursor(
        tablet_type='master', keyspace='test_keyspace',
        shards=[shard])
    for x in xrange(count):
      bindvars = {
          'id': start+x,
      }
      cursor.execute(sql, bindvars)
      qr = cursor.fetchall()
      self.assertEqual(len(qr), 1)
      v = qr[0][0]
      self.assertEqual(v, 'row %d' % (start+x))
    conn.close()

  def test_custom_end_to_end(self):
    """Runs through the common operations of a custom sharded keyspace.

    Tests creation with one shard, schema change, reading / writing
    data, adding one more shard, reading / writing data from both
    shards, applying schema changes again, and reading / writing data
    from both shards again.
    """

    # start the first shard only for now
    shard_0_master.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='0')
    shard_0_replica.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='0')
    shard_0_rdonly.start_vttablet(
        wait_for_state=None, target_tablet_type='rdonly',
        init_keyspace='test_keyspace', init_shard='0')
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')

    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     shard_0_master.tablet_alias], auto_log=True)
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
      t.wait_for_vttablet_state('SERVING')

    self._check_shards_count_in_srv_keyspace(1)
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])
    self.assertEqual(len(s['served_types']), 3)

    # create a table on shard 0
    sql = '''create table data(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # reload schema everywhere so the QueryService knows about the tables
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)

    # create shard 1
    shard_1_master.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='1')
    shard_1_replica.start_vttablet(
        wait_for_state=None, target_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard='1')
    shard_1_rdonly.start_vttablet(
        wait_for_state=None, target_tablet_type='rdonly',
        init_keyspace='test_keyspace', init_shard='1')
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])
    self.assertEqual(len(s['served_types']), 3)

    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/1',
                     shard_1_master.tablet_alias], auto_log=True)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:
      t.wait_for_vttablet_state('SERVING')
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
                     'test_keyspace/1'], auto_log=True)

    # must start vtgate after tablets are up, or else wait until 1min refresh
    utils.VtGate().start()

    # insert and check data on shard 0
    self._insert_data('0', 100, 10)
    self._check_data('0', 100, 10)

    # insert and check data on shard 1
    self._insert_data('1', 200, 10)
    self._check_data('1', 200, 10)

    # create a second table on all shards
    sql = '''create table data2(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # reload schema everywhere so the QueryService knows about the tables
    for t in all_tablets:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)

    # insert and read data on all shards
    self._insert_data('0', 300, 10, table='data2')
    self._insert_data('1', 400, 10, table='data2')
    self._check_data('0', 300, 10, table='data2')
    self._check_data('1', 400, 10, table='data2')

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)

    self._check_shards_count_in_srv_keyspace(2)

    # Now test SplitQuery API works (used in MapReduce usually, but bringing
    # up a full MR-capable cluster is too much for this test environment)
    sql = 'select id, name from data'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 4)
    shard0count = 0
    shard1count = 0
    for q in s:
      if q['shard_part']['shards'][0] == '0':
        shard0count += 1
      if q['shard_part']['shards'][0] == '1':
        shard1count += 1
    self.assertEqual(shard0count, 2)
    self.assertEqual(shard1count, 2)

    # run the queries, aggregate the results, make sure we have all rows
    rows = {}
    for q in s:
      bindvars = {}
      for name, value in q['query']['bind_variables'].iteritems():
        # vtctl encodes bytes as base64.
        bindvars[name] = int(base64.standard_b64decode(value['value']))
      qr = utils.vtgate.execute_shards(
          q['query']['sql'],
          'test_keyspace', ','.join(q['shard_part']['shards']),
          tablet_type='master', bindvars=bindvars)
      for r in qr['rows']:
        rows[int(r[0])] = r[1]
    self.assertEqual(len(rows), 20)
    expected = {}
    for i in xrange(10):
      expected[100 + i] = 'row %d' % (100 + i)
      expected[200 + i] = 'row %d' % (200 + i)
    self.assertEqual(rows, expected)

    self._test_vtclient_execute_shards_fallback()

  def _check_shards_count_in_srv_keyspace(self, shard_count):
    # Assert that MASTER, REPLICA and RDONLY partitions in the serving
    # graph each reference exactly shard_count shards.
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    check_types = set([topodata_pb2.MASTER, topodata_pb2.REPLICA,
                       topodata_pb2.RDONLY])
    for p in ks['partitions']:
      if p['served_type'] in check_types:
        self.assertEqual(len(p['shard_references']), shard_count)
        check_types.remove(p['served_type'])

    self.assertEqual(len(check_types), 0,
                     'The number of expected shard_references in GetSrvKeyspace'
                     ' was not equal %d for all expected tablet types.'
                     % shard_count)

  def _test_vtclient_execute_shards_fallback(self):
    """Test per-shard mode of Go SQL driver (through vtclient)."""
    for shard in [0, 1]:
      id_val = (shard + 1) * 1000  # example: 1000, 2000
      name_val = 'row %d' % id_val

      # write
      utils.vtgate.vtclient('insert into data(id, name) values (:v1, :v2)',
                            bindvars=[id_val, name_val],
                            keyspace='test_keyspace', shard=str(shard))

      want = {
          u'fields': [u'id', u'name'],
          u'rows': [[unicode(id_val), unicode(name_val)]]
      }

      # read non-streaming
      out, _ = utils.vtgate.vtclient(
          'select * from data where id = :v1', bindvars=[id_val],
          keyspace='test_keyspace', shard=str(shard), json_output=True)
      self.assertEqual(out, want)

      # read streaming
      out, _ = utils.vtgate.vtclient(
          'select * from data where id = :v1', bindvars=[id_val],
          keyspace='test_keyspace', shard=str(shard), streaming=True,
          json_output=True)
      self.assertEqual(out, want)


if __name__ == '__main__':
  utils.main()
armenzg/build-mozharness
refs/heads/master
configs/b2g/gaia_integration_config.py
2
# This is a template config file for b2g emulator unittest testing import platform HG_SHARE_BASE_DIR = "/builds/hg-shared" if platform.system().lower() == 'darwin': xre_url = "https://api.pub.build.mozilla.org/tooltool/sha512/4d8d7a37d90c34a2a2fda3066a8fe85c189b183d05389cb957fc6fed31f10a6924e50c1b84488ff61c015293803f58a3aed5d4819374d04c8e0ee2b9e3997278" else: xre_url = "https://api.pub.build.mozilla.org/tooltool/sha512/dc9503b21c87b5a469118746f99e4f41d73888972ce735fa10a80f6d218086da0e3da525d9a4cd8e4ea497ec199fef720e4a525873d77a1af304ac505e076462" config = { # mozharness script options "xre_url": xre_url, "vcs_share_base": HG_SHARE_BASE_DIR, "exes": { 'python': '/tools/buildbot/bin/python', 'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'], 'tooltool.py': "/tools/tooltool.py", }, "find_links": [ "http://pypi.pvt.build.mozilla.org/pub", "http://pypi.pub.build.mozilla.org/pub", ], "pip_index": False, "buildbot_json_path": "buildprops.json", "default_actions": [ 'clobber', 'read-buildbot-config', 'pull', 'download-and-extract', 'create-virtualenv', 'install', 'run-tests', ], "vcs_output_timeout": 1760, }
mthornhill/django-pressroom
refs/heads/master
src/pressroom/tests/test_articles.py
1
import random

from django.contrib.webdesign.lorem_ipsum import words, sentence, paragraphs
from django.template.defaultfilters import slugify
from django.test import TestCase

from pressroom.models import Article


class PressroomTests(TestCase):
    """Tests for Article publication and translation-canonicalization."""

    def test_environment(self):
        """Just make sure everything is set up correctly."""
        # FIX: self.assert_ is a long-deprecated unittest alias
        # (removed in modern Python); assertTrue is the supported name.
        self.assertTrue(True)

    def test_published(self):
        """get_published() returns only articles with publish=True."""
        headline = words(random.randint(5, 10), common=False)
        # One published and one unpublished article with the same headline.
        # NOTE(review): paragraphs(5) returns a list; storing it in `body`
        # presumably relies on Django coercing it — confirm intended.
        a1, created = Article.objects.get_or_create(
            headline=headline,
            slug=slugify(headline),
            summary=sentence(),
            author=words(1, common=False),
            body=paragraphs(5),
            publish=True)
        a2, created = Article.objects.get_or_create(
            headline=headline,
            slug=slugify(headline),
            summary=sentence(),
            author=words(1, common=False),
            body=paragraphs(5),
            publish=False)
        published_articles = Article.objects.get_published()
        self.assertEqual(1, len(published_articles))

    def test_canonical_published(self):
        """A translation of a published article is excluded from get_published()."""
        headline = words(random.randint(5, 10), common=False)
        a1, created = Article.objects.get_or_create(
            headline=headline,
            slug=slugify(headline),
            summary=sentence(),
            author=words(1, common=False),
            body=paragraphs(5),
            publish=True)
        a2, created = Article.objects.get_or_create(
            headline=headline,
            slug=slugify(headline),
            summary=sentence(),
            author=words(1, common=False),
            body=paragraphs(5),
            publish=True)
        self.assertEqual(2, len(Article.objects.get_published()))
        # set a2 to be a translation of a1
        a2.translation_of = a1
        a2.save()
        # we only expect 1 canonical object now as a2 is a translation of a1
        self.assertEqual(1, len(Article.objects.get_published()))
nox/servo
refs/heads/master
tests/wpt/web-platform-tests/XMLHttpRequest/resources/chunked.py
219
def main(request, response):
    """wptserve handler: stream a chunked plain-text body with a trailer.

    Emits four chunks in Transfer-Encoding: chunked framing (hex size line,
    chunk data, CRLF), then the terminating 0-size chunk followed by the
    X-Test-Me trailer header announced via the Trailer response header.
    `request` is unused.
    """
    chunks = ["First chunk\r\n",
              "Second chunk\r\n",
              "Yet another (third) chunk\r\n",
              "Yet another (fourth) chunk\r\n",
              ]
    # FIX (idiom): dropped the stray C-style trailing semicolons.
    response.headers.set("Transfer-Encoding", "chunked")
    response.headers.set("Trailer", "X-Test-Me")
    response.headers.set("Content-Type", "text/plain")
    # Headers must be flushed before writing the raw chunked body.
    response.write_status_headers()

    for value in chunks:
        # Chunk framing: "<size-in-hex>\r\n<data>\r\n".
        response.writer.write("%x\r\n" % len(value))
        response.writer.write(value)
        response.writer.write("\r\n")
    # Last chunk (size 0) then the trailer part, ended by a blank line.
    response.writer.write("0\r\n")
    response.writer.write("X-Test-Me: Trailer header value\r\n\r\n")
northern-bites/nao-man
refs/heads/master
noggin/players/SoccerFSA.py
1
# Soccer FSA that implements an FSA but holds all the important
# soccer-playing functionality
#
from man.motion import HeadMoves
import man.motion as motion
from ..util import FSA
from ..navigator import NavHelper as helper

from . import CoreSoccerStates

class SoccerFSA(FSA.FSA):
    def __init__(self,brain):
        # Base FSA wiring plus soccer-specific defaults (colored state-change
        # printing through the brain's output channel).
        FSA.FSA.__init__(self, brain)
        #self.setTimeFunction(self.brain.nao.getSimulatedTime)
        self.addStates(CoreSoccerStates)
        self.brain = brain
        self.motion = brain.motion

        #set default behavior for soccer players - override it if you want
        self.setPrintStateChanges(True)
        # set printing to be done with colors
        self.stateChangeColor = 'red'
        self.setPrintFunction(self.brain.out.printf)

    def run(self):
        # Delegates one FSA tick to the base class.
        FSA.FSA.run(self)

    def executeMove(self,sweetMove):
        """
        Method to enqueue a SweetMove
        Can either take in a head move or a body command
        (see SweetMove files for descriptions of command tuples)
        """
        self.brain.nav.performSweetMove(sweetMove)
        # NOTE(review): the block below is the superseded direct-to-motion
        # implementation, kept for reference; navigation now owns sweet moves.
##        for position in sweetMove:
##            if len(position) == 7:
##                move = motion.BodyJointCommand(position[4], #time
##                                               position[0], #larm
##                                               position[1], #lleg
##                                               position[2], #rleg
##                                               position[3], #rarm
##                                               position[6], # Chain Stiffnesses
##                                               position[5], #interpolation type
##                                               )
##            elif len(position) == 5:
##                move = motion.BodyJointCommand(position[2], # time
##                                               position[0], # chainID
##                                               position[1], # chain angles
##                                               position[4], # chain stiffnesses
##                                               position[3], # interpolation type
##                                               )
##            else:
##                self.printf("What kind of sweet ass-Move is this?")
##            self.brain.motion.enqueue(move)

    def setWalk(self,x,y,theta):
        """
        Wrapper method to easily change the walk vector of the robot
        """
        # All-zero vector means "stand still", which navigator treats as stop.
        if x == 0 and y == 0 and theta == 0:
            self.stopWalking()
        else:
            self.brain.nav.walk(x,y,theta)

#         else:
#             self.printf("WARNING NEW WALK of %g,%g,%g" % (x,y,theta) +
#                         " is ignored")

    def getWalk(self):
        """
        returns a tuple of current walk parameters
        """
        nav = self.brain.nav
        return (nav.walkX, nav.walkY, nav.walkTheta)

    def setSteps(self, x, y, theta, numSteps=1):
        """
        Have the robot walk a specified number of steps
        """
        # Refuse to queue steps while a walk is in progress; returns whether
        # the step command was accepted.
        if self.brain.motion.isWalkActive():
            return False
        else:
            self.brain.nav.takeSteps(x, y, theta, numSteps)
            return True

    def standup(self):
        # Stop navigation, leaving the robot standing.
        self.brain.nav.stop()

    def walkPose(self):
        """
        we return to std walk pose when we stop walking
        """
        self.brain.nav.stop()

    def stopWalking(self):
        """
        Wrapper method to navigator to easily stop the robot from walking
        """
        nav = self.brain.nav
        if not nav.isStopped():
            self.brain.nav.stop()

    def atDestinationGoalie(self):
        # Goalie-specific arrival test against the navigator's destination.
        nav = self.brain.nav
        return helper.atDestinationGoalie(self.brain.my, nav.dest)

    def atDestinationCloser(self):
        # Tighter-tolerance arrival test (see NavHelper for thresholds).
        nav = self.brain.nav
        return helper.atDestinationCloser(self.brain.my, nav.dest)

    def atHeading(self):
        # True when the robot's heading matches the destination heading.
        nav = self.brain.nav
        return helper.atHeading(self.brain.my, nav.dest.h)

    ##### Direct Motion Calls
    def gainsOff(self):
        """
        Turn off the gains
        """
        freeze = motion.FreezeCommand()
        self.brain.motion.sendFreezeCommand(freeze)

    def gainsOn(self):
        """
        Turn on the gains
        """
        # 0.85 is the stiffness restored on unfreeze.
        unFreeze = motion.UnfreezeCommand(0.85)
        self.brain.motion.sendFreezeCommand(unFreeze)

    ##### HEAD-TRACKING Methods
    def penalizeHeads(self):
        """
        Put head into penalized position, stop tracker
        """
        self.brain.tracker.performHeadMove(HeadMoves.PENALIZED_HEADS)

    def zeroHeads(self):
        """
        Put heads into neutral position
        """
        self.brain.tracker.performHeadMove(HeadMoves.ZERO_HEADS)

    def kickScan(self):
        # Run the pre-kick head scan move.
        self.brain.tracker.performHeadMove(HeadMoves.KICK_SCAN)
vikas1885/test1
refs/heads/master
common/djangoapps/student/migrations/0017_rename_date_to_created.py
188
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Rename 'date' field to 'created' db.rename_column('student_courseenrollment', 'date', 'created') def backwards(self, orm): # Rename 'created' field to 'date' db.rename_column('student_courseenrollment', 'created', 'date') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), 'real_name': 
('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'student.courseenrollment': { 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 
'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingnamechange': { 'Meta': {'object_name': 'PendingNameChange'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.registration': { 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.userprofile': { 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}), 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'db_index': 
'True', 'max_length': '255', 'blank': 'True'}), 'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) }, 'student.usertestgroup': { 'Meta': {'object_name': 'UserTestGroup'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) } } complete_apps = ['student']
lakshayg/tensorflow
refs/heads/master
tensorflow/contrib/layers/python/layers/regularizers_test.py
73
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for regularizers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.layers.python.layers import regularizers from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class RegularizerTest(test.TestCase): def test_l1(self): with self.assertRaises(ValueError): regularizers.l1_regularizer(-1.) with self.assertRaises(ValueError): regularizers.l1_regularizer(0) self.assertIsNone(regularizers.l1_regularizer(0.)(None)) values = np.array([1., -1., 4., 2.]) weights = constant_op.constant(values) with session.Session() as sess: result = sess.run(regularizers.l1_regularizer(.5)(weights)) self.assertAllClose(np.abs(values).sum() * .5, result) def test_l2(self): with self.assertRaises(ValueError): regularizers.l2_regularizer(-1.) 
with self.assertRaises(ValueError): regularizers.l2_regularizer(0) self.assertIsNone(regularizers.l2_regularizer(0.)(None)) values = np.array([1., -1., 4., 2.]) weights = constant_op.constant(values) with session.Session() as sess: result = sess.run(regularizers.l2_regularizer(.42)(weights)) self.assertAllClose(np.power(values, 2).sum() / 2.0 * .42, result) def test_l1_l2(self): with self.assertRaises(ValueError): regularizers.l1_l2_regularizer(-1., 0.5) with self.assertRaises(ValueError): regularizers.l1_l2_regularizer(0.5, -1.) with self.assertRaises(ValueError): regularizers.l1_l2_regularizer(0, 0.5) with self.assertRaises(ValueError): regularizers.l1_l2_regularizer(0.5, 0) with self.test_session(): shape = [5, 5, 5] num_elem = 5 * 5 * 5 tensor = constant_op.constant(1.0, shape=shape) loss = regularizers.l1_l2_regularizer(1.0, 1.0)(tensor) self.assertEquals(loss.op.name, 'l1_l2_regularizer') self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) def test_l1_l2_scale_l1Zero(self): shape = [5, 5, 5] num_elem = 5 * 5 * 5 tensor = constant_op.constant(1.0, shape=shape) loss = regularizers.l1_l2_regularizer(0.0, 1.0)(tensor) with self.test_session(): self.assertEquals(loss.op.name, 'l1_l2_regularizer') self.assertAlmostEqual(loss.eval(), num_elem / 2, 5) def test_l1_l2_scale_l2Zero(self): shape = [5, 5, 5] num_elem = 5 * 5 * 5 tensor = constant_op.constant(1.0, shape=shape) loss = regularizers.l1_l2_regularizer(1.0, 0.0)(tensor) with self.test_session(): self.assertEquals(loss.op.name, 'l1_l2_regularizer') self.assertAlmostEqual(loss.eval(), num_elem, 5) def test_l1_l2_scales_Zero(self): shape = [5, 5, 5] tensor = constant_op.constant(1.0, shape=shape) loss = regularizers.l1_l2_regularizer(0.0, 0.0)(tensor) self.assertEquals(loss, None) def testL1L2RegularizerWithScope(self): with self.test_session(): shape = [5, 5, 5] num_elem = 5 * 5 * 5 tensor = constant_op.constant(1.0, shape=shape) with ops.name_scope('foo'): loss = regularizers.l1_l2_regularizer(1.0, 
1.0, scope='l1_l2')(tensor) self.assertEquals(loss.op.name, 'foo/l1_l2') self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) def test_sum_regularizer(self): l1_function = regularizers.l1_regularizer(.1) l2_function = regularizers.l2_regularizer(.2) self.assertIsNone(regularizers.sum_regularizer([])) self.assertIsNone(regularizers.sum_regularizer([None])) values = np.array([-3.]) weights = constant_op.constant(values) with session.Session() as sess: l1_reg1 = regularizers.sum_regularizer([l1_function]) l1_result1 = sess.run(l1_reg1(weights)) l1_reg2 = regularizers.sum_regularizer([l1_function, None]) l1_result2 = sess.run(l1_reg2(weights)) l1_l2_reg = regularizers.sum_regularizer([l1_function, l2_function]) l1_l2_result = sess.run(l1_l2_reg(weights)) self.assertAllClose(.1 * np.abs(values).sum(), l1_result1) self.assertAllClose(.1 * np.abs(values).sum(), l1_result2) self.assertAllClose( .1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0, l1_l2_result) def test_apply_regularization(self): dummy_regularizer = lambda x: math_ops.reduce_sum(2 * x) array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]] tensor_weights_list = [constant_op.constant(x) for x in array_weights_list] expected = sum([2 * x for l in array_weights_list for x in l]) with self.test_session(): result = regularizers.apply_regularization(dummy_regularizer, tensor_weights_list) self.assertAllClose(expected, result.eval()) def test_apply_zero_regularization(self): regularizer = regularizers.l2_regularizer(0.0) array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]] tensor_weights_list = [constant_op.constant(x) for x in array_weights_list] with self.test_session(): result = regularizers.apply_regularization(regularizer, tensor_weights_list) self.assertAllClose(0.0, result.eval()) def test_apply_regularization_invalid_regularizer(self): non_scalar_regularizer = lambda x: array_ops.tile(x, [2]) tensor_weights_list = [ constant_op.constant(x) for x in [[1.5], [2, 3, 4.2], 
[10, 42, 666.6]] ] with self.test_session(): with self.assertRaises(ValueError): regularizers.apply_regularization(non_scalar_regularizer, tensor_weights_list) if __name__ == '__main__': test.main()
imply/chuu
refs/heads/master
chrome/browser/resources/test_presubmit.py
61
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for Web Development Style Guide checker.""" import os import re import sys import unittest test_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.extend([ os.path.normpath(os.path.join(test_dir, '..', '..', '..', 'tools')), os.path.join(test_dir), ]) import find_depot_tools # pylint: disable=W0611 from testing_support.super_mox import SuperMoxTestBase from web_dev_style import css_checker, js_checker # pylint: disable=F0401 class JsStyleGuideTest(SuperMoxTestBase): def setUp(self): SuperMoxTestBase.setUp(self) input_api = self.mox.CreateMockAnything() input_api.re = re output_api = self.mox.CreateMockAnything() self.checker = js_checker.JSChecker(input_api, output_api) def GetHighlight(self, line, error): """Returns the substring of |line| that is highlighted in |error|.""" error_lines = error.split('\n') highlight = error_lines[error_lines.index(line) + 1] return ''.join(ch1 for (ch1, ch2) in zip(line, highlight) if ch2 == '^') def ShouldFailConstCheck(self, line): """Checks that the 'const' checker flags |line| as a style error.""" error = self.checker.ConstCheck(1, line) self.assertNotEqual('', error, 'Should be flagged as style error: ' + line) self.assertEqual(self.GetHighlight(line, error), 'const') def ShouldPassConstCheck(self, line): """Checks that the 'const' checker doesn't flag |line| as a style error.""" self.assertEqual('', self.checker.ConstCheck(1, line), 'Should not be flagged as style error: ' + line) def testConstFails(self): lines = [ "const foo = 'bar';", " const bar = 'foo';", # Trying to use |const| as a variable name "var const = 0;", "var x = 5; const y = 6;", "for (var i=0, const e=10; i<e; i++) {", "for (const x=0; x<foo; i++) {", "while (const x = 7) {", ] for line in lines: self.ShouldFailConstCheck(line) def 
testConstPasses(self): lines = [ # sanity check "var foo = 'bar'", # @const JsDoc tag "/** @const */ var SEVEN = 7;", # @const tag in multi-line comment " * @const", " * @const", # @constructor tag in multi-line comment " * @constructor", " * @constructor", # words containing 'const' "if (foo.constructor) {", "var deconstruction = 'something';", "var madeUpWordconst = 10;", # Strings containing the word |const| "var str = 'const at the beginning';", "var str = 'At the end: const';", # doing this one with regex is probably not practical #"var str = 'a const in the middle';", ] for line in lines: self.ShouldPassConstCheck(line) def ShouldFailChromeSendCheck(self, line): """Checks that the 'chrome.send' checker flags |line| as a style error.""" error = self.checker.ChromeSendCheck(1, line) self.assertNotEqual('', error, 'Should be flagged as style error: ' + line) self.assertEqual(self.GetHighlight(line, error), ', []') def ShouldPassChromeSendCheck(self, line): """Checks that the 'chrome.send' checker doesn't flag |line| as a style error. """ self.assertEqual('', self.checker.ChromeSendCheck(1, line), 'Should not be flagged as style error: ' + line) def testChromeSendFails(self): lines = [ "chrome.send('message', []);", " chrome.send('message', []);", ] for line in lines: self.ShouldFailChromeSendCheck(line) def testChromeSendPasses(self): lines = [ "chrome.send('message', constructArgs('foo', []));", " chrome.send('message', constructArgs('foo', []));", "chrome.send('message', constructArgs([]));", " chrome.send('message', constructArgs([]));", ] for line in lines: self.ShouldPassChromeSendCheck(line) def ShouldFailGetElementByIdCheck(self, line): """Checks that the 'getElementById' checker flags |line| as a style error. 
""" error = self.checker.GetElementByIdCheck(1, line) self.assertNotEqual('', error, 'Should be flagged as style error: ' + line) self.assertEqual(self.GetHighlight(line, error), 'document.getElementById') def ShouldPassGetElementByIdCheck(self, line): """Checks that the 'getElementById' checker doesn't flag |line| as a style error. """ self.assertEqual('', self.checker.GetElementByIdCheck(1, line), 'Should not be flagged as style error: ' + line) def testGetElementByIdFails(self): lines = [ "document.getElementById('foo');", " document.getElementById('foo');", "var x = document.getElementById('foo');", "if (document.getElementById('foo').hidden) {", ] for line in lines: self.ShouldFailGetElementByIdCheck(line) def testGetElementByIdPasses(self): lines = [ "elem.ownerDocument.getElementById('foo');", " elem.ownerDocument.getElementById('foo');", "var x = elem.ownerDocument.getElementById('foo');", "if (elem.ownerDocument.getElementById('foo').hidden) {", "doc.getElementById('foo');", " doc.getElementById('foo');", "cr.doc.getElementById('foo');", " cr.doc.getElementById('foo');", "var x = doc.getElementById('foo');", "if (doc.getElementById('foo').hidden) {", ] for line in lines: self.ShouldPassGetElementByIdCheck(line) def ShouldFailInheritDocCheck(self, line): """Checks that the '@inheritDoc' checker flags |line| as a style error.""" error = self.checker.InheritDocCheck(1, line) self.assertNotEqual('', error, msg='Should be flagged as style error: ' + line) self.assertEqual(self.GetHighlight(line, error), '@inheritDoc') def ShouldPassInheritDocCheck(self, line): """Checks that the '@inheritDoc' checker doesn't flag |line| as a style error. 
""" self.assertEqual('', self.checker.InheritDocCheck(1, line), msg='Should not be flagged as style error: ' + line) def testInheritDocFails(self): lines = [ " /** @inheritDoc */", " * @inheritDoc", ] for line in lines: self.ShouldFailInheritDocCheck(line) def testInheritDocPasses(self): lines = [ "And then I said, but I won't @inheritDoc! Hahaha!", " If your dad's a doctor, do you inheritDoc?", " What's up, inherit doc?", " this.inheritDoc(someDoc)", ] for line in lines: self.ShouldPassInheritDocCheck(line) def ShouldFailWrapperTypeCheck(self, line): """Checks that the use of wrapper types (i.e. new Number(), @type {Number}) is a style error. """ error = self.checker.WrapperTypeCheck(1, line) self.assertNotEqual('', error, msg='Should be flagged as style error: ' + line) highlight = self.GetHighlight(line, error) self.assertTrue(highlight in ('Boolean', 'Number', 'String')) def ShouldPassWrapperTypeCheck(self, line): """Checks that the wrapper type checker doesn't flag |line| as a style error. 
""" self.assertEqual('', self.checker.WrapperTypeCheck(1, line), msg='Should not be flagged as style error: ' + line) def testWrapperTypePasses(self): lines = [ "/** @param {!ComplexType} */", " * @type {Object}", " * @param {Function=} opt_callback", " * @param {} num Number of things to add to {blah}.", " * @return {!print_preview.PageNumberSet}", " /* @returns {Number} */", # Should be /** @return {Number} */ "* @param {!LocalStrings}" " Your type of Boolean is false!", " Then I parameterized her Number from her friend!", " A String of Pearls", " types.params.aBoolean.typeString(someNumber)", ] for line in lines: self.ShouldPassWrapperTypeCheck(line) def testWrapperTypeFails(self): lines = [ " /**@type {String}*/(string)", " * @param{Number=} opt_blah A number", "/** @private @return {!Boolean} */", " * @param {number|String}", ] for line in lines: self.ShouldFailWrapperTypeCheck(line) def ShouldFailVarNameCheck(self, line): """Checks that var unix_hacker, $dollar are style errors.""" error = self.checker.VarNameCheck(1, line) self.assertNotEqual('', error, msg='Should be flagged as style error: ' + line) highlight = self.GetHighlight(line, error) self.assertFalse('var ' in highlight); def ShouldPassVarNameCheck(self, line): """Checks that variableNamesLikeThis aren't style errors.""" self.assertEqual('', self.checker.VarNameCheck(1, line), msg='Should not be flagged as style error: ' + line) def testVarNameFails(self): lines = [ "var private_;", " var _super_private", " var unix_hacker = someFunc();", ] for line in lines: self.ShouldFailVarNameCheck(line) def testVarNamePasses(self): lines = [ " var namesLikeThis = [];", " for (var i = 0; i < 10; ++i) { ", "for (var i in obj) {", " var one, two, three;", " var magnumPI = {};", " var g_browser = 'da browzer';", "/** @const */ var Bla = options.Bla;", # goog.scope() replacement. " var $ = function() {", # For legacy reasons. " var StudlyCaps = cr.define('bla')", # Classes. 
" var SCARE_SMALL_CHILDREN = [", # TODO(dbeam): add @const in # front of all these vars like "/** @const */ CONST_VAR = 1;", # this line has (<--). ] for line in lines: self.ShouldPassVarNameCheck(line) class CssStyleGuideTest(SuperMoxTestBase): def setUp(self): SuperMoxTestBase.setUp(self) self.fake_file_name = 'fake.css' self.fake_file = self.mox.CreateMockAnything() self.mox.StubOutWithMock(self.fake_file, 'LocalPath') self.fake_file.LocalPath().AndReturn(self.fake_file_name) # Actual calls to NewContents() are defined in each test. self.mox.StubOutWithMock(self.fake_file, 'NewContents') self.input_api = self.mox.CreateMockAnything() self.input_api.re = re self.mox.StubOutWithMock(self.input_api, 'AffectedSourceFiles') self.input_api.AffectedFiles( include_deletes=False, file_filter=None).AndReturn([self.fake_file]) # Actual creations of PresubmitPromptWarning are defined in each test. self.output_api = self.mox.CreateMockAnything() self.mox.StubOutWithMock(self.output_api, 'PresubmitPromptWarning', use_mock_anything=True) author_msg = ('Was the CSS checker useful? ' 'Send feedback or hate mail to dbeam@chromium.org.') self.output_api = self.mox.CreateMockAnything() self.mox.StubOutWithMock(self.output_api, 'PresubmitNotifyResult', use_mock_anything=True) self.output_api.PresubmitNotifyResult(author_msg).AndReturn(None) def VerifyContentsProducesOutput(self, contents, output): self.fake_file.NewContents().AndReturn(contents.splitlines()) self.output_api.PresubmitPromptWarning( self.fake_file_name + ':\n' + output.strip()).AndReturn(None) self.mox.ReplayAll() css_checker.CSSChecker(self.input_api, self.output_api).RunChecks() def testCssAlphaWithAtBlock(self): self.VerifyContentsProducesOutput(""" <include src="../shared/css/cr/ui/overlay.css"> <include src="chrome://resources/totally-cool.css" /> /* A hopefully safely ignored comment and @media statement. 
/**/ @media print { div { display: block; color: red; } } .rule { z-index: 5; <if expr="not is macosx"> background-image: url(chrome://resources/BLAH); /* TODO(dbeam): Fix this. */ background-color: rgb(235, 239, 249); </if> <if expr="is_macosx"> background-color: white; background-image: url(chrome://resources/BLAH2); </if> color: black; } <if expr="is_macosx"> .language-options-right { visibility: hidden; opacity: 1; /* TODO(dbeam): Fix this. */ } </if>""", """ - Alphabetize properties and list vendor specific (i.e. -webkit) above standard. display: block; color: red; z-index: 5; color: black;""") def testCssAlphaWithNonStandard(self): self.VerifyContentsProducesOutput(""" div { /* A hopefully safely ignored comment and @media statement. /**/ color: red; -webkit-margin-start: 5px; }""", """ - Alphabetize properties and list vendor specific (i.e. -webkit) above standard. color: red; -webkit-margin-start: 5px;""") def testCssAlphaWithLongerDashedProps(self): self.VerifyContentsProducesOutput(""" div { border-left: 5px; /* A hopefully removed comment. */ border: 5px solid red; }""", """ - Alphabetize properties and list vendor specific (i.e. -webkit) above standard. border-left: 5px; border: 5px solid red;""") def testCssBracesHaveSpaceBeforeAndNothingAfter(self): self.VerifyContentsProducesOutput(""" /* Hello! */div/* Comment here*/{ display: block; } blah /* hey! */ { rule: value; } .this.is { /* allowed */ rule: value; }""", """ - Start braces ({) end a selector, have a space before them and no rules after. div{ {""") def testCssClassesUseDashes(self): self.VerifyContentsProducesOutput(""" .className, .ClassName, .class-name /* We should not catch this. */, .class_name { display: block; }""", """ - Classes use .dash-form. .className, .ClassName, .class_name {""") def testCssCloseBraceOnNewLine(self): self.VerifyContentsProducesOutput(""" @media { /* TODO(dbeam) Fix this case. 
*/ .rule { display: block; }} @-webkit-keyframe blah { 100% { height: -500px 0; } } #rule { rule: value; }""", """ - Always put a rule closing brace (}) on a new line. rule: value; }""") def testCssColonsHaveSpaceAfter(self): self.VerifyContentsProducesOutput(""" div:not(.class):not([attr=5]), /* We should not catch this. */ div:not(.class):not([attr]) /* Nor this. */ { background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */ background: -webkit-linear-gradient(left, red, 80% blah blee blar); color: red; display:block; }""", """ - Colons (:) should have a space after them. display:block; - Don't use data URIs in source files. Use grit instead. background: url(data:image/jpeg,asdfasdfsadf);""") def testCssFavorSingleQuotes(self): self.VerifyContentsProducesOutput(""" html[dir="rtl"] body, html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ { background: url("chrome://resources/BLAH"); font-family: "Open Sans"; <if expr="is_macosx"> blah: blee; </if> }""", """ - Use single quotes (') instead of double quotes (") in strings. html[dir="rtl"] body, background: url("chrome://resources/BLAH"); font-family: "Open Sans";""") def testCssHexCouldBeShorter(self): self.VerifyContentsProducesOutput(""" #abc, #abc-, #abc-ghij, #abcdef-, #abcdef-ghij, #aaaaaa, #bbaacc { background-color: #336699; /* Ignore short hex rule if not gray. */ color: #999999; color: #666; }""", """ - Use abbreviated hex (#rgb) when in form #rrggbb. color: #999999; (replace with #999) - Use rgb() over #hex when not a shade of gray (like #333). background-color: #336699; (replace with rgb(51, 102, 153))""") def testCssUseMillisecondsForSmallTimes(self): self.VerifyContentsProducesOutput(""" .transition-0s /* This is gross but may happen. */ { transform: one 0.2s; transform: two .1s; transform: tree 1s; transform: four 300ms; }""", """ - Use milliseconds for time measurements under 1 second. 
transform: one 0.2s; (replace with 200ms) transform: two .1s; (replace with 100ms)""") def testCssNoDataUrisInSourceFiles(self): self.VerifyContentsProducesOutput(""" img { background: url( data:image/jpeg,4\/\/350|\/|3|2 ); background: url('data:image/jpeg,4\/\/350|\/|3|2'); }""", """ - Don't use data URIs in source files. Use grit instead. background: url( data:image/jpeg,4\/\/350|\/|3|2 ); background: url('data:image/jpeg,4\/\/350|\/|3|2');""") def testCssOneRulePerLine(self): self.VerifyContentsProducesOutput(""" a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type, a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~ input[type='checkbox']:not([hidden]), div { background: url(chrome://resources/BLAH); rule: value; /* rule: value; */ rule: value; rule: value; }""", """ - One rule per line (what not to do: color: red; margin: 0;). rule: value; rule: value;""") def testCssOneSelectorPerLine(self): self.VerifyContentsProducesOutput(""" a, div,a, div,/* Hello! */ span, #id.class([dir=rtl):not(.class):any(a, b, d) { rule: value; } a, div,a { some-other: rule here; }""", """ - One selector per line (what not to do: a, b {}). div,a, div, span, div,a {""") def testCssPseudoElementDoubleColon(self): self.VerifyContentsProducesOutput(""" a:href, br::after, ::-webkit-scrollbar-thumb, a:not([empty]):hover:focus:active, /* shouldn't catch here and above */ abbr:after, .tree-label:empty:after, b:before, :-WebKit-ScrollBar { rule: value; }""", """ - Pseudo-elements should use double colon (i.e. ::after). :after (should be ::after) :after (should be ::after) :before (should be ::before) :-WebKit-ScrollBar (should be ::-WebKit-ScrollBar) """) def testCssRgbIfNotGray(self): self.VerifyContentsProducesOutput(""" #abc, #aaa, #aabbcc { background: -webkit-linear-gradient(left, from(#abc), to(#def)); color: #bad; color: #bada55; }""", """ - Use rgb() over #hex when not a shade of gray (like #333). 
background: -webkit-linear-gradient(left, from(#abc), to(#def)); """ """(replace with rgb(170, 187, 204), rgb(221, 238, 255)) color: #bad; (replace with rgb(187, 170, 221)) color: #bada55; (replace with rgb(186, 218, 85))""") def testCssZeroLengthTerms(self): self.VerifyContentsProducesOutput(""" @-webkit-keyframe anim { 0% { /* Ignore key frames */ width: 0px; } 10% { width: 10px; } 100% { width: 100px; } } .media-button.play > .state0.active, .media-button[state='0'] > .state0.normal /* blah */, /* blee */ .media-button[state='0']:not(.disabled):hover > .state0.hover { -webkit-animation: anim 0s; -webkit-animation-duration: anim 0ms; -webkit-transform: scale(0%), translateX(0deg), translateY(0rad), translateZ(0grad); background-position-x: 0em; background-position-y: 0ex; border-width: 0em; color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */ opacity: .0; opacity: 0.0; opacity: 0.; } @page { border-width: 0mm; height: 0cm; width: 0in; }""", """ - Make all zero length terms (i.e. 0px) 0 unless inside of hsl() or part of""" """ @keyframe. width: 0px; -webkit-animation: anim 0s; -webkit-animation-duration: anim 0ms; -webkit-transform: scale(0%), translateX(0deg), translateY(0rad), translateZ(0grad); background-position-x: 0em; background-position-y: 0ex; border-width: 0em; opacity: .0; opacity: 0.0; opacity: 0.; border-width: 0mm; height: 0cm; width: 0in; """) if __name__ == '__main__': unittest.main()
thesuperzapper/tensorflow
refs/heads/master
tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py
75
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlowDataFrame implements convenience functions using TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import numpy as np from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask from tensorflow.python.client import session as sess from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.training import coordinator from 
tensorflow.python.training import queue_runner as qr def _expand_file_names(filepatterns): """Takes a list of file patterns and returns a list of resolved file names.""" if not isinstance(filepatterns, (list, tuple, set)): filepatterns = [filepatterns] filenames = set() for filepattern in filepatterns: names = set(gfile.Glob(filepattern)) filenames |= names return list(filenames) def _dtype_to_nan(dtype): if dtype is dtypes.string: return b"" elif dtype.is_integer: return np.nan elif dtype.is_floating: return np.nan elif dtype is dtypes.bool: return np.nan else: raise ValueError("Can't parse type without NaN into sparse tensor: %s" % dtype) def _get_default_value(feature_spec): if isinstance(feature_spec, parsing_ops.FixedLenFeature): return feature_spec.default_value else: return _dtype_to_nan(feature_spec.dtype) class TensorFlowDataFrame(df.DataFrame): """TensorFlowDataFrame implements convenience functions using TensorFlow.""" def run(self, num_batches=None, graph=None, session=None, start_queues=True, initialize_variables=True, **kwargs): """Builds and runs the columns of the `DataFrame` and yields batches. This is a generator that yields a dictionary mapping column names to evaluated columns. Args: num_batches: the maximum number of batches to produce. If none specified, the returned value will iterate through infinite batches. graph: the `Graph` in which the `DataFrame` should be built. session: the `Session` in which to run the columns of the `DataFrame`. start_queues: if true, queues will be started before running and halted after producting `n` batches. initialize_variables: if true, variables will be initialized. **kwargs: Additional keyword arguments e.g. `num_epochs`. Yields: A dictionary, mapping column names to the values resulting from running each column for a single batch. 
""" if graph is None: graph = ops.get_default_graph() with graph.as_default(): if session is None: session = sess.Session() self_built = self.build(**kwargs) keys = list(self_built.keys()) cols = list(self_built.values()) if initialize_variables: if variables.local_variables(): session.run(variables.local_variables_initializer()) if variables.global_variables(): session.run(variables.global_variables_initializer()) if start_queues: coord = coordinator.Coordinator() threads = qr.start_queue_runners(sess=session, coord=coord) i = 0 while num_batches is None or i < num_batches: i += 1 try: values = session.run(cols) yield collections.OrderedDict(zip(keys, values)) except errors.OutOfRangeError: break if start_queues: coord.request_stop() coord.join(threads) def select_rows(self, boolean_series): """Returns a `DataFrame` with only the rows indicated by `boolean_series`. Note that batches may no longer have consistent size after calling `select_rows`, so the new `DataFrame` may need to be rebatched. For example: ''' filtered_df = df.select_rows(df["country"] == "jp").batch(64) ''' Args: boolean_series: a `Series` that evaluates to a boolean `Tensor`. Returns: A new `DataFrame` with the same columns as `self`, but selecting only the rows where `boolean_series` evaluated to `True`. """ result = type(self)() for key, col in self._columns.items(): try: result[key] = col.select_rows(boolean_series) except AttributeError as e: raise NotImplementedError(( "The select_rows method is not implemented for Series type {}. " "Original error: {}").format(type(col), e)) return result def split(self, index_series, proportion, batch_size=None): """Deterministically split a `DataFrame` into two `DataFrame`s. Note this split is only as deterministic as the underlying hash function; see `tf.string_to_hash_bucket_fast`. The hash function is deterministic for a given binary, but may change occasionally. 
The only way to achieve an absolute guarantee that the split `DataFrame`s do not change across runs is to materialize them. Note too that the allocation of a row to one partition or the other is evaluated independently for each row, so the exact number of rows in each partition is binomially distributed. Args: index_series: a `Series` of unique strings, whose hash will determine the partitioning; or the name in this `DataFrame` of such a `Series`. (This `Series` must contain strings because TensorFlow provides hash ops only for strings, and there are no number-to-string converter ops.) proportion: The proportion of the rows to select for the 'left' partition; the remaining (1 - proportion) rows form the 'right' partition. batch_size: the batch size to use when rebatching the left and right `DataFrame`s. If None (default), the `DataFrame`s are not rebatched; thus their batches will have variable sizes, according to which rows are selected from each batch of the original `DataFrame`. Returns: Two `DataFrame`s containing the partitioned rows. """ if isinstance(index_series, str): index_series = self[index_series] left_mask, = split_mask.SplitMask(proportion)(index_series) right_mask = ~left_mask left_rows = self.select_rows(left_mask) right_rows = self.select_rows(right_mask) if batch_size: left_rows = left_rows.batch(batch_size=batch_size, shuffle=False) right_rows = right_rows.batch(batch_size=batch_size, shuffle=False) return left_rows, right_rows def split_fast(self, index_series, proportion, batch_size, base_batch_size=1000): """Deterministically split a `DataFrame` into two `DataFrame`s. Note this split is only as deterministic as the underlying hash function; see `tf.string_to_hash_bucket_fast`. The hash function is deterministic for a given binary, but may change occasionally. The only way to achieve an absolute guarantee that the split `DataFrame`s do not change across runs is to materialize them. 
Note too that the allocation of a row to one partition or the other is evaluated independently for each row, so the exact number of rows in each partition is binomially distributed. Args: index_series: a `Series` of unique strings, whose hash will determine the partitioning; or the name in this `DataFrame` of such a `Series`. (This `Series` must contain strings because TensorFlow provides hash ops only for strings, and there are no number-to-string converter ops.) proportion: The proportion of the rows to select for the 'left' partition; the remaining (1 - proportion) rows form the 'right' partition. batch_size: the batch size to use when rebatching the left and right `DataFrame`s. If None (default), the `DataFrame`s are not rebatched; thus their batches will have variable sizes, according to which rows are selected from each batch of the original `DataFrame`. base_batch_size: the batch size to use for materialized data, prior to the split. Returns: Two `DataFrame`s containing the partitioned rows. """ if isinstance(index_series, str): index_series = self[index_series] left_mask, = split_mask.SplitMask(proportion)(index_series) right_mask = ~left_mask self["left_mask__"] = left_mask self["right_mask__"] = right_mask # TODO(soergel): instead of base_batch_size can we just do one big batch? # avoid computing the hashes twice m = self.materialize_to_memory(batch_size=base_batch_size) left_rows_df = m.select_rows(m["left_mask__"]) right_rows_df = m.select_rows(m["right_mask__"]) del left_rows_df[["left_mask__", "right_mask__"]] del right_rows_df[["left_mask__", "right_mask__"]] # avoid recomputing the split repeatedly left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size) right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size) return left_rows_df, right_rows_df def run_one_batch(self): """Creates a new 'Graph` and `Session` and runs a single batch. 
Returns: A dictionary mapping column names to numpy arrays that contain a single batch of the `DataFrame`. """ return list(self.run(num_batches=1))[0] def run_one_epoch(self): """Creates a new 'Graph` and `Session` and runs a single epoch. Naturally this makes sense only for DataFrames that fit in memory. Returns: A dictionary mapping column names to numpy arrays that contain a single epoch of the `DataFrame`. """ # batches is a list of dicts of numpy arrays batches = [b for b in self.run(num_epochs=1)] # first invert that to make a dict of lists of numpy arrays pivoted_batches = {} for k in batches[0].keys(): pivoted_batches[k] = [] for b in batches: for k, v in b.items(): pivoted_batches[k].append(v) # then concat the arrays in each column result = {k: np.concatenate(column_batches) for k, column_batches in pivoted_batches.items()} return result def materialize_to_memory(self, batch_size): unordered_dict_of_arrays = self.run_one_epoch() # there may already be an 'index' column, in which case from_ordereddict) # below will complain because it wants to generate a new one. # for now, just remove it. # TODO(soergel): preserve index history, potentially many levels deep del unordered_dict_of_arrays["index"] # the order of the columns in this dict is arbitrary; we just need it to # remain consistent. ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays) return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays, batch_size=batch_size) def batch(self, batch_size, shuffle=False, num_threads=1, queue_capacity=None, min_after_dequeue=None, seed=None): """Resize the batches in the `DataFrame` to the given `batch_size`. Args: batch_size: desired batch size. shuffle: whether records should be shuffled. Defaults to true. num_threads: the number of enqueueing threads. queue_capacity: capacity of the queue that will hold new batches. min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. 
seed: passed to random shuffle operations. Only used if `shuffle` is true. Returns: A `DataFrame` with `batch_size` rows. """ column_names = list(self._columns.keys()) if shuffle: batcher = batch.ShuffleBatch(batch_size, output_names=column_names, num_threads=num_threads, queue_capacity=queue_capacity, min_after_dequeue=min_after_dequeue, seed=seed) else: batcher = batch.Batch(batch_size, output_names=column_names, num_threads=num_threads, queue_capacity=queue_capacity) batched_series = batcher(list(self._columns.values())) dataframe = type(self)() dataframe.assign(**(dict(zip(column_names, batched_series)))) return dataframe @classmethod def _from_csv_base(cls, filepatterns, get_default_values, has_header, column_names, num_threads, enqueue_size, batch_size, queue_capacity, min_after_dequeue, shuffle, seed): """Create a `DataFrame` from CSV files. If `has_header` is false, then `column_names` must be specified. If `has_header` is true and `column_names` are specified, then `column_names` overrides the names in the header. Args: filepatterns: a list of file patterns that resolve to CSV files. get_default_values: a function that produces a list of default values for each column, given the column names. has_header: whether or not the CSV files have headers. column_names: a list of names for the columns in the CSV files. num_threads: the number of readers that will work in parallel. enqueue_size: block size for each read operation. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed lines. min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. Returns: A `DataFrame` that has columns corresponding to `features` and is filled with examples from `filepatterns`. Raises: ValueError: no files match `filepatterns`. 
ValueError: `features` contains the reserved name 'index'. """ filenames = _expand_file_names(filepatterns) if not filenames: raise ValueError("No matching file names.") if column_names is None: if not has_header: raise ValueError("If column_names is None, has_header must be true.") with gfile.GFile(filenames[0]) as f: column_names = csv.DictReader(f).fieldnames if "index" in column_names: raise ValueError( "'index' is reserved and can not be used for a column name.") default_values = get_default_values(column_names) reader_kwargs = {"skip_header_lines": (1 if has_header else 0)} index, value = reader_source.TextFileSource( filenames, reader_kwargs=reader_kwargs, enqueue_size=enqueue_size, batch_size=batch_size, queue_capacity=queue_capacity, shuffle=shuffle, min_after_dequeue=min_after_dequeue, num_threads=num_threads, seed=seed)() parser = csv_parser.CSVParser(column_names, default_values) parsed = parser(value) column_dict = parsed._asdict() column_dict["index"] = index dataframe = cls() dataframe.assign(**column_dict) return dataframe @classmethod def from_csv(cls, filepatterns, default_values, has_header=True, column_names=None, num_threads=1, enqueue_size=None, batch_size=32, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None): """Create a `DataFrame` from CSV files. If `has_header` is false, then `column_names` must be specified. If `has_header` is true and `column_names` are specified, then `column_names` overrides the names in the header. Args: filepatterns: a list of file patterns that resolve to CSV files. default_values: a list of default values for each column. has_header: whether or not the CSV files have headers. column_names: a list of names for the columns in the CSV files. num_threads: the number of readers that will work in parallel. enqueue_size: block size for each read operation. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed lines. 
min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. Returns: A `DataFrame` that has columns corresponding to `features` and is filled with examples from `filepatterns`. Raises: ValueError: no files match `filepatterns`. ValueError: `features` contains the reserved name 'index'. """ def get_default_values(column_names): # pylint: disable=unused-argument return default_values return cls._from_csv_base(filepatterns, get_default_values, has_header, column_names, num_threads, enqueue_size, batch_size, queue_capacity, min_after_dequeue, shuffle, seed) @classmethod def from_csv_with_feature_spec(cls, filepatterns, feature_spec, has_header=True, column_names=None, num_threads=1, enqueue_size=None, batch_size=32, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None): """Create a `DataFrame` from CSV files, given a feature_spec. If `has_header` is false, then `column_names` must be specified. If `has_header` is true and `column_names` are specified, then `column_names` overrides the names in the header. Args: filepatterns: a list of file patterns that resolve to CSV files. feature_spec: a dict mapping column names to `FixedLenFeature` or `VarLenFeature`. has_header: whether or not the CSV files have headers. column_names: a list of names for the columns in the CSV files. num_threads: the number of readers that will work in parallel. enqueue_size: block size for each read operation. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed lines. min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. 
Returns: A `DataFrame` that has columns corresponding to `features` and is filled with examples from `filepatterns`. Raises: ValueError: no files match `filepatterns`. ValueError: `features` contains the reserved name 'index'. """ def get_default_values(column_names): return [_get_default_value(feature_spec[name]) for name in column_names] dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header, column_names, num_threads, enqueue_size, batch_size, queue_capacity, min_after_dequeue, shuffle, seed) # replace the dense columns with sparse ones in place in the dataframe for name in dataframe.columns(): if name != "index" and isinstance(feature_spec[name], parsing_ops.VarLenFeature): strip_value = _get_default_value(feature_spec[name]) (dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name]) return dataframe @classmethod def from_examples(cls, filepatterns, features, reader_cls=io_ops.TFRecordReader, num_threads=1, enqueue_size=None, batch_size=32, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None): """Create a `DataFrame` from `tensorflow.Example`s. Args: filepatterns: a list of file patterns containing `tensorflow.Example`s. features: a dict mapping feature names to `VarLenFeature` or `FixedLenFeature`. reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to read the `Example`s. num_threads: the number of readers that will work in parallel. enqueue_size: block size for each read operation. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed `Example`s min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. Returns: A `DataFrame` that has columns corresponding to `features` and is filled with `Example`s from `filepatterns`. 
Raises: ValueError: no files match `filepatterns`. ValueError: `features` contains the reserved name 'index'. """ filenames = _expand_file_names(filepatterns) if not filenames: raise ValueError("No matching file names.") if "index" in features: raise ValueError( "'index' is reserved and can not be used for a feature name.") index, record = reader_source.ReaderSource( reader_cls, filenames, enqueue_size=enqueue_size, batch_size=batch_size, queue_capacity=queue_capacity, shuffle=shuffle, min_after_dequeue=min_after_dequeue, num_threads=num_threads, seed=seed)() parser = example_parser.ExampleParser(features) parsed = parser(record) column_dict = parsed._asdict() column_dict["index"] = index dataframe = cls() dataframe.assign(**column_dict) return dataframe @classmethod def from_pandas(cls, pandas_dataframe, num_threads=None, enqueue_size=None, batch_size=None, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None, data_name="pandas_data"): """Create a `tf.learn.DataFrame` from a `pandas.DataFrame`. Args: pandas_dataframe: `pandas.DataFrame` that serves as a data source. num_threads: the number of threads to use for enqueueing. enqueue_size: the number of rows to enqueue per step. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed `Example`s min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. data_name: a scope name identifying the data. Returns: A `tf.learn.DataFrame` that contains batches drawn from the given `pandas_dataframe`. 
""" pandas_source = in_memory_source.PandasSource( pandas_dataframe, num_threads=num_threads, enqueue_size=enqueue_size, batch_size=batch_size, queue_capacity=queue_capacity, shuffle=shuffle, min_after_dequeue=min_after_dequeue, seed=seed, data_name=data_name) dataframe = cls() dataframe.assign(**(pandas_source()._asdict())) return dataframe @classmethod def from_numpy(cls, numpy_array, num_threads=None, enqueue_size=None, batch_size=None, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None, data_name="numpy_data"): """Creates a `tf.learn.DataFrame` from a `numpy.ndarray`. The returned `DataFrame` contains two columns: 'index' and 'value'. The 'value' column contains a row from the array. The 'index' column contains the corresponding row number. Args: numpy_array: `numpy.ndarray` that serves as a data source. num_threads: the number of threads to use for enqueueing. enqueue_size: the number of rows to enqueue per step. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed `Example`s min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. data_name: a scope name identifying the data. Returns: A `tf.learn.DataFrame` that contains batches drawn from the given array. 
""" numpy_source = in_memory_source.NumpySource( numpy_array, num_threads=num_threads, enqueue_size=enqueue_size, batch_size=batch_size, queue_capacity=queue_capacity, shuffle=shuffle, min_after_dequeue=min_after_dequeue, seed=seed, data_name=data_name) dataframe = cls() dataframe.assign(**(numpy_source()._asdict())) return dataframe @classmethod def from_ordereddict(cls, ordered_dict_of_arrays, num_threads=None, enqueue_size=None, batch_size=None, queue_capacity=None, min_after_dequeue=None, shuffle=True, seed=None, data_name="numpy_data"): """Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`. The returned `DataFrame` contains a column for each key of the dict plus an extra 'index' column. The 'index' column contains the row number. Each of the other columns contains a row from the corresponding array. Args: ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a data source. num_threads: the number of threads to use for enqueueing. enqueue_size: the number of rows to enqueue per step. batch_size: desired batch size. queue_capacity: capacity of the queue that will store parsed `Example`s min_after_dequeue: minimum number of elements that can be left by a dequeue operation. Only used if `shuffle` is true. shuffle: whether records should be shuffled. Defaults to true. seed: passed to random shuffle operations. Only used if `shuffle` is true. data_name: a scope name identifying the data. Returns: A `tf.learn.DataFrame` that contains batches drawn from the given arrays. Raises: ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'. """ numpy_source = in_memory_source.OrderedDictNumpySource( ordered_dict_of_arrays, num_threads=num_threads, enqueue_size=enqueue_size, batch_size=batch_size, queue_capacity=queue_capacity, shuffle=shuffle, min_after_dequeue=min_after_dequeue, seed=seed, data_name=data_name) dataframe = cls() dataframe.assign(**(numpy_source()._asdict())) return dataframe
vvv1559/intellij-community
refs/heads/master
python/testData/inspections/PyUnresolvedReferencesInspection/FromImportToContainingFile2/p1/__init__.py
12133432
pjryan126/solid-start-careers
refs/heads/master
store/api/zillow/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py
12133432
jsternberg/ansible-modules-core
refs/heads/devel
cloud/amazon/__init__.py
12133432
yazman/statscache_plugins
refs/heads/develop
statscache_plugins/volume/__init__.py
12133432
abhi11/tanglu-dak
refs/heads/master
dak/generate_metadata.py
1
#!/usr/bin/env python """ Processes all packages in a given suite to extract interesting metadata (mainly AppStream metainfo data). The data will be stored in the "bin_dep11" table. Additionally, a screenshot cache and tarball of all the icons of packages beloging to a given suite will be created. """ # Copyright (c) 2014 Abhishek Bhattacharjee <abhishek.bhattacharjee11@gmail.com> # Copyright (c) 2014-2015 Matthias Klumpp <mak@debian.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import sys import tarfile import shutil import apt_pkg import os import yaml import uuid import glob from find_metainfo import * from dep11.extractor import MetadataExtractor from dep11.component import DEP11Component, DEP11YamlDumper, get_dep11_header from daklib import daklog from daklib.daksubprocess import call, check_call from daklib.filewriter import DEP11DataFileWriter, DEP11HintsFileWriter from daklib.config import Config from daklib.dbconn import * from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS, PROC_STATUS_SIGNALRAISED def usage(): print("""Usage: dak generate_metadata -s <suitename> [OPTION] Extract DEP-11 metadata for the specified suite. -e, --expire Clear the icon/screenshot cache from stale data. -h, --write-hints Export YAML documents with issues found while processing the packages. 
""") class MetadataPool: ''' Keeps a pool of component metadata per arch per component ''' def __init__(self, values): ''' Initialize the metadata pool. ''' self._values = values self._mcpts = dict() def append_cptdata(self, arch, cptlist): ''' Makes a list of all the DEP11Component objects in a arch pool ''' cpts = self._mcpts.get(arch) if not cpts: self._mcpts[arch] = list() cpts = self._mcpts[arch] for c in cptlist: # TODO: Maybe check for duplicates here? # Right now, we can easily filter them out later and complain about it at the maintainer side, # so a hard-check on duplicate ids might not be necessary. cpts.append(c) def export(self, session): """ Saves metadata in db (serialized to YAML) """ for arch, cpts in self._mcpts.items(): values = self._values values['architecture'] = arch dep11 = DEP11Metadata(session) for cpt in cpts: # get the metadata in YAML format metadata = cpt.to_yaml_doc() hints_yml = cpt.get_hints_yaml() if not hints_yml: hints_yml = "" # store metadata in database dep11.insert_data(cpt._binid, cpt.cid, metadata, hints_yml, cpt.has_ignore_reason()) # commit all changes session.commit() ############################################################################## def make_icon_tar(suitename, component): ''' icons-%(component)_%(size).tar.gz of each Component. 
''' cnf = Config() sizes = cnf.value_list('DEP11::IconSizes') for size in sizes: icon_location_glob = os.path.join (cnf["Dir::MetaInfo"], suitename, component, "*", "icons", size, "*.*") tar_location = os.path.join (cnf["Dir::Root"], "dists", suitename, component) icon_tar_fname = os.path.join(tar_location, "icons-%s_%s.tar.gz" % (component, size)) tar = tarfile.open(icon_tar_fname, "w:gz") for filename in glob.glob(icon_location_glob): icon_name = os.path.basename (filename) tar.add(filename,arcname=icon_name) tar.close() def extract_metadata(mde, sn, pkgname, metainfo_files, binid, package_fname, arch): cpts = mde.process(pkgname, package_fname, metainfo_files, binid) data = dict() data['arch'] = arch data['cpts'] = cpts data['message'] = "Processed package: %s (%s/%s)" % (pkgname, sn, arch) return (PROC_STATUS_SUCCESS, data) def process_suite(session, suite, logger, force=False): ''' Extract new metadata for a given suite. ''' path = Config()["Dir::Pool"] if suite.untouchable and not force: import daklib.utils daklib.utils.fubar("Refusing to touch %s (untouchable and not forced)" % suite.suite_name) return for component in [ c.component_name for c in suite.components ]: mif = MetaInfoFinder(session) pkglist = mif.find_meta_files(component=component, suitename=suite.suite_name) values = { 'archive': suite.archive.path, 'suite': suite.suite_name, 'component': component, } pool = DakProcessPool() dpool = MetadataPool(values) def parse_results(message): # Split out into (code, msg) code, msg = message if code == PROC_STATUS_SUCCESS: # we abuse the message return value here... 
logger.log([msg['message']]) dpool.append_cptdata(msg['arch'], msg['cpts']) elif code == PROC_STATUS_SIGNALRAISED: logger.log(['E: Subprocess recieved signal ', msg]) else: logger.log(['E: ', msg]) cnf = Config() iconf = IconFinder(suite.suite_name, component) mde = MetadataExtractor(suite.suite_name, component, cnf["Dir::MetaInfo"], cnf["DEP11::Url"], cnf.value_list('DEP11::IconSizes'), iconf) for pkgname, pkg in pkglist.items(): for arch, data in pkg.items(): package_fname = os.path.join (path, data['filename']) if not os.path.exists(package_fname): print('Package not found: %s' % (package_fname)) continue pool.apply_async(extract_metadata, (mde, suite.suite_name, pkgname, data['files'], data['binid'], package_fname, arch), callback=parse_results) pool.close() pool.join() # save new metadata to the database dpool.export(session) make_icon_tar(suite.suite_name, component) logger.log(["Completed metadata extraction for suite %s/%s" % (suite.suite_name, component)]) def write_component_files(session, suite, logger): ''' Writes the metadata into Component-<arch>.yml.xz Ignores if ignore is True in the db ''' # SQL to fetch metadata sql = """ select distinct bd.metadata from bin_dep11 bd, binaries b, bin_associations ba, override o where bd.ignore = FALSE and bd.binary_id = b.id and b.package = o.package and o.component = :component_id and b.id = ba.bin and ba.suite = :suite_id and b.architecture = :arch_id """ logger.log(["Writing DEP-11 files for %s" % (suite.suite_name)]) for c in suite.components: # writing per <arch> for arch in suite.architectures: if arch.arch_string == "source": continue head_string = get_dep11_header(suite.suite_name, c.component_name) values = { 'archive' : suite.archive.path, 'suite_id' : suite.suite_id, 'suite' : suite.suite_name, 'component_id' : c.component_id, 'component' : c.component_name, 'arch_id' : arch.arch_id, 'arch' : arch.arch_string } writer = DEP11DataFileWriter(**values) ofile = writer.open() ofile.write(head_string) result 
= session.execute(sql, values) for doc in result: ofile.write(doc[0]) writer.close() def write_hints_files(session, suite, logger): ''' Writes the DEP-11 hints file (with issues and hints to improve the metadata) into DEP11Hints-<component>_<arch>.yml.gz in Dir::MetaInfoHints. ''' # SQL to fetch hints sql = """ select distinct bd.hints from bin_dep11 bd, binaries b, bin_associations ba, override o where bd.binary_id = b.id and b.package = o.package and o.component = :component_id and b.id = ba.bin and ba.suite = :suite_id and b.architecture = :arch_id """ logger.log(["Writing DEP-11 hints files for %s" % (suite.suite_name)]) for c in suite.components: # writing per arch for arch in suite.architectures: if arch.arch_string == "source": continue head_string = get_dep11_header(suite.suite_name, c.component_name) values = { 'archive' : suite.archive.path, 'suite_id' : suite.suite_id, 'suite' : suite.suite_name, 'component_id' : c.component_id, 'component' : c.component_name, 'arch_id' : arch.arch_id, 'arch' : arch.arch_string } writer = DEP11HintsFileWriter(Config()["Dir::MetaInfoHints"], **values) ofile = writer.open() ofile.write(head_string) result = session.execute(sql, values) for doc in result: ofile.write(doc[0]) writer.close() def expire_dep11_data_cache(session, suitename, logger): ''' Clears stale cache items per suite. 
''' # list for metadata we want to keep keep = list() # select all the binids with a package-name # (select all package-name from binaries) sql = """select bd.binary_id,b.package from bin_dep11 bd, binaries b where b.id = bd.binary_id""" q = session.execute(sql) result = q.fetchall() for r in result: keep.append("%s-%s" % (r[1], r[0])) glob_tmpl = "%s/*/*" % (os.path.join(Config()["Dir::MetaInfo"], suitename)) for fname in glob.glob(glob_tmpl): if not os.path.basename(fname) in keep: logger.log(["Expiring DEP-11 cache directory: %s" % (fname)]) rmtree(fname) def main(): cnf = Config() Arguments = [('h',"help","DEP11::Options::Help"), ('s',"suite","DEP11::Options::Suite", "HasArg"), ('e',"expire","DEP11::Options::ExpireCache"), ('h',"write-hints","DEP11::Options::WriteHints"), ] for i in ["help", "suite", "ExpireCache"]: if not cnf.has_key("DEP11::Options::%s" % (i)): cnf["DEP11::Options::%s" % (i)] = "" arguments = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv) Options = cnf.subtree("DEP11::Options") if Options["Help"]: usage() return suitename = Options["Suite"] if not suitename: print("You need to specify a suite!") sys.exit(1) # check if we have some important config options set if not cnf.has_key("Dir::MetaInfo"): print("You need to specify a metadata export directory (Dir::MetaInfo)") sys.exit(1) if not cnf.has_key("DEP11::Url"): print("You need to specify a metadata public web URL (DEP11::Url)") sys.exit(1) if not cnf.has_key("DEP11::IconSizes"): print("You need to specify a list of allowed icon-sizes (DEP11::IconSizes)") sys.exit(1) if Options["WriteHints"] and not cnf.has_key("Dir::MetaInfoHints"): print("You need to specify an export directory for DEP-11 hints files (Dir::MetaInfoHints)") sys.exit(1) logger = daklog.Logger('generate-metadata') from daklib.dbconn import Component, DBConn, get_suite, Suite session = DBConn().session() suite = get_suite(suitename.lower(), session) if Options["ExpireCache"]: expire_dep11_data_cache(session, suitename, 
logger) process_suite(session, suite, logger) # export database content as Components-<arch>.xz YAML documents write_component_files(session, suite, logger) if Options["WriteHints"]: write_hints_files(session, suite, logger) # we're done logger.close() if __name__ == "__main__": main()