Dataset columns:

Column     Type     Range / classes
repo_name  string   lengths 5-100
path       string   lengths 4-231
language   string   1 class
license    string   15 classes
size       int64    6-947k
score      float64  0-0.34
prefix     string   lengths 0-8.16k
middle     string   lengths 3-512
suffix     string   lengths 0-8.17k
networks-lab/metaknowledge
metaknowledge/tests/test_recordcollection.py
Python
gpl-2.0
26,484
0.012538
#Written by Reid McIlroy-Young for Dr. John McLevey, University of Waterloo 2015
import unittest
import metaknowledge
import metaknowledge.WOS
import os
import filecmp
import networkx as nx

disableJournChecking = True

class TestRecordCollection(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        metaknowledge.VERBOSE_MODE = False
        cls.RCmain = metaknowledge.RecordCollection("metaknowledge/tests/testFile.isi")
        cls.RCbadmain = metaknowledge.RecordCollection("metaknowledge/tests/badFile.isi")

    def setUp(self):
        self.RC = self.RCmain.copy()
        self.RCbad = self.RCbadmain.copy()

    def test_isCollection(self):
        self.assertIsInstance(self.RC, metaknowledge.RecordCollection)
        self.assertEqual(str(metaknowledge.RecordCollection()), "RecordCollection(Empty)")
        self.assertTrue(self.RC == self.RC)

    def test_fullRead(self):
        RC = metaknowledge.RecordCollection("metaknowledge/tests/")
        self.assertEqual(len(RC), 1032)

    def test_caching(self):
        RC = metaknowledge.RecordCollection("metaknowledge/tests/", cached = True, name = 'testingCache', extension = 'testFile.isi')
        self.assertTrue(os.path.isfile("metaknowledge/tests/tests.[testFile.isi].mkRecordDirCache"))
        accessTime = os.stat("metaknowledge/tests/testFile.isi").st_atime
        RC2 = metaknowledge.RecordCollection("metaknowledge/tests/", cached = True, name = 'testingCache', extension = 'testFile.isi')
        self.assertEqual(accessTime, os.stat("metaknowledge/tests/testFile.isi").st_atime)
        RC.dropBadEntries()
        RC2.dropBadEntries()
        self.assertEqual(RC, RC2)
        os.remove("metaknowledge/tests/tests.[testFile.isi].mkRecordDirCache")

    def test_bad(self):
        self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/badFile.isi').bad)
        with self.assertRaises(metaknowledge.mkExceptions.RCTypeError):
            metaknowledge.RecordCollection('metaknowledge/tests/testFile.isi', extension = '.txt')
        self.assertEqual(self.RCbad | self.RC, self.RCbad | self.RC)
        self.assertEqual(len(self.RCbad | self.RCbad), 32)
        self.assertFalse(self.RCbad == self.RC)
        self.assertEqual('/Users/Reid/Documents/Work/NetworksLab/metaknowledge/metaknowledge/tests/badFile.isi', self.RCbad.errors.keys().__iter__().__next__())

    def test_badEntries(self):
        badRecs = self.RC.badEntries()
        self.assertTrue(badRecs <= self.RC)
        self.assertTrue(badRecs.pop().bad)
        self.RC.dropBadEntries()

    def test_dropJourn(self):
        RCcopy = self.RC.copy()
        self.RC.dropNonJournals()
        self.assertEqual(len(self.RC), len(RCcopy) - 2)
        self.RC.dropNonJournals(invert = True)
        self.assertEqual(len(self.RC), 0)
        RCcopy.dropNonJournals(ptVal = 'B')
        self.assertEqual(len(RCcopy), 1)

    def test_repr(self):
        self.assertEqual(repr(self.RC), "<metaknowledge.RecordCollection object testFile>")

    def test_hash(self):
        self.assertNotEqual(hash(self.RC), hash(self.RCbad))
        R = self.RC.pop()
        RC = metaknowledge.RecordCollection([R])
        self.assertEqual(hash(RC), hash(hash(R)))

    def test_contains(self):
        R = self.RC.peek()
        self.assertTrue(R in self.RC)
        R = self.RC.pop()
        self.assertFalse(R in self.RC)

    def test_conID(self):
        R = self.RC.peek()
        self.assertTrue(self.RC.containsID(R.id))
        self.assertFalse(self.RC.containsID('234567654'))

    def test_discard(self):
        R = self.RC.peek()
        l = len(self.RC)
        self.RC.discard(R)
        l2 = len(self.RC)
        self.assertEqual(l, l2 + 1)
        self.RC.discard(R)
        self.assertEqual(l2, len(self.RC))

    def test_pop(self):
        R = self.RC.pop()
        self.assertFalse(R in self.RC)
        self.RC.clear()
        with self.assertRaises(KeyError):
            R = self.RC.pop()

    def test_peek(self):
        R = self.RC.peek()
        self.assertTrue(R in self.RC)
        self.RC.clear()
        R = self.RC.peek()
        self.assertTrue(R is None)

    def test_clear(self):
        R = self.RCbad.peek()
        self.assertTrue(self.RCbad.bad)
        self.RCbad.clear()
        self.assertFalse(self.RCbad.bad)
        self.assertFalse(R in self.RCbad)

    def test_remove(self):
        R = self.RC.peek()
        l = len(self.RC)
        self.RC.remove(R)
        self.assertEqual(l, len(self.RC) + 1)
        with self.assertRaises(KeyError):
            self.RC.remove(R)

    def test_equOps(self):
        l = len(self.RC)
        for i in range(10):
            self.RCbad.pop()
        lb = len(self.RCbad)
        RC = metaknowledge.RecordCollection([])
        RC.bad = True
        RC |= self.RC
        self.assertEqual(self.RC, RC)
        RC -= self.RC
        self.assertNotEqual(self.RC, RC)
        RC ^= self.RC
        self.assertEqual(self.RC, RC)
        RC &= self.RCbad
        self.assertNotEqual(self.RC, RC)

    def test_newOps(self):
        l = len(self.RC)
        for i in range(10):
            self.RCbad.pop()
        lb = len(self.RCbad)
        RC = metaknowledge.RecordCollection([])
        RC.bad = True
        RC3 = self.RC | RC
        self.assertEqual(self.RC, RC3)
        RC4 = RC3 - self.RC
        self.assertNotEqual(self.RC, RC4)
        RC5 = RC4 ^ self.RC
        self.assertEqual(self.RC, RC5)
        RC6 = RC5 & self.RCbad
        self.assertNotEqual(self.RC, RC6)

    def test_opErrors(self):
        with self.assertRaises(TypeError):
            self.RC <= 1
        with self.assertRaises(TypeError):
            self.RC >= 1
        self.assertTrue(self.RC != 1)
        with self.assertRaises(TypeError):
            self.RC >= 1
        with self.assertRaises(TypeError):
            self.RC |= 1
        with self.assertRaises(TypeError):
            self.RC ^= 1
        with self.assertRaises(TypeError):
            self.RC &= 1
        with self.assertRaises(TypeError):
            self.RC -= 1
        with self.assertRaises(TypeError):
            self.RC | 1
        with self.assertRaises(TypeError):
            self.RC ^ 1
        with self.assertRaises(TypeError):
            self.RC & 1
        with self.assertRaises(TypeError):
            self.RC - 1

    def test_addRec(self):
        l = len(self.RC)
        R = self.RC.pop()
        self.assertEqual(len(self.RC), l - 1)
        self.RC.add(R)
        self.assertEqual(len(self.RC), l)
        RC2 = metaknowledge.RecordCollection("metaknowledge/tests/TwoPaper.isi")
        self.RC |= RC2
        self.assertEqual(len(self.RC), l + 2)
        with self.assertRaises(metaknowledge.CollectionTypeError):
            self.RC.add(1)

    def test_bytes(self):
        with self.assertRaises(metaknowledge.BadRecord):
            self.assertIsInstance(bytes(self.RC), bytes)
        self.RC.dropBadEntries()
        self.assertIsInstance(bytes(self.RC), bytes)

    def test_WOS(self):
        self.RC.dropBadEntries()
        R = self.RC.peek()
        l = len(self.RC)
        self.assertTrue(R, self.RC.getID(R.id))
        self.assertEqual(len(self.RC), l)
        self.RC.removeID(R.id)
        self.assertEqual(len(self.RC), l - 1)
        self.RC.getID(self.RC.peek().id)
        self.assertEqual(len(self.RC), l - 1)
        self.assertFalse(self.RC.getID(self.RC.pop().id))
        self.RC.discardID('sdfghjkjhgfdfghj')
        self.RC.discardID('WOS:A1979GV55600001')
        with self.assertRaises(KeyError):
            self.RC.removeID('ghjkljhgfdfghjmh')

    def test_directoryRead(self):
        self.assertEqual(len(metaknowledge.RecordCollection('.')), 0)
        self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/') >= self.RC)
        self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/', extension = '.txt') <= self.RC)

    def test_contentType(self):
        RC = metaknowledge.RecordCollection('metaknowledge/tests/')
        self.assertEqual(RC._collectedTypes, {'MedlineRecord', 'WOSRecord', 'ProQuestRecord', 'ScopusRecord'})
        self.assertEqual(self.RC._collectedTypes, {'WOSRecord
karulis/pybluez
osx/_bluetoothsockets.py
Python
gpl-2.0
35,262
0.002354
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LightBlue.  If not, see <http://www.gnu.org/licenses/>.

# Mac OS X bluetooth sockets implementation.
#
# To-do:
# - allow socket options
#
# if doing security AUTH, should set bool arg when calling
# openConnection_withPageTimeout_authenticationRequired_() in connect()

import time
import socket as _socket
import threading
import os
import errno
import types

import objc
import Foundation

from . import _IOBluetooth
from . import _lightbluecommon
from . import _macutil
from ._LightAquaBlue import BBServiceAdvertiser, BBBluetoothChannelDelegate

#import sets    # python 2.3

try:
    SHUT_RD, SHUT_WR, SHUT_RDWR = \
        _socket.SHUT_RD, _socket.SHUT_WR, _socket.SHUT_RDWR
except AttributeError:
    # python 2.3
    SHUT_RD, SHUT_WR, SHUT_RDWR = (0, 1, 2)


def _getavailableport(proto):
    # Just advertise a service and see what channel it was assigned, then
    # stop advertising the service and return the channel.
    # It's a hacky way of doing it, but IOBluetooth doesn't seem to provide
    # functionality for just getting an available channel.
    if proto == _lightbluecommon.RFCOMM:
        try:
            result, channelID, servicerecordhandle = BBServiceAdvertiser.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(BBServiceAdvertiser.serialPortProfileDictionary(), "DummyService", None, None, None)
        except:
            result, channelID, servicerecordhandle = BBServiceAdvertiser.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(BBServiceAdvertiser.serialPortProfileDictionary(), "DummyService", None)
        if result != _macutil.kIOReturnSuccess:
            raise _lightbluecommon.BluetoothError(result, \
                "Could not retrieve an available service channel")
        result = BBServiceAdvertiser.removeService_(servicerecordhandle)
        if result != _macutil.kIOReturnSuccess:
            raise _lightbluecommon.BluetoothError(result, \
                "Could not retrieve an available service channel")
        return channelID
    else:
        raise NotImplementedError("L2CAP server sockets not currently supported")


def _checkaddrpair(address, checkbtaddr=True):
    # will want checkbtaddr=False if the address might be empty string
    # (for binding to a server address)
    if not isinstance(address, tuple):
        raise TypeError("address must be (address, port) tuple, was %s" % \
            type(address))
    if len(address) != 2:
        raise TypeError("address tuple must have 2 items (has %d)" % \
            len(address))
    if not isinstance(address[0], str):
        raise TypeError("address host value must be string, was %s" % \
            type(address[0]))
    if checkbtaddr:
        if not _lightbluecommon._isbtaddr(address[0]):
            raise TypeError("address '%s' is not a bluetooth address" % \
                address[0])
    if not isinstance(address[1], int):
        raise TypeError("address port value must be int, was %s" % \
            type(address[1]))


# from std lib socket module
class _closedsocket(object):
    __slots__ = []
    def _dummy(*args):
        raise _socket.error(errno.EBADF, 'Bad file descriptor')
    send = recv = sendto = recvfrom = __getattr__ = _dummy


# Thanks to Simon Wittber for string queue recipe
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/426060
# (this is a modified version)
class _StringQueue(object):
    def __init__(self):
        self.l_buffer = []
        self.s_buffer = ""
        self.lock = threading.RLock()
        self.bufempty = True

    def empty(self):
        return self.bufempty

    def write(self, data):
        # no type check, and assumes data is not empty!
        # append data to list, no need to "".join just yet.
        self.lock.acquire()
        try:
            self.l_buffer.append(data)
            self.bufempty = False
        finally:
            self.lock.release()

    def _build_str(self):
        # build a new string out of list
        new_string = "".join([str(x.tobytes()) for x in self.l_buffer])
        # join string buffer and new string
        self.s_buffer = "".join((self.s_buffer, new_string))
        # clear list
        self.l_buffer = []

    def __len__(self):
        # calculate length without needing to _build_str
        return sum([len(i) for i in self.l_buffer]) + len(self.s_buffer)

    def read(self, count):
        self.lock.acquire()
        try:
            # if string doesn't have enough chars to satisfy caller
            if count > len(self.s_buffer):
                self._build_str()
            # get data requested by caller
            result = self.s_buffer[:count]
            # remove requested data from string buffer
            self.s_buffer = self.s_buffer[len(result):]
            self.bufempty = (len(self.s_buffer) == 0)
        finally:
            self.lock.release()
        return result


#class _SocketWrapper(_socket._socketobject):
class _SocketWrapper(object):
    """
    A Bluetooth socket object has the same interface as a socket object
    from the Python standard library <socket> module. It also uses the same
    exceptions, raising socket.error for general errors and socket.timeout
    for timeout errors.

    Note that L2CAP sockets are not available on Python For Series 60, and
    only L2CAP client sockets are supported on Mac OS X and Linux.

    A simple client socket example:
        >>> from lightblue import *
        >>> s = socket()        # or socket(L2CAP) to create an L2CAP socket
        >>> s.connect(("00:12:2c:45:8a:7b", 5))
        >>> s.send("hello")
        5
        >>> s.close()

    A simple server socket example:
        >>> from lightblue import *
        >>> s = socket()
        >>> s.bind(("", 0))
        >>> s.listen(1)
        >>> advertise("My RFCOMM Service", s, RFCOMM)
        >>> conn, addr = s.accept()
        >>> print "Connected by", addr
        Connected by ('00:0D:93:19:C8:68', 5)
        >>> conn.recv(1024)
        "hello"
        >>> conn.close()
        >>> s.close()
    """
    def __init__(self, sock):
        self._sock = sock

    def accept(self):
        sock, addr = self._sock.accept()
        return _SocketWrapper(sock), addr
    accept.__doc__ = _lightbluecommon._socketdocs["accept"]

    def dup(self):
        return _SocketWrapper(self._sock)
    dup.__doc__ = _lightbluecommon._socketdocs["dup"]

    def close(self):
        self._sock.close()
        self._sock = _closedsocket()
        self.send = self.recv = self.sendto = self.recvfrom = self._sock._dummy
        try:
            import lightblue
            lightblue.stopadvertise(self)
        except:
            pass
    close.__doc__ = _lightbluecommon._socketdocs["close"]

    def makefile(self, mode='r', bufsize=-1):
        # use std lib socket's _fileobject
        return _socket._fileobject(self._sock, mode, bufsize)
    makefile.__doc__ = _lightbluecommon._socketdocs["makefile"]

    # delegate all other method calls to internal sock obj
    def __getattr__(self, attr):
        return getattr(self._sock, attr)


# internal _sock object for RFCOMM and L2CAP sockets
class _BluetoothSocket(object):

    _boundports = { _lightbluecommon.L2CAP: set(),
                    _lightbluecommon.RFCOMM: set() }

    # conn is the associated _RFCOMMConnection or _L2CAPConnection
    def __init__(self, conn):
        self.__conn = conn
        if conn is not None and conn.channel is not None:
            self.__remotedevice = conn.channel.getDevice()
jay-lau/magnum
magnum/conductor/mesos_monitor.py
Python
apache-2.0
2,587
0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_serialization import jsonutils

from magnum.common import urlfetch
from magnum.conductor.monitors import MonitorBase


class MesosMonitor(MonitorBase):

    def __init__(self, context, bay):
        super(MesosMonitor, self).__init__(context, bay)
        self.data = {}

    @property
    def metrics_spec(self):
        return {
            'memory_util': {
                'unit': '%',
                'func': 'compute_memory_util',
            },
            'cpu_util': {
                'unit': '%',
                'func': 'compute_cpu_util',
            },
        }

    def _build_url(self, url, protocol='http', port='80', path='/'):
        return protocol + '://' + url + ':' + port + path

    def _is_leader(self, state):
        return state['leader'] == state['pid']

    def pull_data(self):
        self.data['mem_total'] = 0
        self.data['mem_used'] = 0
        self.data['cpu_total'] = 0
        self.data['cpu_used'] = 0
        for master_addr in self.bay.master_addresses:
            mesos_master_url = self._build_url(master_addr, port='5050',
                                               path='/state')
            master = jsonutils.loads(urlfetch.get(mesos_master_url))
            if self._is_leader(master):
                for slave in master['slaves']:
                    self.data['mem_total'] += slave['resources']['mem']
                    self.data['mem_used'] += slave['used_resources']['mem']
                    self.data['cpu_total'] += slave['resources']['cpus']
                    self.data['cpu_used'] += slave['used_resources']['cpus']
                break

    def compute_memory_util(self):
        if self.data['mem_total'] == 0 or self.data['mem_used'] == 0:
            return 0
        else:
            return self.data['mem_used'] * 100 / self.data['mem_total']

    def compute_cpu_util(self):
        if self.data['cpu_used'] == 0:
            return 0
        else:
            return self.data['cpu_used'] * 100 / self.data['cpu_total']
craws/OpenAtlas-Python
tests/test_search.py
Python
gpl-2.0
1,697
0
from flask import url_for

from openatlas import app
from openatlas.models.entity import Entity
from tests.base import TestBaseCase


class SearchTest(TestBaseCase):

    def test_search(self) -> None:
        with app.test_request_context():
            app.preprocess_request()  # type: ignore
            person = Entity.insert('person', 'Waldo')
            person.begin_to = '2018-01-01'
            person.update()
            person.link(
                'P131',
                Entity.insert('actor_appellation', 'Waldo alias'))
            object_ = Entity.insert('place', 'Waldorf')
            object_.link('P1', Entity.insert('appellation', 'Waldorf alias'))
            Entity.insert('person', 'Waldo without date')
        with app.app_context():  # type: ignore
            self.app.post(url_for('search_index'), data={'global-term': ''})
            rv = self.app.post(
                url_for('search_index'),
                data={
                    'global-term': 'wal',
                    'include_dateless': True,
                    'begin_year': -100,
                    'end_year': 3000})
            assert b'Waldo' in rv.data
            rv = self.app.post(
                url_for('search_index'), data={'term': 'wal', 'own': True})
            assert b'Waldo' not in rv.data
            data = {'term': 'do', 'classes': 'person'}
            rv = self.app.post(url_for('search_index'), data=data)
            assert b'Waldo' in rv.data
            rv = self.app.post(
                url_for('search_index'),
                follow_redirects=True,
                data={'term': 'x', 'begin_year': 2, 'end_year': -1})
            assert b'cannot start after' in rv.data
usta/radmyarchive-py
setup.py
Python
bsd-3-clause
1,376
0.000728
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
import re
import ast

here = path.abspath(path.dirname(__file__))

_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('radmyarchive/__init__.py', 'rb') as vf:
    version = str(ast.literal_eval(_version_re.search(
        vf.read().decode('utf-8')).group(1)))

with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    readme_file = f.read()

setup(
    name="radmyarchive",
    version=version,
    author="Ömer Fadıl Usta",
    author_email="omerusta@gmail.com",
    packages=find_packages(),
    scripts=["scripts/RADMYARCHIVE.py"],
    url="https://github.com/usta/radmyarchive-py",
    license="BSD",
    keywords="exif image photo rename metadata arrange rearrange catalogue",
    description="A simple photo rearranger with help of EXIF tags",
    install_requires=['exifread', 'termcolor', 'colorama'],
    long_description=readme_file,
    classifiers=(
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.4",
        "Topic :: Utilities",
    ),
)
marioluigiman/geekchicken
cafe/models.py
Python
mit
960
0.015625
from django.db import models
from django.utils import timezone


# Create your models here.
class Comment(models.Model):
    title = models.CharField(max_length=200)
    comment_text = models.TextField()
    rating = models.IntegerField()
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title


class Reservation(models.Model):
    name = models.CharField(max_length=200)
    people_amount = models.IntegerField(default=1)
    time = models.TimeField(default=timezone.now)
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.name
tgracchus/spinnaker
pylib/spinnaker/reconfigure_spinnaker.py
Python
apache-2.0
891
0.003367
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

from configurator import Configurator


if __name__ == '__main__':
    try:
        configurator = Configurator()
        configurator.update_deck_settings()
    except (RuntimeError, IOError, ValueError) as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(-1)
simplegeo/sqlalchemy
test/aaa_profiling/test_zoomark_orm.py
Python
mit
13,694
0.002629
"""Benchmark for SQLAlchemy. An adaptation of Robert Brewers' ZooMark speed tests. """ import datetime import sys import time from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.test import * ITERATIONS = 1 dbapi_session = engines.ReplayableSession() metadata = None class ZooMarkTest(TestBase): """Runs the ZooMark and squawks if method counts vary from the norm. Each test has an associated `call_range`, the total number of accepted function calls made during the test. The count can vary between Python 2.4 and 2.5. Unlike a unit test, this is a ordered collection of steps. Running components individually will fail. """ __only_on__ = 'postgresql+psycopg2' __skip_if__ = lambda : sys.version_info < (2, 5), # TODO: get 2.4 # support def test_baseline_0_setup(self): global metadata, session creator = testing.db.pool._creator recorder = lambda : dbapi_session.recorder(creator()) engine = engines.testing_engine(options={'creator': recorder}) metadata = MetaData(engine) session = sessionmaker()() engine.connect() def test_baseline_1_create_tables(self): zoo = Table( 'Zoo', metadata, Column('ID', Integer, Sequence('zoo_id_seq'), primary_key=True, index=True), Column('Name', Unicode(255)), Column('Founded', Date), Column('Opens', Time), Column('LastEscape', DateTime), Column('Admission', Float), ) animal = Table( 'Animal', metadata, Column('ID', Integer, Sequence('animal_id_seq'), primary_key=True), Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True), Column('Name', Unicode(100)), Column('Species', Unicode(100)), Column('Legs', Integer, default=4), Column('LastEscape', DateTime), Column('Lifespan', Float(4)), Column('MotherID', Integer, ForeignKey('Animal.ID')), Column('PreferredFoodID', Integer), Column('AlternateFoodID', Integer), ) metadata.create_all() global Zoo, Animal class Zoo(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) class Animal(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) mapper(Zoo, zoo) mapper(Animal, animal) def test_baseline_1a_populate(self): wap = Zoo(Name=u'Wild Animal Park', Founded=datetime.date(2000, 1, 1), Opens=datetime.time(8, 15, 59), LastEscape=datetime.datetime( 2004, 7, 29, 5, 6, 7, ), Admission=4.95) session.add(wap) sdz = Zoo(Name=u'San Diego Zoo', Founded=datetime.date(1835, 9, 13), Opens=datetime.time(9, 0, 0), Admission=0) session.add(sdz) bio = Zoo(Name=u'Montr\xe9al Biod\xf4me', Founded=datetime.date(1992, 6, 19), Opens=datetime.time(9, 0, 0), Admission=11.75) session.add(bio) seaworld = Zoo(Name=u'Sea_World', Admission=60) session.add(seaworld) # Let's add a crazy futuristic Zoo to test large date values. lp = Zoo(Name=u'Luna Park', Founded=datetime.date(2072, 7, 17), Opens=datetime.time(0, 0, 0), Admission=134.95) session.add(lp) session.flush() # Animals leopard = Animal(Species=u'Leopard', Lifespan=73.5) session.add(leopard) leopard.ZooID = wap.ID leopard.LastEscape = \ datetime.datetime(2004, 12, 21, 8, 15, 0, 999907, ) session.add(Animal(Species=u'Lion', ZooID=wap.ID)) session.add(Animal(Species=u'Slug', Legs=1, Lifespan=.75)) session.add(Animal(Species=u'Tiger', ZooID=sdz.ID)) # Override Legs.default with itself just to make sure it works. 
session.add(Animal(Species=u'Bear', Legs=4)) session.add(Animal(Species=u'Ostrich', Legs=2, Lifespan=103.2)) session.add(Animal(Species=u'Centipede', Legs=100)) session.add(Animal(Species=u'Emperor Penguin', Legs=2, ZooID=seaworld.ID)) session.add(Animal(Species=u'Adelie Penguin', Legs=2, ZooID=seaworld.ID)) session.add(Animal(Species=u'Millipede', Legs=1000000, ZooID=sdz.ID)) # Add a mother and child to test relationships bai_yun = Animal(Species=u'Ape', Nameu=u
'Bai Yun', Legs=2) session.add(bai_yun) session.add(Animal(Species=u'Ape', Name=u'Hua Mei', Legs=2, MotherID=bai_yun.ID)) session.flush() sessio
n.commit() def test_baseline_2_insert(self): for x in xrange(ITERATIONS): session.add(Animal(Species=u'Tick', Name=u'Tick %d' % x, Legs=8)) session.flush() def test_baseline_3_properties(self): for x in xrange(ITERATIONS): # Zoos WAP = list(session.query(Zoo).filter(Zoo.Name == u'Wild Animal Park')) SDZ = list(session.query(Zoo).filter(Zoo.Founded == datetime.date(1835, 9, 13))) Biodome = list(session.query(Zoo).filter(Zoo.Name == u'Montr\xe9al Biod\xf4me')) seaworld = list(session.query(Zoo).filter(Zoo.Admission == float(60))) # Animals leopard = list(session.query(Animal).filter(Animal.Species == u'Leopard')) ostrich = list(session.query(Animal).filter(Animal.Species == u'Ostrich')) millipede = list(session.query(Animal).filter(Animal.Legs == 1000000)) ticks = list(session.query(Animal).filter(Animal.Species == u'Tick')) def test_baseline_4_expressions(self): for x in xrange(ITERATIONS): assert len(list(session.query(Zoo))) == 5 assert len(list(session.query(Animal))) == ITERATIONS + 12 assert len(list(session.query(Animal).filter(Animal.Legs == 4))) == 4 assert len(list(session.query(Animal).filter(Animal.Legs == 2))) == 5 assert len(list(session.query(Animal).filter(and_(Animal.Legs >= 2, Animal.Legs < 20)))) == ITERATIONS + 9 assert len(list(session.query(Animal).filter(Animal.Legs > 10))) == 2 assert len(list(session.query(Animal).filter(Animal.Lifespan > 70))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.like(u'L%')))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.like(u'%pede')))) == 2 assert len(list(session.query(Animal).filter(Animal.LastEscape != None))) == 1 assert len(list(session.query(Animal).filter(Animal.LastEscape == None))) == ITERATIONS + 11 # In operator (containedby) assert len(list(session.query(Animal).filter( Animal.Species.like(u'%pede%')))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.in_((u'Lion' , u'Tiger', u'Bear'))))) == 3 # Try In with cell references class thing(object): pass pet, pet2 = thing(), thing() pet.Name, pet2.Name = u'Slug', u'Ostrich' assert len(list(session.query(Animal). filter(Animal.Species.in_((pet.Name, pet2.Name))))) == 2 # lo
kit-cel/gr-dab
python/channel_tests/ber.py
Python
gpl-3.0
427
0.04918
def bits_set(x):
    bits = 0
    for i in range(0, 8):
        if (x & (1 << i)) > 0:
            bits += 1
    return bits

def find_ber(sent, received):
    assert(len(received) <= len(sent))
    if len(received) < len(sent)/2:
        print "frame detection error, more than half of the frames were lost!"
        return 0.5
    errors = 0
    for i in range(0, len(received)):
        errors += bits_set(sent[i] ^ received[i])  # ^ is xor
    return float(errors)/float(8*len(received))
DHI-GRAS/processing_SWAT
MDWF_Sensan_a.py
Python
gpl-3.0
7,579
0.006333
""" *************************************************************************** MDWF_Sensan_a.py ------------------------------------- Copyright (C) 2014 TIGER-NET (www.tiger-net.org) *************************************************************************** * This plugin is part of the Water Observation Information System (WOIS) * * developed under the TIGER-NET project funded by the European Space * * Agency as part of the long-term TIGER initiative aiming at promoting * * the use of Earth Observation (EO) for improved Integrated Water * * Resources Management (IWRM) in Africa. * * * * WOIS is a free software i.e. you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published * * by the Free Software Foundation, either versio
n 3 of the License, * * or (at your option) any later version. * * * * WOIS is distributed in the hope that it will be useful, but WITHOUT ANY * * WARRANTY; without even the implied warranty of MERCHANTABILITY or * * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * * for more details.
* * * * You should have received a copy of the GNU General Public License along * * with this program. If not, see <http://www.gnu.org/licenses/>. * *************************************************************************** """ import os from PyQt4 import QtGui from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException from processing.core.parameters import * from SWATAlgorithm import SWATAlgorithm from SWAT_SENSAN_specs import SWAT_SENSAN_specs SENSAN_specs = SWAT_SENSAN_specs() class MDWF_Sensan_a(SWATAlgorithm): SRC_FOLDER = "SRC_FOLDER" PAR_SRC = "PAR_SRC" PST_FILE = "PST_FILE" PAR_FILE = "PAR_FILE" PCT_DEV = "PCT_DEV" def __init__(self): super(MDWF_Sensan_a, self).__init__(__file__) def defineCharacteristics(self): self.name = "5.3 - Sensitivity analysis and calibration of SWAT model with PEST (MDWF) - generate parameter variation file" self.group = "Model development workflow (MDWF)" self.addParameter(ParameterFile(MDWF_Sensan_a.SRC_FOLDER, "Select model source folder", True)) self.addParameter(ParameterSelection(MDWF_Sensan_a.PAR_SRC, "Select source for parameter variation", ['Initial parameter values in PEST control file (.pst)','Initial parameter values defined when creating template files (.pbf)','Optimal parameter values output file from PEST (.par)'], False)) self.addParameter(ParameterFile(MDWF_Sensan_a.PST_FILE, "Select PEST control file", False)) self.addParameter(ParameterFile(MDWF_Sensan_a.PAR_FILE, "Select PEST output parameter file", False)) self.addParameter(ParameterNumber(MDWF_Sensan_a.PCT_DEV, "Percent deviation in parameter values")) def processAlgorithm(self, progress): SRC_FOLDER = self.getParameterValue(MDWF_Sensan_a.SRC_FOLDER) PAR_SRC = self.getParameterValue(MDWF_Sensan_a.PAR_SRC) PST_FILE = self.getParameterValue(MDWF_Sensan_a.PST_FILE) PAR_FILE = self.getParameterValue(MDWF_Sensan_a.PAR_FILE) PCT_DEV = self.getParameterValue(MDWF_Sensan_a.PCT_DEV) pct_devfile = open(SRC_FOLDER + os.sep + 'pct_dev.dat','w') pct_devfile.writelines(str(PCT_DEV)+'\r\n') pct_devfile.close() pvfilename = SRC_FOLDER + os.sep + SENSAN_specs.VARFLE pvfile = open(pvfilename,'w') PARNAME = [] PARVAL1 = [] PARLBND = [] PARUBND = [] # Find number of parameters and prepare the parameter variation block if PAR_SRC == 0: if os.path.isfile(PST_FILE): pst_lines = open(PST_FILE,'r').readlines() no_par = int(pst_lines[3].split()[0]) no_obsgr = int(pst_lines[3].split()[2]) for i in range(no_obsgr+12,no_obsgr+12+no_par): PARNAME.append(pst_lines[i].split()[0]) PARVAL1.append(pst_lines[i].split()[3]) PARLBND.append(pst_lines[i].split()[4]) PARUBND.append(pst_lines[i].split()[5]) else: raise GeoAlgorithmExecutionException('File ' + PST_FILE + ' does not exist. 
Please chose another source for parameter variation.') elif PAR_SRC == 1: filelist = os.listdir(SRC_FOLDER) for f in filelist: if '.pbf' in f: PARNAME.append(open(SRC_FOLDER + os.sep +f,'r').readlines()[0].split()[0]) PARVAL1.append(open(SRC_FOLDER + os.sep +f,'r').readlines()[0].split()[3]) PARLBND.append(open(SRC_FOLDER + os.sep +f,'r').readlines()[0].split()[4]) PARUBND.append(open(SRC_FOLDER + os.sep +f,'r').readlines()[0].split()[5]) elif PAR_SRC == 2: if os.path.isfile(PAR_FILE): par_lines = open(PAR_FILE,'r').readlines() no_par = len(par_lines)-1 for i in range(1,no_par+1): PARNAME.append(par_lines[i].split()[0]) PARVAL1.append(par_lines[i].split()[1]) pst_lines = open(PST_FILE,'r').readlines() no_par = int(pst_lines[3].split()[0]) no_obsgr = int(pst_lines[3].split()[2]) for i in range(no_obsgr+12,no_obsgr+12+no_par): PARLBND.append(pst_lines[i].split()[4]) PARUBND.append(pst_lines[i].split()[5]) else: raise GeoAlgorithmExecutionException('File ' + PAR_FILE + ' does not exist. Please chose another source for parameter variation.') # Write header and baseline parameter set for i in range(0,len(PARNAME)): pvfile.writelines(PARNAME[i] + '\t') pvfile.writelines('\r\n') for i in range(0,len(PARVAL1)): pvfile.writelines(PARVAL1[i] + '\t') pvfile.writelines('\r\n') # Write parameter sets having one parameter deviate from the baseline parameter set for j in range(0,len(PARVAL1)): for i in range(0,len(PARVAL1)): if j == i: if (float(PARVAL1[i]) * (1+PCT_DEV/100.) >= float(PARLBND[i])) & (float(PARVAL1[i]) * (1+PCT_DEV/100.) <= float(PARUBND[i])): pvfile.writelines(str(float(PARVAL1[i]) * (1+PCT_DEV/100.)) + '\t') elif (float(PARVAL1[i]) * (1+PCT_DEV/100.) > float(PARUBND[i])): raise GeoAlgorithmExecutionException(PARNAME[i] + ' exceeds upper boundary with a deviation of '+ str(PCT_DEV)+' %.') else: raise GeoAlgorithmExecutionException(PARNAME[i] + ' is smaller than lower boundary with a deviation of '+ str(PCT_DEV)+' %.') else: pvfile.writelines(PARVAL1[i] + '\t') pvfile.writelines('\r\n') pvfile.close() def getIcon(self): return QtGui.QIcon(os.path.dirname(__file__) + "/images/tigerNET.png")
apple/swift-lldb
packages/Python/lldbsuite/test/functionalities/history/TestHistoryRecall.py
Python
apache-2.0
1,405
0.00427
""" Make sure the !N and !-N commands work properly. """ from __future__ import print_function import lldb import lldbsuite.test.lldbutil as lldbutil from lldbsuite.test.lldbtest import * class TestHistoryRecall(TestBase): mydir = TestBase.compute_mydir(__file__) # If your test case doesn't stress debug info, the # set this to true. That way it won't be run once for # each debug info format. NO_DEBUG_INFO_TESTCASE = True def test_history_recall(self): """Test the !N and !-N functionality of the command interpreter.""" self.sample_test() def setUp(self): # Call super's setUp(). TestBase.setUp(self) def sample_test(self): interp = self.dbg.GetCommandInterpreter() result = lldb.SBCommandReturnObject() interp.Handle
Command("command history", result, True) interp.HandleCommand("platform list", result, True
) interp.HandleCommand("!0", result, False) self.assertTrue(result.Succeeded(), "!0 command did not work: %s"%(result.GetError())) self.assertTrue("command history" in result.GetOutput(), "!0 didn't rerun command history") interp.HandleCommand("!-1", result, False) self.assertTrue(result.Succeeded(), "!-1 command did not work: %s"%(result.GetError())) self.assertTrue("host:" in result.GetOutput(), "!-1 didn't rerun platform list.")
iandees/all-the-places
locations/spiders/hm.py
Python
mit
3,295
0.003642
import scrapy
from locations.items import GeojsonPointItem
import itertools


def chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]


def partition(l, n):
    return list(chunks(l, n))


def process_hours(opening_hours):
    ret_hours = []
    for hours_str in opening_hours:
        split_hours = hours_str.replace(",", "").replace("AM AM", "").replace("PM PM", "").split(" ")
        if split_hours[1] == "-":
            range_start = split_hours[0]
            range_end = split_hours[2]
            times = partition([x for x in split_hours[3:] if x != "-"], 2)
        else:
            range_start, range_end = split_hours[0], None
            times = partition([x for x in split_hours[1:] if x != "-"], 2)
        periods = partition(times, 2)
        periods = [list(itertools.chain(*r)) for r in periods]
        period_list = []
        for start, start_period, end, end_period in periods:
            start_hour, start_minutes = [int(x) for x in start.split(":")]
            end_hour, end_minutes = [int(x) for x in end.split(":")]
            if start_period == "PM":
                start_hour += 12
                end_hour += 12
            hours = (start_hour, start_minutes, end_hour, end_minutes)
            period_list.append("%02d:%02d-%02d:%02d" % hours)
        periods_str = ", ".join(period_list)
        if range_start and range_end:
            ret_hours.append("{}-{} {}".format(range_start[:2], range_end[:2], periods_str))
        elif range_start:
            ret_hours.append("{} {}".format(range_start[:2], periods_str))
    return "; ".join(ret_hours)


class HMSpider(scrapy.Spider):
    name = "hm-worldwide"
    all_stores_uri = 'https://hm.storelocator.hm.com/rest/storelocator/stores/1.0/locale/en_US/country/{}/'
    start_urls = ["http://www.hm.com/entrance.ahtml"]

    def parse(self, response):
        country_urls = response.css(".column li a::attr('href')").extract()
        country_codes = {x.split("=")[1].split("&")[0].upper() for x in country_urls}
        for country_code in country_codes:
            yield scrapy.Request(url=self.all_stores_uri.format(country_code),
                                 callback=self.parse_country)

    def parse_country(self, response):
        stores = response.css("storeComplete")
        for store in stores:
            point = {
                "lat": store.xpath("latitude/text()").extract_first(),
                "lon": store.xpath("longitude/text()").extract_first(),
                "name": store.xpath("name/text()").extract_first(),
                "addr_full": store.xpath("address/addressLine/text()").extract_first(),
                "city": store.xpath("city/text()").extract_first(),
                "country": store.xpath("country/text()").extract_first(),
                "phone": store.xpath("phone/text()").extract_first(),
                "opening_hours": process_hours(store.xpath("openingHours/openingHour/text()").extract()),
                "ref": store.xpath("storeId/text()").extract_first()
            }
            if "/country/US" in response.url:
                point["state"] = store.xpath("region/name/text()").extract_first()
                point["postcode"] = store.xpath("address/addressLine/text()").extract()[-1].split(" ")[-1]
            yield GeojsonPointItem(**point)
wotaen/itunes_podcast_rss
extract.py
Python
mit
1,768
0.005656
import json
import re

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry

URL_TEMPLATE = "https://itunes.apple.com/lookup?id=%s&entity=podcast"


def id_from_url(url):
    """
    Extract ID from iTunes podcast URL
    :param url (str)
    :return: (str)
    """
    matches = re.findall(r'\/id([0-9]+)', url)
    if len(matches) == 0:
        raise LookupError("No ID present in the given URL")
    if len(matches) > 1:
        raise LookupError("More than one ID present in the URL, cannot decide which one to take")
    return matches[0]


def lookup_id(id):
    """
    Looks up podcast ID in Itunes lookup service
    https://itunes.apple.com/lookup?id=<id>&entity=podcast
    :param id (str):
    :return: itunes response for the lookup as dict
    """
    try:
        retries = Retry(total=3, backoff_factor=0.1,
                        status_forcelist=[500, 502, 503, 504])
        s = requests.Session()
        s.mount('https://', HTTPAdapter(max_retries=retries))
        response = s.get(URL_TEMPLATE % id)
        content = json.loads(response.content.decode('utf-8'))
    except:
        raise
    return content


def feed_url(itunes_lookup_response):
    """
    Returns feed URL from the itunes lookup response
    :param itunes_lookup_response:
    :return: str
    """
    if len(itunes_lookup_response.get('results')) == 0:
        raise LookupError("iTunes response has no results")
    url = itunes_lookup_response.get('results')[0].get('feedUrl')
    if url is None:
        raise LookupError("feedUrl field is not present in response")
    return url


def extract_feed_url(url):
    id = id_from_url(url)
    response = lookup_id(id)
    url = feed_url(response)
    return url
TheBestHuman/DesktopSudokuGenerator
lib/gnome_sudoku.py
Python
gpl-3.0
38,776
0.013642
import gtk, gobject, gtk.glade
import gnome, gnome.ui, pango
import os, os.path
from gtk_goodies import gconf_wrapper, Undo, dialog_extras, image_extras
import gsudoku, sudoku, saver, sudoku_maker, printing, sudoku_generator_gui
import game_selector
import time, threading
from gettext import gettext as _
from gettext import ngettext
from defaults import *
from timer import ActiveTimer
from simple_debug import simple_debug, options
from dialog_swallower import SwappableArea

icon_factory = gtk.IconFactory()
STOCK_PIXBUFS = {}
for filename, stock_id in [('footprints.png', 'tracks'),]:
    pb = gtk.gdk.pixbuf_new_from_file(os.path.join(IMAGE_DIR, filename))
    STOCK_PIXBUFS[stock_id] = pb
    iconset = gtk.IconSet(pb)
    icon_factory.add(stock_id, iconset)
icon_factory.add_default()
gtk.stock_add([('tracks', _('Track moves'), 0, 0, ""),])

try:
    STOCK_FULLSCREEN = gtk.STOCK_FULLSCREEN
except:
    STOCK_FULLSCREEN = _('Full Screen')


class UI (gconf_wrapper.GConfWrapper):
    ui = '''<ui>
    <menubar name="MenuBar">
      <menu name="File" action="File">
        <menuitem action="New"/>
        <menuitem action="Open"/>
        <menuitem action="ByHand"/>
        <separator/>
        <menuitem action="Print"/>
        <menuitem action="PrintMany"/>
        <separator/>
        <!--<menuitem action="Save"/>-->
        <separator/>
        <menuitem action="Generator"/>
        <menuitem action="BackgroundGenerator"/>
        <separator/>
        <menuitem action="Close"/>
        <!--<menuitem action="Quit"/>-->
      </menu>
      <menu action="Edit">
        <menuitem action="Undo"/>
        <menuitem action="Redo"/>
        <separator/>
        <menuitem action="Clear"/>
        <menuitem action="ClearNotes"/>
      </menu>
      <menu action="View">
        <menuitem action="FullScreen"/>
        <separator/>
        <menuitem action="ToggleToolbar"/>
        <menuitem action="ToggleBackground"/>
        <menuitem action="ToggleHighlight"/>
      </menu>
      <menu action="Game">
        <menuitem action="ShowPossible"/>
        <menuitem action="AutofillCurrentSquare"/>
        <menuitem action="Autofill"/>
        <separator/>
        <menuitem action="AlwaysShowPossible"/>
        <menuitem action="ShowImpossibleImplications"/>
        <separator/>
        <menuitem action="Tracker"/>
        <separator/>
        <menuitem action="PuzzleInfo"/>
        <separator/>
        <menuitem action="HighScores"/>
      </menu>
      <menu action="Help">
        <menuitem action="About"/>
        <menuitem action="ShowHelp"/>
      </menu>
    </menubar>
    <toolbar name="Toolbar">
      <!--<toolitem action="Quit"/>-->
      <toolitem action="New"/>
      <!--<toolitem action="Open"/>-->
      <!--<toolitem action="Print"/>-->
      <!--<toolitem action="Save"/>-->
      <separator/>
      <toolitem action="Clear"/>
      <toolitem action="ClearNotes"/>
      <!--<separator/>
      <toolitem action="Undo"/>
      <toolitem action="Redo"/>-->
      <separator/>
      <toolitem action="ShowPossible"/>
      <!--<toolitem action="AlwaysShowPossible"/>-->
      <toolitem action="AutofillCurrentSquare"/>
      <separator/>
      <toolitem action="ToggleHighlight"/>
      <!--<toolitem action="AlwaysShowPossible"/>-->
      <toolitem action="Tracker"/>
    </toolbar>
    </ui>'''

    initial_prefs = {'group_size': 9,
                     'font_zoom': 0,
                     'zoom_on_resize': 1,
                     'always_show_hints': 0,
                     'player': os.environ.get('USERNAME', ''),
                     'difficulty': 0.0,
                     'minimum_font_size': pango.SCALE * 7,  # minimum font-size
                     'minimum_number_of_new_puzzles': MIN_NEW_PUZZLES,
                     'bg_black': 1,
                     'bg_custom_color': '',
                     #'show_notes':0
                     }

    @simple_debug
    def __init__ (self):
        self.w = gtk.Window()
        self.w.set_default_size(700, 675)
        self.timer = ActiveTimer(self.w)
        self.won = False
        gconf_wrapper.GConfWrapper.__init__(self,
                                            gconf_wrapper.GConf('gnome-sudoku')
                                            )
        self.initialize_prefs()
        self.player = self.gconf['player']
        self.cleared = []  # used for Undo memory
        self.cleared_notes = []  # used for Undo memory
        gnome.program_init('gnome-sudoku', VERSION,
                           properties={gnome.PARAM_APP_DATADIR: APP_DATA_DIR}
                           )
        self.w.connect('delete-event', self.quit_cb)
        self.vb = gtk.VBox()
        self.uimanager = gtk.UIManager()
        if self.gconf['bg_custom_color']:
            bgcol = self.gconf['bg_custom_color']
        elif self.gconf['bg_black']:
            bgcol = 'black'
        else:
            bgcol = None
        self.gsd = gsudoku.SudokuGameDisplay()
        if bgcol:
            self.gsd.set_bg_color(bgcol)
        self.gsd.connect('puzzle-finished', self.you_win_callback)
        self.main_actions = gtk.ActionGroup('MainActions')
        self.main_actions.add_actions([
            ('File', None, '_File'),
            ('New', gtk.STOCK_NEW, None,
             '<Control>n', _('New game'), self.new_cb),
            ('Print', gtk.STOCK_PRINT, None,
             None, _('Print current game'), self.print_game),
            ('PrintMany', gtk.STOCK_PRINT, _('Print _Multiple Sudokus'),
             None, _('Print more than one sudoku at a time.'), self.print_multiple_games),
            #('Quit',gtk.STOCK_QUIT,None,'<Control>q',
            # 'Quit Sudoku game',self.quit_cb),
            ('Close', gtk.STOCK_CLOSE, None, '<Control>w',
             _('Close Sudoku (save game for later)'), self.quit_cb),
            #('Save',gtk.STOCK_SAVE,_('_Save'),
            # '<Control>s','Save game to play later.',
            # self.save_game),
            ('ByHand', gtk.STOCK_EDIT, _('_Enter custom game'),
             None, _('Enter new puzzle by hand (use this to copy a puzzle from another source).'),
             self.enter_game_by_hand),
            ('Open', gtk.STOCK_OPEN, _('_Resume old game'),
             '<Control>r', _('Resume a previous saved game.'),
             self.open_game),
            ('Game', None, _('_Game')),
            ('View', None, _('_View')),
            ('ShowPossible', gtk.STOCK_HELP, _('_Hint'),
             '<Control>i',
             _('Show which numbers could go in the current square.'),
             self.show_hint_cb),
            ('AutofillCurrentSquare', gtk.STOCK_APPLY, _('_Fill'), '<Control>f',
             _('Automatically fill in the current square if possible.'),
             self.auto_fill_current_square_cb),
            ('Autofill', gtk.STOCK_REFRESH, _('Fill _all squares'), '<Control>a',
             _('Automatically fill in all squares for which there is only one valid value.'),
             self.auto_fill_cb),
            #('ZoomIn',gtk.STOCK_ZOOM_IN,'_Increase size',
            # '<Control>plus','Increase the size of numbers and squares',
            # self.zoom_in_cb),
            #('ZoomOut',gtk.STOCK_ZOOM_OUT,'_Decrease size',
            # '<Control>minus','Decrease the size of numbers and squares.',
            # self.zoom_out_cb),
            ('FullScreen', STOCK_FULLSCREEN, None,
             'F11', None, self.full_screen_cb),
            ('PuzzleInfo', gtk.STOCK_ABOUT, _('Puzzle _Statistics'),
             None, _('Show statistics about current puzzle'),
             self.show_info_cb),
            ('Help', None, _('_Help'),
             None, None, None),
            ('About', gtk.STOCK_ABOUT, None,
             None, None, self.show_about),
            ('ShowHelp', gtk.STOCK_HELP, None,
             None, None, self.show_help),
            ('HighScores', None, _('High _Scores'),
             None, _('Show high scores or replay old games.'),
             self.show_high_scores_cb),
            ])
        self.main_actions.add_toggle_actions([
            ('AlwaysShowPossible',
             None,
             _('_Always show hint'),
             None,
             _('A
nullpuppy/vgng
vgng.py
Python
mit
2,102
0.00333
#!/usr/bin/env python
#
# Translation of videogamena.me javascript to python
#
# http://videogamena.me/vgng.js
# http://videogamena.me/video_game_names.txt
#
# (C) 2014 Dustin Knie <dustin@nulldomain.com>

import argparse
import os
import random
from math import floor, trunc

_word_list_file = 'video_game_names.txt'
_word_list = []


def _build_list(word_list=_word_list_file):
    try:
        f = open(word_list, 'r')
        words = []
        for line in f:
            line = line.strip('\n')
            if line == "----":
                _word_list.append(words)
                words = []
            else:
                words.append(line)
        _word_list.append(words)
    except IOError as e:
        print("Error opening {}: {}".format(word_list, e))
        exit(1)


def _get_word(word_list, words=[], bad_match_list=[], allow_similar_matches=False):
    bad_word = True
    while bad_word:
        word = word_list[trunc(floor(random.random() * len(word_list)))]
        if '^' in word:
            if not allow_similar_matches:
                bad_match_list += word.split('^')[1].split('|')
            word = word.split('^')[0]
        if word in words or word in bad_match_list:
            continue
        bad_word = False
        words.append(word)
    return (words, bad_match_list)


def generate_game_name(allow_similar_matches=False):
    words = []
    bad_match_list = []
    for word_list in _word_list:
        (words, bad_match_list) = _get_word(word_list,
                                            words=words,
                                            bad_match_list=bad_match_list,
                                            allow_similar_matches=allow_similar_matches)
    return ' '.join(words)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('count', type=int, nargs='?', help='Number of names to create')
    parser.add_argument('-l', '--list', action='store', help='Word list to use for generating names.')
    args = parser.parse_args()

    _build_list(word_list=args.list if args.list else _word_list_file)
    for i in range(args.count if args.count else 1):
        print(generate_game_name())
hahnicity/bamboo
bamboo/globals.py
Python
mit
159
0
""" bamboo.globals ~~~~~~~~~~~~~ """ from peak.util.proxies i
mport CallbackProxy from bamboo.context import context db = CallbackProxy(lambda: context["db"])
nwjs/chromium.src
tools/v8_context_snapshot/run.py
Python
bsd-3-clause
552
0.003623
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This program wraps an arbitrary command since gn currently can only
execute scripts."""

import os
import subprocess
import sys
from shutil import copy2

args = sys.argv[1:]
args[0] = os.path.abspath(args[0])
#if sys.platform == 'darwin':
#    copy2(os.path.join(os.path.dirname(args[0]), 'libffmpeg.dylib'),
#          os.path.dirname(os.path.dirname(args[0])))

sys.exit(subprocess.call(args))
rajpushkar83/cloudmesh
cloudmesh/management/read.py
Python
apache-2.0
752
0.00133
import yaml
from mongoengine import *
import datetime
import time
import hashlib
import uuid
from pprint import pprint
from user import User, Users
from cloudmesh_management.generate import random_user
from cloudmesh_management.user import read_user

FILENAME = "/tmp/user.yaml"

connect('user', port=27777)

users = Users()


# Reads user information from file
def main():
    # user = random_user()
    # with open(FILENAME, "w") as f:
    #     f.write(user.yaml())
    print 70 * "="
    user = User()
    user = read_user(FILENAME)
    print 70 * "="
    pprint(user.json())
    user.save()
    user.update(**{"set__username": "Hallo"})
    user.save()
    print User.objects(username="Hallo")

if __name__ == "__main__":
    main()
ESOedX/edx-platform
lms/djangoapps/dashboard/tests/test_sysadmin.py
Python
agpl-3.0
12,287
0.001139
""" Provide tests for sysadmin dashboard feature in sysadmin.py """ from __future__ import absolute_import import glob import os import re import shutil import unittest from datetime import datetime from uuid import uuid4 import mongoengine from django.conf import settings from django.test.client import Client from django.test.utils import override_settings from django.urls import reverse from opaque_keys.edx.locator import CourseLocator from pytz import UTC from six import text_type from six.moves import range from dashboard.git_import import GitImportErrorNoDir from dashboard.models import CourseImportLog from openedx.core.djangolib.markup import Text from student.roles import CourseStaffRole, GlobalStaff from student.tests.factories import UserFactory from util.date_utils import DEFAULT_DATE_TIME_FORMAT, get_time_display from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase from xmodule.modulestore.tests.mongo_connection import MONGO_HOST, MONGO_PORT_NUM TEST_MONGODB_LOG = { 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'user': '', 'password': '', 'db': 'test_xlog', } class SysadminBaseTestCase(SharedModuleStoreTestCase): """ Base class with common methods used in XML and Mongo tests """ TEST_REPO = 'https://github.com/edx/edx4edx_lite.git' TEST_BRANCH = 'testing_do_not_delete' TEST_BRANCH_COURSE = CourseLocator.from_string('course-v1:MITx+edx4edx_branch+edx4edx') MODULESTORE = TEST_DATA_SPLIT_MODULESTORE def setUp(self): """Setup test case by adding primary user.""" super(SysadminBaseTestCase, self).setUp() self.user = UserFactory.create(username='test_user', email='test_user+sysadmin@edx.org', password='foo') self.client = Client() def _setstaff_login(self): """Makes the test user staff and logs them in""" GlobalStaff().add_users(self.user) self.client.login(username=self.user.username, password='foo') def _add_edx4edx(self, branch=None): """Adds the edx4edx sample course""" post_dict = {'repo_location': self.TEST_REPO, 'action': 'add_course', } if branch: post_dict['repo_branch'] = branch return self.client.post(reverse('sysadmin_courses'), post_dict) def _rm_edx4edx(self): """Deletes the sample course from the XML store""" def_ms = modulestore() course_path = '{0}/edx4edx_lite'.format( os.path.abspath(settings.DATA_DIR)) try: # using XML store course = def_ms.courses.get(course_path, None) except AttributeError: # Using mongo store course = def_ms.get_course(CourseLocator('MITx', 'edx4edx', 'edx4edx')) # Delete git loaded course response = self.client.post( reverse('sysadmin_courses'), { 'course_id': text_type(course.id), 'action': 'del_course', } ) self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path)) return response def _rm_glob(self, path): """ Create a shell expansion of passed in parameter and iteratively remove them. Must only expand to
directories. """ for path in glob.glob(path): shutil.rmtree(path) def _mkdir(self, path): """ Create directory and add the cleanup for it. """ os.mkdir(path) self.addCleanup(shutil.rmtree, path) @override_settings( MONGODB_LOG=TEST_MONGODB_LOG, GIT_REPO_DIR=settings.TEST_ROOT / "course_repos_{}".format(uuid4().hex) ) @unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
"ENABLE_SYSADMIN_DASHBOARD not set") class TestSysAdminMongoCourseImport(SysadminBaseTestCase): """ Check that importing into the mongo module store works """ @classmethod def tearDownClass(cls): """Delete mongo log entries after test.""" super(TestSysAdminMongoCourseImport, cls).tearDownClass() try: mongoengine.connect(TEST_MONGODB_LOG['db']) CourseImportLog.objects.all().delete() except mongoengine.connection.ConnectionError: pass def _setstaff_login(self): """ Makes the test user staff and logs them in """ self.user.is_staff = True self.user.save() self.client.login(username=self.user.username, password='foo') def test_missing_repo_dir(self): """ Ensure that we handle a missing repo dir """ self._setstaff_login() if os.path.isdir(settings.GIT_REPO_DIR): shutil.rmtree(settings.GIT_REPO_DIR) # Create git loaded course response = self._add_edx4edx() self.assertIn(Text(text_type(GitImportErrorNoDir(settings.GIT_REPO_DIR))), response.content.decode('UTF-8')) def test_mongo_course_add_delete(self): """ This is the same as TestSysadmin.test_xml_course_add_delete, but it uses a mongo store """ self._setstaff_login() self._mkdir(settings.GIT_REPO_DIR) def_ms = modulestore() self.assertNotEqual('xml', def_ms.get_modulestore_type(None)) self._add_edx4edx() course = def_ms.get_course(CourseLocator('MITx', 'edx4edx', 'edx4edx')) self.assertIsNotNone(course) self._rm_edx4edx() course = def_ms.get_course(CourseLocator('MITx', 'edx4edx', 'edx4edx')) self.assertIsNone(course) def test_course_info(self): """ Check to make sure we are getting git info for courses """ # Regex of first 3 columns of course information table row for # test course loaded from git. Would not have sha1 if # git_info_for_course failed. table_re = re.compile(u""" <tr>\\s+ <td>edX\\sAuthor\\sCourse</td>\\s+ # expected test git course name <td>course-v1:MITx\\+edx4edx\\+edx4edx</td>\\s+ # expected test git course_id <td>[a-fA-F\\d]{40}</td> # git sha1 hash """, re.VERBOSE) self._setstaff_login() self._mkdir(settings.GIT_REPO_DIR) # Make sure we don't have any git hashes on the page response = self.client.get(reverse('sysadmin_courses')) self.assertNotRegexpMatches(response.content.decode('utf-8'), table_re) # Now add the course and make sure it does match response = self._add_edx4edx() self.assertRegexpMatches(response.content.decode('utf-8'), table_re) def test_gitlogs(self): """ Create a log entry and make sure it exists """ self._setstaff_login() self._mkdir(settings.GIT_REPO_DIR) self._add_edx4edx() response = self.client.get(reverse('gitlogs')) # Check that our earlier import has a log with a link to details self.assertIn('/gitlogs/course-v1:MITx+edx4edx+edx4edx', response.content.decode('utf-8')) response = self.client.get( reverse('gitlogs_detail', kwargs={ 'course_id': 'course-v1:MITx+edx4edx+edx4edx'})) self.assertIn('======&gt; IMPORTING course', response.content.decode('utf-8')) self._rm_edx4edx() def test_gitlog_date(self): """ Make sure the date is timezone-aware and being converted/formatted properly. """ tz_names = [ 'America/New_York', # UTC - 5 'Asia/Pyongyang', # UTC + 9 'Europe/London', # UTC 'Canada/Yukon', # UTC - 8 'Europe/Moscow', # UTC + 4 ] tz_format = DEFAULT_DATE_TIME_FORMAT self._setstaff_login() self._mkdir(settings.GIT_REPO_DIR) self._add_edx4edx() date = CourseImportLog.objects.first().created.replace(tzinfo=UTC) for timezone in tz_names: wi
drcsturm/project-euler
p026.py
Python
mit
1,084
0.021218
# A unit fraction contains 1 in the numerator. The decimal representation of
# the unit fractions with denominators 2 to 10 are given:
# 1/2 = 0.5
# 1/3 = 0.(3)
# 1/4 = 0.25
# 1/5 = 0.2
# 1/6 = 0.1(6)
# 1/7 = 0.(142857)
# 1/8 = 0.125
# 1/9 = 0.(1)
# 1/10 = 0.1
# Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can
# be seen that 1/7 has a 6-digit recurring cycle.
# Find the value of d < 1000 for which 1/d contains the longest recurring
# cycle in its decimal fraction part.

from decimal import *

getcontext().prec = 3000

max_len = 6
repeating = 6
repeating_num = 7
for n in range(6, 1000):
    num = str(Decimal(1) / Decimal(n))[2:]
    if len(num) <= max_len * 2:
        continue
    for j in range(10):
        breaker = False
        for i in range(max_len, int(len(num)/2)-j):
            # print(n, num[j:j+i], num[j+i:i*2+j])
            if num[j:j+i] == num[j+i:i*2+j]:
                if len(num[j:i]) >= repeating:
                    repeating = len(num[j:i])
                    repeating_num = n
                    # print(n, num[j:i])
                breaker = True
                break
        if breaker:
            break
print(repeating_num)
aws-quickstart/taskcat
tests/test_common_utils.py
Python
apache-2.0
4,514
0.001108
import errno
import os
import unittest

import mock

from taskcat._common_utils import (
    exit_with_code,
    fetch_ssm_parameter_value,
    get_s3_domain,
    make_dir,
    merge_dicts,
    name_from_stack_id,
    param_list_to_dict,
    pascal_to_snake,
    region_from_stack_id,
    s3_bucket_name_from_url,
    s3_key_from_url,
    s3_url_maker,
)
from taskcat.exceptions import TaskCatException


class TestCommonUtils(unittest.TestCase):
    def test_get_param_includes(self):
        bad_testcases = [{}, [[]], [{}]]
        for bad in bad_testcases:
            with self.assertRaises(TaskCatException):
                param_list_to_dict(bad)

    def test_region_from_stack_id(self):
        actual = region_from_stack_id("arn:::us-east-1")
        self.assertEqual("us-east-1", actual)

    def test_name_from_stack_id(self):
        actual = name_from_stack_id("arn:::us-east-1::Stack/test-name")
        self.assertEqual("test-name", actual)

    @mock.patch("taskcat._common_utils.get_s3_domain", return_value="amazonaws.com")
    def test_s3_url_maker(self, m_get_s3_domain):
        m_s3 = mock.Mock()
        m_s3.get_bucket_location.return_value = {"LocationConstraint": None}
        actual = s3_url_maker("test-bucket", "test-key/1", m_s3)
        self.assertEqual(
            "https://test-bucket.s3.us-east-1.amazonaws.com/test-key/1", actual
        )
        m_s3.get_bucket_location.return_value = {"LocationConstraint": "us-west-2"}
        actual = s3_url_maker("test-bucket", "test-key/1", m_s3)
        self.assertEqual(
            "https://test-bucket.s3.us-west-2.amazonaws.com/test-key/1", actual
        )
        m_get_s3_domain.assert_called_once()

    def test_get_s3_domain(self):
        actual = get_s3_domain("cn-north-1")
        self.assertEqual("amazonaws.com.cn", actual)
        with self.assertRaises(TaskCatException):
            get_s3_domain("totally-invalid-region")

    def test_merge_dicts(self):
        input = [{}, {}]
        actual = merge_dicts(input)
        self.assertEqual({}, actual)
        input = [{"a": 1}, {"b": 2}]
        actual = merge_dicts(input)
        self.assertEqual({"a": 1, "b": 2}, actual)

    def test_pascal_to_snake(self):
        actual = pascal_to_snake("MyParam")
        self.assertEqual("my_param", actual)
        actual = pascal_to_snake("VPCParam")
        self.assertEqual("vpcparam", actual)

    def test_make_dir(self):
        path = "/tmp/test_make_dir_path"
        try:
            os.rmdir(path)
        except FileNotFoundError:
            pass
        os.makedirs(path)
        make_dir(path)
        os.rmdir(path)
        make_dir(path)
        self.assertEqual(os.path.isdir(path), True)
        with self.assertRaises(FileExistsError) as cm:
            make_dir(path, False)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        os.rmdir(path)

    @mock.patch("taskcat._common_utils.sys.exit", autospec=True)
    @mock.patch("taskcat._common_utils.LOG", autospec=True)
    def test_exit_with_code(self, mock_log, mock_exit):
        exit_with_code(1)
        mock_log.error.assert_not_called()
        mock_exit.assert_called_once_with(1)
        mock_exit.reset_mock()
        exit_with_code(0, "msg")
        mock_exit.assert_called_once_with(0)
        mock_exit.assert_called_once()

    def test_s3_key_from_url(self):
        k = s3_key_from_url("https://testbuk.s3.amazonaws.com/testprefix/testobj.yaml")
        self.assertEqual("testprefix/testobj.yaml", k)

    def test_s3_bucket_name_from_url(self):
        bucket = s3_bucket_name_from_url("https://buk.s3.amazonaws.com/obj.yaml")
        self.assertEqual("buk", bucket)

    def test_fetch_ssm_parameter_value(self):
        # String, no explicit version.
        m_boto_client = mock.Mock()
        m_ssm = mock.Mock()
        m_boto_client.return_value = m_ssm
        m_ssm.get_parameter.return_value = {
            "Parameter": {"Name": "foo", "Type": "String", "Value": "bar", "Version": 1}
        }
        expected = "bar"
        actual = fetch_ssm_parameter_value(m_boto_client, "foo")
        self.assertEqual(expected, actual)
        m_ssm.get_parameter.return_value = {
            "Parameter": {
                "Name": "foo",
                "Type": "StringList",
                "Value": "bar,baz,11",
                "Version": 1,
            }
        }
        expected = "bar,baz,11"
        actual = fetch_ssm_parameter_value(m_boto_client, "foo")
        self.assertEqual(expected, actual)
starofrainnight/rabird.selenium
rabird/selenium/exceptions.py
Python
apache-2.0
118
0
""" @date 2014-11-16 @author Hong-She Liang <starofrainnigh
t@gmai
l.com> """ from selenium.common.exceptions import *
PearsonIOKI/ioki-social
fabfile.py
Python
mit
1,897
0.001054
""" Script which define fabric's methods. """ from fabric.api import local, task, lcd from utils import create_bundle_server as create_bs, create_bundle_client as create_bc @task def create_bundle_server(bundle_name=None): """ Creates bundle for server-side of the application. """ create_b
s(bundle_name) @task def create_bundle_client(bundle_name=None): """ Creates bundle for client-side of the application. """ create_bc(bundle_name) @task def create_bundle(bundle_name=None): """ Creates bundle for server and client. """ create_bundle_server(bundle_name) create_bundle_client(bundle_name) @task def run_test_server(is_ci=False): """ Runs unittests for server-side of the application. """
bundle_list = local('find server/* -type d | grep tests', capture=True) for catalog in bundle_list.split("\n"): if is_ci: xunit_file = catalog.replace('/', '_') local("nosetests --with-xunit \ --xunit-file=../build/logs/xunit/" + xunit_file + '.xml ' + catalog) else: local("nosetests " + catalog) @task def run_stylechecker_server(is_ci=False): """ Runs style checkers for server-side of the application. """ if is_ci: local("pep8 server | tee ../build/logs/stylechecker/pep8.out") local("pylint server | tee ../build/logs/stylechecker/pylint.out") else: local("pep8 server") local("pylint server") @task def run_test_client(): """ Runs unittests for client-side of the application. """ with lcd("client"): local('grunt clean', capture=True) local('grunt build', capture=True) local('grunt test', capture=True) @task def run_test(): """ Runs unittests for server and client. """ run_test_server() run_test_client()
team-xue/xue
xue/cms/plugins/inherit/models.py
Python
bsd-3-clause
673
0.005944
from django.db import models
from django.utils.translation import ugettext_lazy as _

from cms.models import CMSPlugin, Page
from cms import settings


class InheritPagePlaceholder(CMSPlugin):
    """
    Provides the ability to inherit plugins for a certain placeholder
    from an associated "parent" page instance
    """
    from_page = models.ForeignKey(Page, null=True, blank=True,
        help_text=_("Choose a page to include its plugins into this placeholder, empty will choose current page"))
    from_language = models.CharField(_("language"), max_length=5,
        choices=settings.CMS_LANGUAGES, blank=True, null=True,
        help_text=_("Optional: the language of the plugins you want"))
micahwood/linux-dotfiles
sublime/Packages/SublimeLinter/lint/highlight.py
Python
mit
14,958
0.001738
# # highlight.py # Part of SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Ryan Hileman and Aparajita Fishman # # Project: https://github.com/SublimeLinter/SublimeLinter3 # License: MIT # """ This module implements highlighting code with marks. The following classes are exported: HighlightSet Highlight The following constants are exported: WARNING - name of warning type ERROR - name of error type MARK_KEY_FORMAT - format string for key used to mark code regions GUTTER_MARK_KEY_F
ORMAT - format string for key used to mark gutter mark regions MARK_SCOPE_FORMAT - format string used for color scheme scope names """ import re import sublime from . import persist # # Error types # WARNING = 'warning' ERROR = 'error' MARK_KEY_FORMAT = 'sublimelinter-{}-marks' GUTTER_MARK_KEY_FORMAT = 'sub
limelinter-{}-gutter-marks' MARK_SCOPE_FORMAT = 'sublimelinter.mark.{}' UNDERLINE_FLAGS = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_EMPTY_AS_OVERWRITE MARK_STYLES = { 'outline': sublime.DRAW_NO_FILL, 'fill': sublime.DRAW_NO_OUTLINE, 'solid underline': sublime.DRAW_SOLID_UNDERLINE | UNDERLINE_FLAGS, 'squiggly underline': sublime.DRAW_SQUIGGLY_UNDERLINE | UNDERLINE_FLAGS, 'stippled underline': sublime.DRAW_STIPPLED_UNDERLINE | UNDERLINE_FLAGS, 'none': sublime.HIDDEN } WORD_RE = re.compile(r'^([-\w]+)') NEAR_RE_TEMPLATE = r'(?<!"){}({}){}(?!")' def mark_style_names(): """Return the keys from MARK_STYLES, sorted and capitalized, with None at the end.""" names = list(MARK_STYLES) names.remove('none') names.sort() names.append('none') return [name.capitalize() for name in names] class HighlightSet: """This class maintains a set of Highlight objects and performs bulk operations on them.""" def __init__(self): """Initialize a new instance.""" self.all = set() def add(self, highlight): """Add a Highlight to the set.""" self.all.add(highlight) def draw(self, view): """ Draw all of the Highlight objects in our set. Rather than draw each Highlight object individually, the marks in each object are aggregated into a new Highlight object, and that object is then drawn for the given view. """ if not self.all: return all = Highlight() for highlight in self.all: all.update(highlight) all.draw(view) @staticmethod def clear(view): """Clear all marks in the given view.""" for error_type in (WARNING, ERROR): view.erase_regions(MARK_KEY_FORMAT.format(error_type)) view.erase_regions(GUTTER_MARK_KEY_FORMAT.format(error_type)) def redraw(self, view): """Redraw all marks in the given view.""" self.clear(view) self.draw(view) def reset(self, view): """Clear all marks in the given view and reset the list of marks in our Highlights.""" self.clear(view) for highlight in self.all: highlight.reset() def line_type(self, line): """Return the primary error type for the given line number.""" if not self.all: return None line_type = None for highlight in self.all: if line_type == ERROR: continue _line_type = highlight.lines.get(line) if _line_type != WARNING and line_type == WARNING: continue line_type = _line_type return line_type class Highlight: """This class maintains error marks and knows how to draw them.""" def __init__(self, code=''): """Initialize a new instance.""" self.code = code self.marks = {WARNING: [], ERROR: []} self.mark_style = 'outline' self.mark_flags = MARK_STYLES[self.mark_style] # Every line that has a mark is kept in this dict, so we know which # lines to mark in the gutter. self.lines = {} # These are used when highlighting embedded code, for example JavaScript # or CSS within an HTML file. The embedded code is linted as if it begins # at (0, 0), but we need to keep track of where the actual start is within the source. self.line_offset = 0 self.char_offset = 0 # Linting runs asynchronously on a snapshot of the code. Marks are added to the code # during that asynchronous linting, and the markup code needs to calculate character # positions given a line + column. By the time marks are added, the actual buffer # may have changed, so we can't reliably use the plugin API to calculate character # positions. The solution is to calculate and store the character positions for # every line when this object is created, then reference that when needed. 
self.newlines = newlines = [0] last = -1 while True: last = code.find('\n', last + 1) if last == -1: break newlines.append(last + 1) newlines.append(len(code)) @staticmethod def strip_quotes(text): """Return text stripped of enclosing single/double quotes.""" first = text[0] if first in ('\'', '"') and text[-1] == first: text = text[1:-1] return text def full_line(self, line): """ Return the start/end character positions for the given line. This returns *real* character positions (relative to the beginning of self.code) base on the *virtual* line number (adjusted by the self.line_offset). """ # The first line of the code needs the character offset if line == 0: char_offset = self.char_offset else: char_offset = 0 line += self.line_offset start = self.newlines[line] + char_offset end = self.newlines[min(line + 1, len(self.newlines) - 1)] return start, end def range(self, line, pos, length=-1, near=None, error_type=ERROR, word_re=None): """ Mark a range of text. line and pos should be zero-based. The pos and length argument can be used to control marking: - If pos < 0, the entire line is marked and length is ignored. - If near is not None, it is stripped of quotes and length = len(near) - If length < 0, the nearest word starting at pos is marked, and if no word is matched, the character at pos is marked. - If length == 0, no text is marked, but a gutter mark will appear on that line. error_type determines what type of error mark will be drawn (ERROR or WARNING). When length < 0, this method attempts to mark the closest word at pos on the given line. If you want to customize the word matching regex, pass it in word_re. If the error_type is WARNING and an identical ERROR region exists, it is not added. If the error_type is ERROR and an identical WARNING region exists, the warning region is removed and the error region is added. """ start, end = self.full_line(line) if pos < 0: pos = 0 length = (end - start) - 1 elif near is not None: near = self.strip_quotes(near) length = len(near) elif length < 0: code = self.code[start:end][pos:] match = (word_re or WORD_RE).search(code) if match: length = len(match.group()) else: length = 1 pos += start region = sublime.Region(pos, pos + length) other_type = ERROR if error_type == WARNING else WARNING i_offset = 0 for i, mark in enumerate(self.marks[other_type].copy()): if mark.a == region.a and mark.b == region.b: if error_type == WARNING: return else: self.marks[other_type].pop(i - i_offset) i_offset += 1 self.marks[error_type].append(region) def
prolifik/cakecoin
contrib/seeds/makeseeds.py
Python
mit
709
0.015515
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#

NSEEDS=600

import re
import sys
from subprocess import check_output

def main():
    lines = sys.stdin.readlines()

    ips = []
    pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):17792")
    for line in lines:
        m = pattern.match(line)
        if m is None:
            continue
        ip = 0
        for i in range(0,4):
            ip = ip + (int(m.group(i+1)) << (8*(i)))
        if ip == 0:
            continue
        ips.append(ip)

    for row in range(0, min(NSEEDS,len(ips)), 8):
        print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","

if __name__ == '__main__':
    main()
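For clarity, a small illustrative check (not part of the original script) of what the packing loop above computes: the four dotted-quad octets are packed little-endian, so "1.2.3.4" becomes 0x04030201.

# Illustrative sketch only; mirrors the loop in main() above.
octets = [1, 2, 3, 4]
ip = 0
for i in range(0, 4):
    ip = ip + (octets[i] << (8 * i))  # octet 0 lands in the low byte
assert ip == 0x04030201
print("0x%08x" % ip)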
OpenMined/PySyft
packages/syft/src/syft/proto/lib/python/bool_pb2.py
Python
apache-2.0
1,474
0.002035
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/python/bool.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database

# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# syft absolute
from syft.proto.core.common import (
    common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x1bproto/lib/python/bool.proto\x12\x0fsyft.lib.python\x1a%proto/core/common/common_object.proto"7\n\x04\x42ool\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x08\x12!\n\x02id\x18\x02 \x01(\x0b\x32\x15.syft.core.common.UIDb\x06proto3'
)

_BOOL = DESCRIPTOR.message_types_by_name["Bool"]
Bool = _reflection.GeneratedProtocolMessageType(
    "Bool",
    (_message.Message,),
    {
        "DESCRIPTOR": _BOOL,
        "__module__": "proto.lib.python.bool_pb2"
        # @@protoc_insertion_point(class_scope:syft.lib.python.Bool)
    },
)
_sym_db.RegisterMessage(Bool)

if _descriptor._USE_C_DESCRIPTORS == False:
    DESCRIPTOR._options = None
    _BOOL._serialized_start = 87
    _BOOL._serialized_end = 142
# @@protoc_insertion_point(module_scope)
acsone/account-invoicing
account_invoice_rounding/tests/test_invoice_rounding.py
Python
agpl-3.0
7,217
0
# -*- coding: utf-8 -*-
# Copyright 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)

import openerp.tests.common as test_common


# @test_common.post_install(True)
class TestSwedishRounding(test_common.TransactionCase):

    def create_dummy_invoice(self):
        invoice = self.env['account.invoice'].create({
            'partner_id': self.partner.id,
            'currency_id': self.env.ref('base.EUR').id,
            'account_id': self.account.id,
            'date_invoice': '2018-01-01',
            'invoice_line': [(0, 0, {
                'name': 'Dummy invoice line',
                'product_id': self.product.id,
                'invoice_line_tax_id': [(4, self.tax_10.id)],
                'account_id': self.account.id,
                'quantity': 1,
                'price_unit': 99.99,
                'journal_id': self.journal_sale.id
            })]
        })
        return invoice

    def create_dummy_invoice_2(self):
        invoice = self.env['account.invoice'].create({
            'partner_id': self.partner.id,
            'currency_id': self.env.ref('base.EUR').id,
            'account_id': self.account.id,
            'date_invoice': '2018-01-01',
            'invoice_line': [(0, 0, {
                'name': 'Dummy invoice line',
                'product_id': self.product.id,
                'invoice_line_tax_id': [(4, self.tax_77.id)],
                'account_id': self.account.id,
                'quantity': 1,
                'price_unit': 90,
                'journal_id': self.journal_sale.id
            })]
        })
        return invoice

    def create_two_lines_dummy_invoice(self):
        invoice = self.env['account.invoice'].create({
            'partner_id': self.partner.id,
            'currency_id': self.env.ref('base.EUR').id,
            'account_id': self.account.id,
            'date_invoice': '2018-01-01',
            'invoice_line': [(0, 0, {
                'name': 'Dummy invoice line',
                'product_id': self.product.id,
                'invoice_line_tax_id': [(4, self.tax_10.id)],
                'account_id': self.account.id,
                'quantity': 1,
                'price_unit': 99.99,
                'journal_id': self.journal_sale.id
            }), (0, 0, {
                'name': 'Dummy invoice line',
                'product_id': self.product2.id,
                'invoice_line_tax_id': [(4, self.tax_20.id)],
                'account_id': self.account.id,
                'quantity': 1,
                'price_unit': 19.99,
                'journal_id': self.journal_sale.id
            })]
        })
        return invoice

    def setUp(self):
        super(TestSwedishRounding, self).setUp()
        # self.sudo(self.ref('base.user_demo'))
        expense_type = self.env.ref('account.data_account_type_expense')
        self.journal_sale = self.env["account.journal"].create({
            "name": "Test sale journal",
            "type": "sale",
            "code": "TEST_SJ",
        })
        self.account = self.env['account.account'].create({
            'name': 'Rounding account',
            'code': '6666',
            'user_type': expense_type.id
        })
        tax_code_0 = self.env['account.tax.code'].create({
            'name': 'Tax0',
            'sign': 1,
        })
        tax_code_1 = self.env['account.tax.code'].create({
            'name': 'Tax1',
            'sign': 1,
        })
        tax_code_2 = self.env['account.tax.code'].create({
            'name': 'Tax2',
            'sign': 1,
        })
        self.tax_77 = self.env['account.tax'].create({
            'name': 'Dummy tax 7.7%',
            'type': 'percent',
            'amount': .077,
            'type_tax_use': 'sale',
            'tax_code_id': tax_code_0.id,
        })
        self.tax_10 = self.env['account.tax'].create({
            'name': 'Dummy tax 10%',
            'type': 'percent',
            'amount': .1,
            'type_tax_use': 'sale',
            'tax_code_id': tax_code_1.id,
        })
        self.tax_20 = self.env['account.tax'].create({
            'name': 'Dummy tax 20%',
            'type': 'percent',
            'amount': .20,
            'type_tax_use': 'sale',
            'tax_code_id': tax_code_2.id,
        })
        self.partner = self.env['res.partner'].create({
            'name': 'Test Partner',
        })
        self.product = self.env['product.product'].create({
            'name': 'Product Test',
            'list_price': 99.99,
            'default_code': 'TEST0001',
        })
        self.product2 = self.env['product.product'].create({
            'name': 'Product Test 2',
            'list_price': 19.99,
            'default_code': 'TEST0001',
        })

    def test_rounding_globally(self):
        company = self.env.ref('base.main_company')
        company.write({
            'tax_calculation_rounding_method': 'swedish_round_globally',
            'tax_calculation_rounding': 0.05,
        })
        invoice1 = self.create_dummy_invoice()
        invoice1.button_reset_taxes()
        invoice1.signal_workflow('invoice_open')
        self.assertEqual(invoice1.amount_total, 110)
        invoice2 = self.create_two_lines_dummy_invoice()
        invoice2.button_reset_taxes()
        self.assertEqual(invoice2.amount_total, 134)
        self.assertEqual(sum([t.amount for t in invoice2.tax_line]), 14.02)
        bigger_tax = self.env['account.invoice.tax'].search([
            ('invoice_id', '=', invoice2.id)], limit=1, order='amount desc')
        self.assertEqual(bigger_tax.amount, 10.02)
        self.assertEqual(len(invoice2.invoice_line), 2)
        self.assertFalse(invoice2.global_round_line_id)

    def test_rounding_per_line(self):
        company = self.env.ref('base.main_company')
        company.write({
            'tax_calculation_rounding_method': 'swedish_add_invoice_line',
            'tax_calculation_rounding': 0.05,
            'tax_calculation_rounding_account_id': self.account.id
        })
        invoice1 = self.create_dummy_invoice()
        invoice1.signal_workflow('invoice_open')
        invoice1.button_reset_taxes()
        self.assertEqual(invoice1.amount_total, 110)
        invoice2 = self.create_two_lines_dummy_invoice()
        invoice2.button_reset_taxes()
        invoice2.signal_workflow('invoice_open')
        self.assertEqual(invoice2.amount_total, 134)
        self.assertEqual(sum([t.amount for t in invoice2.tax_line]), 14)
        self.assertEqual(len(invoice2.invoice_line), 3)
        self.assertEqual(invoice2.global_round_line_id.price_subtotal, 0.02)
        # test with pressing taxes reset button before validation
        invoice3 = self.create_dummy_invoice_2()
        invoice3.button_reset_taxes()
        invoice3.signal_workflow('invoice_open')
        self.assertEqual(invoice3.amount_total, 96.95)
        self.assertEqual(invoice3.amount_untaxed, 90.02)
        # test without pressing taxes reset button before validation
        invoice3 = self.create_dummy_invoice_2()
        invoice3.signal_workflow('invoice_open')
        self.assertEqual(invoice3.amount_total, 96.95)
        self.assertEqual(invoice3.amount_untaxed, 90.02)
cloudfoundry/python-buildpack
fixtures/setup_py/funniest/__init__.py
Python
apache-2.0
52
0
def joke():
    return 'Knock Knock. Who is there?'
shy21grams/GroundControl
Simulation/simulationCanvas.py
Python
gpl-3.0
3,902
0.022553
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty
from kivy.graphics import Color, Ellipse, Line
from kivy.graphics.transformation import Matrix
from kivy.core.window import Window
from simulationLine import SimulationLine
from simulationAngle import SimulationAngle
from simulationSled import SimulationSled
from chainLengthToXY import ChainLengthtoXY
from posToChainLength import PosToChainLength
from kivy.graphics.transformation import Matrix
import re
import math


class SimulationCanvas(FloatLayout):
    scatterObject = ObjectProperty(None)

    motorLift = 220
    motorTranslate = 258.8

    bedWidth = 2438.4   # 8'
    bedHeight = 1219.2  # 4'
    motorY = bedHeight + motorLift
    motor2X = bedWidth + motorTranslate

    def initialize(self):
        self.startChains()
        self.drawFrame()
        self.setSpindleLocation(self.bedWidth/2, self.bedHeight/2)
        self.setInitialZoom()
        self.xPosSlider.bind(value=self.xPosSliderValueChange)
        self.yPosSlider.bind(value=self.yPosSliderValueChange)
        self.setupAngles()
        self.setupSled()
        self.lengthToXY.initialize(self.chainA, self.chainB,
            self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift,
            self.motorTranslate, self.motorLift)
        self.posToLength.initialize(self.sled,
            self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift,
            self.motorTranslate, self.motorLift)

    def setSpindleLocation(self, x, y):
        self.chainA.setEnd(x, y)
        self.chainB.setEnd(x, y)

    def xPosSliderValueChange(self, callback, value):
        self.setSpindleLocation(value, self.chainA.toPos[1])

    def yPosSliderValueChange(self, callback, value):
        self.setSpindleLocation(self.chainA.toPos[0], value)

    def drawFrame(self):
        self.frameLeft.initialize()
        self.frameTop.initialize()
        self.frameRight.initialize()
        self.frameBottom.initialize()

        self.frameLeft.setStart(0, 0)
        self.frameLeft.setEnd(0, self.bedHeight)
        self.frameLeft.color = (1, 0, 0)
        self.frameTop.setStart(0, self.bedHeight)
        self.frameTop.setEnd(self.bedWidth, self.bedHeight)
        self.frameTop.color = (1, 0, 0)
        self.frameRight.setStart(self.bedWidth, 0)
        self.frameRight.setEnd(self.bedWidth, self.bedHeight)
        self.frameRight.color = (1, 0, 0)
        self.frameBottom.setStart(0, 0)
        self.frameBottom.setEnd(self.bedWidth, 0)
        self.frameBottom.color = (1, 0, 0)

    def setupAngles(self):
        self.angleA.initialize(self.chainA, self.lineT, 0)
        self.angleB.initialize(self.chainB, self.lineT, 0)
        self.angleP.initialize(self.chainA, self.chainB, 1)

    def setupSled(self):
        self.sled.initialize(self.chainA, self.chainB, 1, self.angleP)

    def setInitialZoom(self):
        mat = Matrix().scale(.4, .4, 1)
        self.scatterInstance.apply_transform(mat, (0, 0))
        mat = Matrix().translate(200, 100, 0)
        self.scatterInstance.apply_transform(mat)

    def startChains(self):
        self.chainA.initialize()
        self.chainB.initialize()
        self.lineT.initialize()
        self.lineT.color = (0, 0, 1)

        self.chainA.setStart(-self.motorTranslate, self.motorY)
        self.chainB.setStart(self.motor2X, self.motorY)

        self.lineT.setStart(-self.motorTranslate, self.motorY)
        self.lineT.setEnd(self.motor2X, self.motorY)
log2timeline/plaso
plaso/preprocessors/generic.py
Python
apache-2.0
3,399
0.005001
# -*- coding: utf-8 -*- """Operating system independent (generic) preprocessor plugins.""" from dfvfs.helpers import file_system_searcher from plaso.lib import definitions from plaso.preprocessors import interface from plaso.preprocessors import manager class DetermineOperatingSystemPlugin( interface.FileSystemArtifactPreprocessorPlugin): """Plugin to determine the operating system.""" # pylint: disable=abstract-method # This plugin does not use an artifact definition and therefore does not # use _ParsePathSpecification. # We need to check for both forward and backward slashes since the path # specification will be dfVFS back-end dependent. _WINDOWS_LOCATIONS = set([ '/windows/system32', '\\windows\\system32', '/winnt/system32', '\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32', '\\wtsrv\\system32', '/wtsrv/system32']) def __init__(self): """Initializes a plugin to determine the operating system.""" super(DetermineOperatingSystemPlugin, self).__init__() self._find_specs = [ file_system_searcher.FindSpec( case_sensitive=False, location='/etc', location_separator='/'), file_system_searcher.FindSpec( case_sensitive=False, location='/System/Library', location_separator='/'), file_system_searcher.FindSpec( case_sensitive=False, location='\\Windows\\System32', location_separator='\\'), file_system_searcher.FindSpec( case_sensitive=False, location='\\WINNT\\System32', location_separator='\\'), file_system_searcher.FindSpec( case_sensitive=False, location='\\WINNT35\\System32', location_separator='\\'), file_system_searcher.FindSpec( case_sensitive=False, location='\\WTSRV\\System32', location_separator='\\')] # pylint: disable=unused-argument def Collect(self, mediator, artifact_definition, searcher, file_system): """Collects values using a file artifact definition. Args: mediator (PreprocessMediator): mediates interactions between preprocess plugins and other components, such as storage and knowledge base. artifact_definition (artifacts.ArtifactDefinition): artifact definition. searcher (dfvfs.FileSystemSearcher): file system searcher to preproce
ss the file system. file_system (dfvfs.FileSystem): file system to be preprocessed. Raises: PreProcessFail: if the preprocessing fails. """ locations = [] for path_spec in searcher.Find(find_specs=self._
find_specs): relative_path = searcher.GetRelativePath(path_spec) if relative_path: locations.append(relative_path.lower()) operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN if self._WINDOWS_LOCATIONS.intersection(set(locations)): operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT elif '/system/library' in locations: operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS elif '/etc' in locations: operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN: mediator.SetValue('operating_system', operating_system) manager.PreprocessPluginsManager.RegisterPlugins([ DetermineOperatingSystemPlugin])
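The detection above reduces to a small pure function over the located paths; a minimal standalone sketch (the trimmed WINDOWS_LOCATIONS set and the guess_os name are illustrative, not plaso API):

# Illustrative sketch only; the real plugin uses dfVFS searchers and
# plaso's definitions constants instead of plain strings.
WINDOWS_LOCATIONS = {'/windows/system32', '\\windows\\system32'}

def guess_os(found_locations):
    locations = {path.lower() for path in found_locations}
    if WINDOWS_LOCATIONS & locations:
        return 'Windows NT'
    if '/system/library' in locations:
        return 'MacOS'
    if '/etc' in locations:
        return 'Linux'
    return 'Unknown'

print(guess_os(['/Etc', '/home']))  # prints: Linux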
aewallin/openvoronoi
python_examples/chain_1.py
Python
lgpl-2.1
3,292
0.001215
import openvoronoi as ovd
import ovdvtk  # helper library for visualization using vtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip

if __name__ == "__main__":
    # size of viewport in pixels
    # w=2500
    # h=1500
    # w=1920
    # h=1080
    w = 1024
    h = 800
    myscreen = ovdvtk.VTKScreen(width=w, height=h)
    ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())

    w2if = vtk.vtkWindowToImageFilter()  # for screenshots
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInputConnection(w2if.GetOutputPort())
    # w2if.Modified()
    # lwr.SetFileName("tux1.png")

    scale = 1
    myscreen.render()
    random.seed(42)
    far = 1
    camPos = far
    zmult = 3
    # camPos/float(1000)
    myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
    myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
    myscreen.camera.SetFocalPoint(0.0, 0, 0)

    vd = ovd.VoronoiDiagram(far, 120)
    print ovd.version(), ovd.build_type()

    # for vtk visualization
    vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
    vod.drawFarCircle()

    vod.textScale = 0.02
    vod.vertexRadius = 0.0031
    vod.drawVertices = 0
    vod.drawVertexIndex = 1
    vod.drawGenerators = 0
    vod.offsetEdges = 1
    vd.setEdgeOffset(0.05)

    linesegs = 1  # switch to turn on/off line-segments

    segs = []
    # ovd.Point(1,1)
    eps = 0.9
    p1 = ovd.Point(-0.1, -0.2)
    p2 = ovd.Point(0.2, 0.1)
    p3 = ovd.Point(0.4, 0.2)
    p4 = ovd.Point(0.6, 0.6)
    p5 = ovd.Point(-0.6, 0.3)

    pts = [p1, p2, p3, p4, p5]

    # t_after = time.time()
    # print ".done in {0:.3f} s.".format( t_after-t_before )

    times = []
    id_list = []
    m = 0
    t_before = time.time()
    print "inserting %d VertexSites one by one: " % len(pts)
    for p in pts:
        id_list.append(vd.addVertexSite(p))
        print " %02d added vertex %3d at ( %1.3f, %1.3f )" % (m, id_list[m], p.x, p.y)
        m = m + 1
    t_after = time.time()
    times.append(t_after - t_before)
    print "all VertexSites inserted."
    vd.check()

    t_before = time.time()
    # vd.debug_on()
    print "inserting %d LineSites one by one: " % (len(id_list))
    for n in range(len(id_list)):
        print " %02d source - target = %02d - %02d " % (n, id_list[n - 1], id_list[n])
        vd.addLineSite(id_list[n - 1], id_list[n])
    print "all LineSites inserted."
    vd.check()
    t_after = time.time()
    line_time = t_after - t_before
    if line_time < 1e-3:
        line_time = 1
    times.append(line_time)

    vod.setVDText2(times)

    err = vd.getStat()
    print "getStat() got errorstats for ", len(err), " points"
    if len(err) > 1:
        minerr = min(err)
        maxerr = max(err)
        print " min error= ", minerr
        print " max error= ", maxerr

    print " num vertices: ", vd.numVertices()
    print " num SPLIT vertices: ", vd.numSplitVertices()

    calctime = t_after - t_before

    vod.setAll()
    print "PYTHON All DONE."

    myscreen.render()
    # w2if.Modified()
    # lwr.SetFileName("{0}.png".format(Nmax))
    # lwr.Write()  # write screenshot to file
    myscreen.iren.Start()
pferreir/indico-backup
indico/MaKaC/plugins/Collaboration/RecordingManager/fossils.py
Python
gpl-3.0
1,501
0.009993
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.

from MaKaC.fossils.contribution import IContributionWithSpeakersFossil
from MaKaC.fossils.subcontribution import ISubContributionWithSpeakersFossil
from MaKaC.common.fossilize import addFossil
from MaKaC.conference import Contribution


class IContributionRMFossil(IContributionWithSpeakersFossil):
    """ This fossil is ready for when we add subcontribution granularity
    to contributions and to provide an example for a plugin-specific fossil """

    def getSubContributionList(self):
        pass
    getSubContributionList.result = ISubContributionWithSpeakersFossil

# We cannot include this fossil in the Contribution class directly because it belongs to a plugin
addFossil(Contribution, IContributionRMFossil)
suneeth51/neutron
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py
Python
apache-2.0
25,812
0
# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time import mock from oslo_config import cfg from oslo_log import log from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base # Useful global dummy variables. NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7' LS_ID = 420 LV_ID = 42 LV_IDS = [42, 43] VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8' VIF_MAC = '3c:09:24:1e:78:23' OFPORT_NUM = 1 VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, VIF_ID, VIF_MAC, 'switch') VIF_PORTS = {VIF_ID: VIF_PORT} FIXED_IPS = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] VM_DEVICE_OWNER = "compute:None" TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}} BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00" UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00" class DummyPort(object): def __init__(self, interface_id): self.interface_id = interface_id class DummyVlanBinding(object): def __init__(self, network_id, vlan_id): self.network_id = network_id self.vlan_id = vlan_id class TunnelTest(object): USE_VETH_INTERCONNECTION = False VETH_MTU = None def setUp(self): super(TunnelTest, self).setUp() cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('report_interval', 0, 'AGENT') self.INT_BRIDGE = 'integration_bridge' self.TUN_BRIDGE = 'tunnel_bridge' self.MAP_TUN_BRIDGE = 'tun_br_map' self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE} self.INT_OFPORT = 11111 self.TUN_OFPORT = 22222 self.MAP_TUN_INT_OFPORT = 33333 self.MAP_TUN_PHY_OFPORT = 44444 self.LVM = self.mod_agent.LocalVLANMapping( LV_ID, 'gre', None, LS_ID, VIF_PORTS) self.LVM_FLAT = self.mod_agent.LocalVLANMapping( LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) self.LVM_VLAN = self.mod_agent.LocalVLANMapping( LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) self.inta = mock.Mock() self.intb = mock.Mock() self.ovs_bridges = { self.INT_BRIDGE: mock.create_autospec( self.br_int_cls('br-int')), self.TUN_BRIDGE: mock.create_autospec( self.br_tun_cls('br-tun')), self.MAP_TUN_BRIDGE: mock.create_autospec( self.br_phys_cls('br-phys')), } self.ovs_int_ofports = { 'patch-tun': self.TUN_OFPORT, 'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT } def lookup_br(br_name, *args, **kwargs): return self.ovs_bridges[br_name] self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS, autospec=True).start() self.mock_int_bridge_cls.side_effect = lookup_br self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS, autospec=True).start() self.mock_phys_bridge_cls.side_effect = lookup_br self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS, autospec=True).start() self.mock_tun_bridge_cls.side_effect = lookup_br self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int
_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT self.mock_int_bridge.add_patch_
port.side_effect = ( lambda tap, peer: self.ovs_int_ofports[tap]) self.mock_int_bridge.get_vif_ports.return_value = [] self.mock_int_bridge.get_ports_attributes.return_value = [] self.mock_int_bridge.db_get_val.return_value = {} self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE] self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE self.mock_map_tun_bridge.add_port.return_value = ( self.MAP_TUN_PHY_OFPORT) self.mock_map_tun_bridge.add_patch_port.return_value = ( self.MAP_TUN_PHY_OFPORT) self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE] self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT self.device_exists = mock.patch.object(ip_lib, 'device_exists').start() self.device_exists.return_value = True self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start() self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start() add_veth = self.ipwrapper.return_value.add_veth add_veth.return_value = [self.inta, self.intb] self.get_bridges = mock.patch.object(ovs_lib.BaseOVS, 'get_bridges').start() self.get_bridges.return_value = [self.INT_BRIDGE, self.TUN_BRIDGE, self.MAP_TUN_BRIDGE] self.execute = mock.patch('neutron.agent.common.utils.execute').start() self._define_expected_calls() def _define_expected_calls(self, arp_responder=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE), ] self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE), ] self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE), ] self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.delete_port('patch-tun'), mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), mock.call.set_db_attribute( 'Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE), ] self.mock_map_tun_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), mock.call.set_db_attribute( 'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE, 'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE), ] self.mock_tun_bridge_expected = [ mock.call.reset_bridge(secure_mode=True), mock.call.setup_controllers(mock.ANY), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ mock.call.add_patch_port('patch-tun', 'patch-int'), ] self.m
argentumproject/electrum-arg
plugins/hw_wallet/hw_wallet.py
Python
mit
3,347
0.000299
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from struct import pack

from electrum_arg.wallet import BIP44_Wallet


class BIP44_HW_Wallet(BIP44_Wallet):
    '''A BIP44 hardware wallet base class.'''
    # Derived classes must set:
    #   - device
    #   - DEVICE_IDS
    #   - wallet_type

    restore_wallet_class = BIP44_Wallet
    max_change_outputs = 1

    def __init__(self, storage):
        BIP44_Wallet.__init__(self, storage)
        # Errors and other user interaction is done through the wallet's
        # handler. The handler is per-window and preserved across
        # device reconnects
        self.handler = None

    def unpaired(self):
        '''A device paired with the wallet was disconnected. This can be
        called in any thread context.'''
        self.print_error("unpaired")

    def paired(self):
        '''A device paired with the wallet was (re-)connected. This can be
        called in any thread context.'''
        self.print_error("paired")

    def get_action(self):
        pass

    def can_create_accounts(self):
        return True

    def can_export(self):
        return False

    def is_watching_only(self):
        '''The wallet is not watching-only; the user will be prompted for
        pin and passphrase as appropriate when needed.'''
        assert not self.has_seed()
        return False

    def can_change_password(self):
        return False

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def first_address(self):
        '''Used to check a hardware wallet matches a software wallet'''
        account = self.accounts.get('0')
        derivation = self.address_derivation('0', 0, 0)
        return (account.first_address()[0] if account else None, derivation)

    def derive_xkeys(self, root, derivation, password):
        if self.master_public_keys.get(self.root_name):
            return BIP44_Wallet.derive_xkeys(self, root, derivation, password)

        # When creating a wallet we need to ask the device for the
        # master public key
        xpub = self.get_public_key(derivation)
        return xpub, None

    def i4b(self, x):
        return pack('>I', x)
1dot75cm/repo-checker
checker/backends/gnome.py
Python
mit
803
0
# -*- coding: utf-8 -*-
from checker.backends import BaseBackend
from checker import logger

log = logger.getLogger(__name__)


class GnomeBackend(BaseBackend):
    """for projects hosted on gnome.org"""
    name = 'Gnome'
    domain = 'gnome.org'
    example = 'https://download.gnome.org/sources/gnome-control-center'

    def __init__(self, url):
        super(GnomeBackend, self).__init__()
        self._url = url
        self._rule_type = "xpath"

    def get_urls(self, branch=None):
        return self._url,

    def get_rules(self):
        log.debug('use %s backend rule for %s package.' %
                  (self.name, self._url.split('/')[-1]))
        return [("//tr/td[3][contains(text(), '-')]/text()", ""),
                ("", "")]

    @classmethod
    def isrelease(cls, url):
        return True
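A hedged sketch of how a caller might apply the xpath rule returned above, assuming lxml is available (the real checker framework that consumes these rules is not shown in this file, and the sample table row is invented for illustration):

# Illustrative only: apply the backend's xpath rule to a fetched listing page.
from lxml import html

page = """<table>
<tr><td>dir</td><td>-</td><td>gnome-control-center-41.0.tar.xz</td></tr>
</table>"""
rule = "//tr/td[3][contains(text(), '-')]/text()"
print(html.fromstring(page).xpath(rule))  # ['gnome-control-center-41.0.tar.xz']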
ccxt/ccxt
examples/py/instantiate-all-at-once.py
Python
mit
447
0.002237
# -*- coding: utf-8 -*-

import os
import sys

root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt  # noqa: E402

exchanges = {}  # a placeholder for your instances

for id in ccxt.exchanges:
    exchange = getattr(ccxt, id)
    exchanges[id] = exchange()

# now exchanges dictionary contains all exchange instances...
exchanges['bittrex'].fetch_order_book('ETH/BTC')
ThunderShiviah/london-social-network-analysis
web_subscriptions/items.py
Python
mit
330
0
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class ListItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    location = scrapy.Field()
rajaram1990/GetNSEStockPrice
GetNSEStockPrice/email_base.py
Python
mit
1,041
0.011527
import sys
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from config import config


def send_mail(message, receivers=config['smtp_receivers']):
    """
    Module that sends emails.
    Message should be a dictionary containing the following keys:
        subject -> Subject of the email
        text -> The plain text part of the email
    uses options from config.py
    """
    sender = config['smtp_sender']
    print sender
    print receivers
    print message
    try:
        smtpObj = smtplib.SMTP_SSL(config['smtp_server'], config['smtp_port'])
        smtpObj.login(config['smtp_login'], config['smtp_password'])
        if not receivers:
            return True
        smtpObj.sendmail(sender, receivers, message)
        smtpObj.quit()
        return True
    except Exception, exc:
        print >> sys.stderr, "Exception From email_base"
        print >> sys.stderr, exc
        return False


if __name__ == '__main__':
    send_mail('Test message')
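Note the docstring asks for a dict with subject and text keys, yet the function hands message straight to sendmail (and the __main__ block passes a plain string); a caller would first need to flatten the dict into a sendable string. A hypothetical helper, sketched on the same imports, could look like this:

# Illustrative sketch only; build_message is not part of the original module.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def build_message(message, sender, receivers):
    # Turn the documented {'subject': ..., 'text': ...} dict into an
    # RFC 2822 string that smtplib.sendmail() accepts.
    mime = MIMEMultipart('alternative')
    mime['Subject'] = message['subject']
    mime['From'] = sender
    mime['To'] = ', '.join(receivers)
    mime.attach(MIMEText(message['text'], 'plain'))
    return mime.as_string()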
f0rki/cb-multios
original-challenges/Palindrome/poller/for-testing/machine.py
Python
mit
1,305
0.011494
from generator.actions import Actions
import random
import string


class Palindrome(Actions):

    def start(self):
        pass

    def banner(self):
        # Confirm the initial empty line
        self.read(delim='\n', expect='\n')
        # Confirm the actual banner
        self.read(delim='\n', expect='Welcome to Palindrome Finder\n')

    def request(self):
        # Skip the empty line
        self.read(delim='\n', expect='\n')
        # Confirm the request
        self.read(length=37, expect='\tPlease enter a possible palindrome: ')

    def palindrome(self):
        halfword = self.random_string(random.randint(1, 16))
        self.write(halfword + halfword[::-1] + "\n")
        self.read(delim='\n', expect="\t\tYes, that's a palindrome!\n")

    def not_palindrome(self):
        word = self.random_string(random.randint(2, 32))
        while self.is_palindrome(word):
            word = self.random_string(random.randint(2, 32))
        self.write(word + "\n")
        self.read(delim='\n', expect="\t\tNope, that's not a palindrome\n")

    def is_palindrome(self, word):
        for i in range(0, len(word) / 2):
            if word[i] != word[-i - 1]:
                return False
        return True

    def random_string(self, size):
        chars = string.letters + string.digits
        return ''.join(random.choice(chars) for _ in range(size))
kingsdigitallab/kdl-django
cms/migrations/0009_blogpost_date.py
Python
mit
565
0.00177
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-17 08:53
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('cms', '0008_blogindexpage_blogpost_blogposttag'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='date',
            field=models.DateField(default=django.utils.timezone.now, verbose_name='body'),
            preserve_default=False,
        ),
    ]
aurex-linux/virt-manager
virtManager/createpool.py
Python
gpl-2.0
18,923
0.000951
# # Copyright (C) 2008, 2013 Red Hat, Inc. # Copyright (C) 2008 Cole Robinson <crobinso@redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA. # # pylint: disable=E0611 from gi.repository import Gtk from gi.repository import Gdk # pylint: enable=E0611 import logging from virtManager.baseclass import vmmGObjectUI from virtManager.asyncjob import vmmAsyncJob from virtManager import uiutil from virtinst import StoragePool PAGE_NAME = 0 PAGE_FORMAT = 1 class vmmCreatePool(vmmGObjectUI): def __init__(self, conn): vmmGObjectUI.__init__(self, "createpool.ui", "vmm-create-pool") self.conn = conn self._pool = None self.builder.connect_signals({ "on_pool_forward_clicked" : self.forward, "on_pool_back_clicked" : self.back, "on_pool_cancel_clicked" : self.close, "on_vmm_create_pool_delete_event" : self.close, "on_pool_finish_clicked" : self.forward, "on_pool_pages_change_page" : self.page_changed, "on_pool_source_button_clicked" : self.browse_source_path, "on_pool_target_button_clicked" : self.browse_target_path, "on_pool_name_activate": self.forward, "on_pool_hostname_activate" : self.hostname_changed, "on_pool_iqn_chk_toggled": self.iqn_toggled, }) self.bind_escape_key_close() self.set_initial_state() self.set_page(PAGE_NAME) def show(self, parent): logging.debug("Showing new pool wizard") self.reset_state() self.topwin.set_transient_for(parent) self.topwin.present() def close(self, ignore1=None, ignore2=None): logging.debug("Closing new pool wizard") self.topwin.hide() return 1 def _cleanup(self): self.conn = None self._pool = None def set_initial_state(self): self.widget("pool-pages").set_show_tabs(False) blue = Gdk.Color.parse("#0072A8")[1] self.widget("header").modify_bg(Gtk.StateType.NORMAL, blue) type_list = self.widget("pool-type") type_model = Gtk.ListStore(str, str) type_list.set_model(type_model) uiutil.set_combo_text_column(type_list, 1) format_list = self.widget("pool-format") format_model = Gtk.ListStore(str, str) format_list.set_model(format_model) uiutil.set_combo_text_column(format_list, 1) # Target path combo box entry target_list = self.widget("pool-target-path") # target_path, Label, pool class instance target_model = Gtk.ListStore(str, str, object) target_model.set_sort_column_id(0, Gtk.SortType.ASCENDING) target_list.set_model(target_model) target_list.set_entry_text_column(0) # Source path combo box entry source_list = self.widget("pool-source-path") # source_path, Label, pool class instance source_model = Gtk.ListStore(str, str, object) source_model.set_sort_column_id(0, Gtk.SortType.ASCENDING) source_list.set_model(source_model) source_list.set_entry_text_column(0) self.populate_pool_type() def reset_state(self): self.widget("pool-pages").set_current_page(0) self.widget("pool-forward").show() self.widget("pool-finish").hide() self.widget("pool-back").set_sensitive(False) self.widget("pool-name").set_text("") 
self.widget("pool-name").grab_focus() self.widget("pool-type").set_active(0) self.widget("pool-target-path").get_child().set_text("") self.widget("pool-source-path").get_child().set_text("") self.widget("pool-hostname").set_text("") self.widget("pool-iqn-chk").set_active(False) self.widget("pool-iqn-chk").toggled() self.widget("pool-iqn").set_text("") self.widget("pool-format").set_active(-1) self.widget("pool-build").set_sensitive(True) self.widget("pool-build").set_active(False) self.widget("pool-details-grid").set_visible(False) def hostname_changed(self, ignore): # If a hostname was entered, try to lookup valid pool sources. self.populate_pool_sources() def iqn_toggled(self, src): self.widget("pool-iqn").set_sensitive(src.get_active()) def populate_pool_type(self): model = self.widget("pool-type").get_model() model.clear() types = StoragePool.get_pool_types() types.sort() for typ in types: model.append([typ, "%s: %s" % (typ, StoragePool.get_pool_type_desc(typ))]) def populate_pool_format(self, formats): model = self.widget("pool-format").get_model() model.clear() for f in formats: model.append([f, f]) def populate_pool_sources(self): source_list = self.widget("pool-source-path") source_model = source_list.get_model() source_model.clear() target_list = self.widget("pool-target-path") target_model = target_list.get_model() target_model.clear() use_list = source_list
use_model = source_model entry_list = [] if self._pool.type == StoragePool.TYPE_SCSI: entry_list = self.list_scsi_adapters() use_list
= source_list use_model = source_model elif self._pool.type == StoragePool.TYPE_LOGICAL: pool_list = self.list_pool_sources() entry_list = [[p.target_path, p.target_path, p] for p in pool_list] use_list = target_list use_model = target_model elif self._pool.type == StoragePool.TYPE_DISK: entry_list = self.list_disk_devs() use_list = source_list use_model = source_model elif self._pool.type == StoragePool.TYPE_NETFS: host = self.get_config_host() if host: pool_list = self.list_pool_sources(host=host) entry_list = [[p.source_path, p.source_path, p] for p in pool_list] use_list = source_list use_model = source_model for e in entry_list: use_model.append(e) if entry_list: use_list.set_active(0) def list_scsi_adapters(self): scsi_hosts = self.conn.get_nodedevs("scsi_host") host_list = [dev.host for dev in scsi_hosts] clean_list = [] for h in host_list: name = "host%s" % h tmppool = self._make_stub_pool() tmppool.source_path = name entry = [name, name, tmppool] if name not in [l[0] for l in clean_list]: clean_list.append(entry) return clean_list def list_disk_devs(self): devs = self.conn.get_nodedevs("storage") devlist = [] for dev in devs: if dev.drive_type != "disk" or not dev.block: continue devlist.append(dev.block) devlist.sort() clean_list = [] for dev in devlist: tmppool = self._make_stub_pool() tmppool.source_path = dev entry = [dev, dev, tmppool] if dev not in [l[0] for l in clean_list]: clean_list.append(entry) return clean_list def list_pool_sources(self, host=None): pool_type = self._pool.type plist = [] try:
flocca/flocca_dot_com
flocca_dot_com/settings.py
Python
gpl-3.0
5,929
0.001012
# This Python file uses the following encoding: utf-8
"""
Copyright 2013 Giacomo Antolini <giacomo.antolini@gmail.com>.

This file is part of flocca_dot_com.

flocca_dot_com is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

flocca_dot_com is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Nome-Programma. If not, see <http://www.gnu.org/licenses/>.
"""

# Django settings for flocca_dot_com project.
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Giacomo Antolini', 'giacomo.antolini@gmail.com')
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(os.path.dirname(__file__), '..', 'flocca_dot_com.sql').replace('\\', '/'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.dirname(__file__), '..', 'static').replace('\\', '/'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = '7x8i2+j(yn4(s^%)n6s+%ergrl9ba0e*0^1t+%cbhuvx#8k6(5'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'flocca_dot_com.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'flocca_dot_com.wsgi.application'

import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
bikash/h2o-dev
h2o-py/tests/testdir_algos/rf/pyunit_iris_ignoreRF.py
Python
apache-2.0
405
0.02716
import sys
sys.path.insert(1, "../../../")
import h2o


def iris_ignore(ip, port):
    # Connect to h2o
    h2o.init(ip, port)

    iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris2.csv"))

    for maxx in range(4):
        model = h2o.random_forest(y=iris[4], x=iris[range(maxx+1)], ntrees=50, max_depth=100)
        model.show()


if __name__ == "__main__":
    h2o.run_test(sys.argv, iris_ignore)
denny820909/builder
lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/test/__init__.py
Python
mit
2,076
0.003372
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members import sys import twisted from twisted.trial import unittest from buildslave import monkeypatches # apply the same patches the slave does when it starts monkeypatches.patch_all(for_tests=True) def add_debugging_monkeypatches(): """ DO NOT CALL THIS DIRECTLY This adds a few "harmless" monkeypatches which make it easier to d
ebug failing tests. """ from twisted.application.service import Service old_startService = Service.startService old_stopService = Service.stopService def startService(self): assert not self.running return old_startService(self) def stopService(self): assert self.running return old_stopService(self) Service.startService = star
tService Service.stopService = stopService # versions of Twisted before 9.0.0 did not have a UnitTest.patch that worked # on Python-2.7 if twisted.version.major <= 9 and sys.version_info[:2] == (2,7): def nopatch(self, *args): raise unittest.SkipTest('unittest.TestCase.patch is not available') unittest.TestCase.patch = nopatch add_debugging_monkeypatches() __all__ = [] # import mock so we bail out early if it's not installed try: import mock mock = mock except ImportError: raise ImportError("Buildbot tests require the 'mock' module; " "try 'pip install mock'")
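A condensed, self-contained sketch of the wrap-and-assert monkeypatch pattern used above, with Twisted's Service replaced by a stub class (names are illustrative):

class Service(object):
    running = False
    def startService(self):
        self.running = True

_old_startService = Service.startService

def startService(self):
    assert not self.running  # fail fast on double-starts in tests
    return _old_startService(self)

Service.startService = startService

s = Service()
s.startService()        # fine
try:
    s.startService()    # second start trips the assertion
except AssertionError:
    print('double start caught')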
billiob/papyon
papyon/service/description/SingleSignOn/RequestMultipleSecurityTokens.py
Python
gpl-2.0
4,656
0.004296
# -*- coding: utf-8 -*- # # papyon - a python client library for Msn # # Copyright (C) 2005-2006 Ali Sabil <ali.sabil@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import xml.sax.saxutils as xml class LiveService(object): CONTACTS = ("contacts.msn.com", "MBI") MESSENGER = ("messenger.msn.com", "?id=507") MESSENGER_CLEAR = ("messengerclear.live.com", "MBI_KEY_OLD") MESSENGER_SECURE = ("messengersecure.live.com", "MBI_SSL") SPACES = ("spaces.live.com", "MBI") STORAGE = ("storage.msn.com", "MBI") TB = ("http://Passport.NET/tb", None) VOICE = ("voice.messenger.msn.com", "?id=69264") @classmethod def url_to_service(cls, url): for attr_name in dir(cls): if attr_name.startswith('_'): continue attr = getattr(cls, attr_name)
if isinstance(attr, tuple) and attr[0] == url: return attr return None def transport_headers(): """Returns a dictionary, containing transport (http) headers to use for the request""" return {} def soap_action(): """Returns the SOAPAction value to pass to the transport or None if no SOAPAction needs to be specified""" return None def soap_header(account, password): """Returns the SOAP xml header""" return """
<ps:AuthInfo xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" Id="PPAuthInfo"> <ps:HostingApp>{7108E71A-9926-4FCB-BCC9-9A9D3F32E423}</ps:HostingApp> <ps:BinaryVersion>4</ps:BinaryVersion> <ps:UIVersion>1</ps:UIVersion> <ps:Cookies/> <ps:RequestParams>AQAAAAIAAABsYwQAAAAxMDMz</ps:RequestParams> </ps:AuthInfo> <wsse:Security xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext"> <wsse:UsernameToken Id="user"> <wsse:Username>%(account)s</wsse:Username> <wsse:Password>%(password)s</wsse:Password> </wsse:UsernameToken> </wsse:Security>""" % {'account': xml.escape(account), 'password': xml.escape(password)} def soap_body(*tokens): """Returns the SOAP xml body""" token_template = """ <wst:RequestSecurityToken xmlns:wst="http://schemas.xmlsoap.org/ws/2004/04/trust" Id="RST%(id)d"> <wst:RequestType>http://schemas.xmlsoap.org/ws/2004/04/security/trust/Issue</wst:RequestType> <wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2002/12/policy"> <wsa:EndpointReference xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/03/addressing"> <wsa:Address>%(address)s</wsa:Address> </wsa:EndpointReference> </wsp:AppliesTo> %(policy_reference)s </wst:RequestSecurityToken>""" policy_reference_template = """ <wsse:PolicyReference xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext" URI=%(uri)s/>""" tokens = list(tokens) if LiveService.TB in tokens: tokens.remove(LiveService.TB) assert(len(tokens) >= 1) body = token_template % \ {'id': 0, 'address': xml.escape(LiveService.TB[0]), 'policy_reference': ''} for id, token in enumerate(tokens): if token[1] is not None: policy_reference = policy_reference_template % \ {'uri': xml.quoteattr(token[1])} else: policy_reference = "" t = token_template % \ {'id': id + 1, 'address': xml.escape(token[0]), 'policy_reference': policy_reference} body += t return '<ps:RequestMultipleSecurityTokens ' \ 'xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" ' \ 'Id="RSTS">%s</ps:RequestMultipleSecurityTokens>' % body def process_response(soap_response): body = soap_response.body return body.findall("./wst:RequestSecurityTokenResponseCollection/" \ "wst:RequestSecurityTokenResponse")
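A usage sketch of the builders above, assuming they are in scope; the account and password are placeholders. soap_body always emits the RST0 token for Passport.NET/tb plus one RequestSecurityToken per requested service:

header = soap_header('user@example.com', 's3cret')
body = soap_body(LiveService.MESSENGER_CLEAR, LiveService.CONTACTS)
print(header)   # UsernameToken carrying the XML-escaped credentials
print(body)     # RST0 (Passport.NET/tb) plus RST1/RST2 with policy references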
grtfou/data-analytics-web
website/utils/data_exportor.py
Python
mit
8,235
0
# -*- coding: utf-8 -*- # @date 161103 - Export excel with get_work_order_report function """ Data exportor (Excel, CSV...) """ import io import math from datetime import datetime from xlsxwriter.workbook import Workbook import tablib from utils.tools import get_product_size def get_customers(customer_list=None, file_format='csv'): """Generate customer data file for download.""" if customer_list is None: customer_list = [] data = tablib.Dataset() data.headers = ('客戶代碼', '客戶名稱') for c in customer_list: data.append((c.c_code, c.c_name)) if file_format == 'csv': return data.csv return data def get_maintenance_log(log_list=None, file_format='csv'): """Generate maintenance log to csv file for download.""" if log_list is None: log_list = [] data = tablib.Dataset() data.headers = ('機台', '維修項目', '開始時間', '員工', '結束時間', '員工', '總計時間') for log in log_list: m_code = log['m_code'].replace('<br>', '\n') data.append((log['machine_id'], m_code, log['start_time'], log['who_start'], log['end_time'], log['who_end'], log['total_time'][0]) ) if file_format == 'csv': return data.csv return data def get_w_m_performance_report(file_format='xls'): """Generate excel file for download by worker and machine performance.""" row_number = 11 data = tablib.Dataset() data.append(['個人效率期間表 ({})'.format( datetime.now().strftime("%Y/%m/%d"))] + [''] * (row_number - 1)) data.append(['工號', '姓名', '日期', '標準量', '效率標準量', '實質生產量', '總稼動時間', '總停機時間', '稼動 %', '數量效率 %', '平均效率 %']) if file_format == 'xls': return data.xls return data def get_loss_rate_report(report_data, file_format='csv'): """Generate csv file for download by machine loss rate.""" data = tablib.Dataset() data.headers = ('機台', '機型', '良品數', '不良品數', '損耗率(%)', '損耗金額(RMB)', '損耗率排名') rank = 0 old_loss_rate = None for r in sorted(report_data, key=lambda k: k['loss_rate'], reverse=True): if old_loss_rate != r['loss_rate']: rank += 1 old_loss_rate = r['loss_rate'] record = [r['machine_id'], r['machine_type'], r['count_qty'], r['event_qty'], r['loss_rate'], r['total_loss_money'], rank] data.append(record) if file_format == 'csv': return data.csv return data def get_loss_rate_detail_report(report_data, file_format='csv'): """Generate csv file for download by machine loss rate detail.""" data = tablib.Dataset() data.headers = ('日期', '良品數', '不良品數', '損耗率(%)', '損耗金額(RMB)') for r in sorted(report_data, key=lambda k: k['record_date']): record = [r['record_date'], r['count_qty'], r['event_qty'], r['loss_rate'], r['total_loss_money']] data.append(record) if file_format == 'csv': return data.csv return data def get_uptime_report(report_data='', file_format='xls'): """Generate excel file for download by uptime information.""" data = tablib.Dataset() data.append_separator('製造部各工程稼動率一覽表') data.append(['月份:10', '星期', '', '', '', '', '', '目標', '', '', '', '']) data.append(['', '', '加締卷取(%)', '組立(%)', '老化(%)', 'CUTTING(%)', 'TAPPING(%)', '加締卷取', '組立', '老化', 'CUTTING', 'TAPPING']) if file_format == 'xls': return data.xls return data def get_work_order_report(report_data, file_format='csv'): """Generate csv file for download by work order.""" # data = tablib.Dataset() # data.headers = ('製令編號', '料號', '客戶', '產品規格', # '投入數', '應繳庫數', # '加締捲取', '組立', '老化', '選別', '加工切角') # for r in sorted(report_data, key=lambda k: k['order_no']): # try: # intput_count = int(r['input_count']) # except (TypeError, ValueError): # intput_count = -1 # record = [r['order_no'], r['part_no'], r['customer'], r['product'], # intput_count, math.floor(intput_count / 1.03), # r['step1_status'], r['step2_status'], r['step3_status'], 
# r['step4_status'], r['step5_status']] # data.append(record) # if file_format == 'csv': # return data.csv # return data output = io.BytesIO() if file_format == 'xls': workbook = Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet() # merge_format = workbook.add_format({ # 'bold': 1, # 'border': 1, # 'align': 'center', # 'valign': 'vcenter'}) worksheet.merge_range('A1:A3', '製令編號') worksheet.merge_range('B1:B3', '料號') worksheet.merge_range('C1:C3', '客戶') worksheet.merge_range('D1:D3', '產品規格') worksheet.merge_range('E1:E3', '投入數') worksheet.merge_range('F1:F3', '應繳庫數') worksheet.write('G1', '加締捲取') worksheet.write('H1', '組立') worksheet.write('I1', '老化') worksheet.write('J1', '選別') worksheet.write('K1', '加工切角') for col_name in ('G', 'H', 'I', 'J', 'K'): worksheet.write(col_name + '2', '機器') worksheet.write(col_name + '3', '良品數') row = 4 for r in sorted(report_data, key=lambda k: k['order_no']): try: intput_count = int(r['input_count']) except (TypeError, ValueError): intput_count = -1 worksheet.merge_range('A{}:A{}'.format(row, row + 2), r['order_no']) worksheet.merge_range('B{}:B{}'.format(row, row + 2), r['part_no']) worksheet.merge_range('C{}:C{}'.format(row, row + 2), r['customer']) worksheet.merge_range('D{}:D{}'.format(row, row + 2), r['product']) worksheet.merge_range('E{}:E{}'.format(row, row + 2), intput_count) worksheet.merge_range('F{}:F{}'.format(row, row + 2), math.floor(intput_count / 1.03)) for process in range(1, 6): row_tag = chr(71 + process - 1) worksheet.write_string('{}{}'.format(row_tag, row), r['step{}_status'.format(process)]) machine = r['step{}_machine'.format(process)] count = r['step{}_count'.format(process)] worksheet.write_string('{}{}'.format(row_tag, row + 1), machine if machine else '') worksheet.write_string('{}{}'.format(row_tag, row + 2), str(count) if count
else '') row += 3 workbook.close() output.seek(0) return output.read() def get_order_report(report_data, file_format='csv'): """Generate csv file for download by machine loss rate detail.""" data = tablib.Dataset() data.headers = ('製令編號', '客戶', '規格', '投入數', '需求數', '加締捲曲', '組立', '老化', '選別', '加工切腳') for r in sorted(report_data, key=lambda k: k['order_no']): record = [r['order_no'], r['cu
stomer'], get_product_size(r['part_no']), r['input_count'], r['require_count'], r['step1_prod_qty'], r['step2_prod_qty'], r['step3_prod_qty'], r['step4_prod_qty'], r['step5_prod_qty']] data.append(record) if file_format == 'csv': return data.csv return data
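The exporters above all follow the same tablib pattern: build a Dataset, append rows, return its csv property. A self-contained sketch with a stand-in customer object (the real model comes from the site's ORM and is assumed here):

import tablib
from collections import namedtuple

Customer = namedtuple('Customer', ['c_code', 'c_name'])

data = tablib.Dataset(headers=('code', 'name'))
for c in (Customer('C001', 'Acme'), Customer('C002', 'Globex')):
    data.append((c.c_code, c.c_name))
print(data.csv)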
egbertbouman/tribler-g
Tribler/Core/DecentralizedTracking/pymdht/core/ptime.py
Python
lgpl-2.1
124
0.008065
import sys import time sleep = time.sleep if s
ys.platform == 'win32': time = time.clock else: t
ime = time.time
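The three-line module above aliases the highest-resolution wall clock per platform (time.clock on win32, time.time elsewhere). time.clock was removed in Python 3.8; a hedged modern equivalent of the same alias trick:

import time

sleep = time.sleep
now = time.perf_counter   # monotonic and high-resolution on every platform

start = now()
sleep(0.1)
print('elapsed: %.3fs' % (now() - start))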
publica-io/django-publica-modals
runtests.py
Python
bsd-3-clause
1,244
0.000804
import sys try: from django.conf impo
rt settings settings.configure( DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", } }, ROOT_URLCONF="modals.urls", INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", "modals", "templates", "images", "attrs", "tests", "menus"
], SITE_ID=1, NOSE_ARGS=['-s'], ) try: import django setup = django.setup except AttributeError: pass else: setup() from django_nose import NoseTestSuiteRunner except ImportError: import traceback traceback.print_exc() raise ImportError("To fix this error, run: pip install -r requirements-test.txt") def run_tests(*test_args): if not test_args: test_args = ['tests'] # Run tests test_runner = NoseTestSuiteRunner(verbosity=1) failures = test_runner.run_tests(test_args) if failures: sys.exit(failures) if __name__ == '__main__': run_tests(*sys.argv[1:])
aonotas/chainer
chainer/initializers/orthogonal.py
Python
mit
2,709
0
import numpy from chainer.backends import cuda from chainer import initializer # Original code forked from MIT licensed keras project # https://github.com/fchollet/keras/blob/master/keras/initializations.py class Orthogonal(initializer.Initializer): """Initializes array with an orthogonal system. This initializer first makes a matrix of the same shape as the array to be initialized whose elements are drawn independently from standard Gaussian distribution. Next, it applies Singular Value Decomposition (SVD) to the matrix. Then, it initializes the array with either side of resultant orthogonal matrices, depending on the shape of the input array. Finally, the array is multiplied by the constant ``scale``. If the ``ndim`` of the input array is more than 2, we consider the array to be a matrix by concatenating all axes except the first one. The number of vectors consisting of the orthogonal system (i.e. first element of the shape of the array) must be equal to or smaller than the dimension of each vector (i.e. second element of the shape of the array). Attributes: ~Orthogonal.scale (float): A constant to be multiplied by. ~Orthogonal.dtype: Data type specifier. Reference: Saxe et al., https://arxiv.org/abs/1312.6120 """ def __init__(self, scale=1.1, dtype=None): self.scale = scale super(Orthogonal, self).__init__(dtype) # TODO(Kenta Oono) # How do we treat overcomplete base-system case? def __call__(self, array): if self.dtype is not None: assert array.dtype == self.dtype xp = cuda.get_array_module(array) if not array.shape: # 0-dim case array[...] = self.scale elif not array.size: raise ValueError('Array to be initialized must be non-empty.') else: # numpy.prod returns float value when the argument is empty. flat_shape = (len(array), int(numpy.prod(array.shape[1:]))) if flat_shape[0] > flat_shape[1]: raise Value
Error('Cannot make orthogonal system because' ' # of vectors ({}) is larger than' ' that of dimensions ({})'.format( flat_shape[0], flat_sh
ape[1])) a = numpy.random.normal(size=flat_shape) # we do not have cupy.linalg.svd for now u, _, v = numpy.linalg.svd(a, full_matrices=False) # pick the one with the correct shape q = u if u.shape == flat_shape else v array[...] = xp.asarray(q.reshape(array.shape)) array *= self.scale
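A standalone numpy check of the property the initializer establishes, mirroring its flatten-SVD-reshape path (a sketch, not chainer's own test code):

import numpy

scale = 1.1
array = numpy.empty((3, 4, 4), dtype=numpy.float64)   # 3 vectors of dimension 16
flat_shape = (len(array), int(numpy.prod(array.shape[1:])))
a = numpy.random.normal(size=flat_shape)
u, _, v = numpy.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v                 # pick the factor with the right shape
array[...] = (q * scale).reshape(array.shape)
rows = array.reshape(flat_shape)
# Rows are orthogonal with norm `scale`, so rows . rows^T == scale^2 * I
assert numpy.allclose(rows.dot(rows.T), scale ** 2 * numpy.eye(len(array)))
print('orthogonality check passed')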
Jumpscale/jumpscale6_core
apps/watchdogmanager/alerttypes/critical.py
Python
bsd-2-clause
2,105
0.003325
from JumpScale import j
import JumpScale.baselib.watchdog.manager
import JumpScale.baselib.redis
import JumpScale.lib.rogerthat

descr = """
critical alert
"""

organization = "jumpscale"
enable = True

REDIS_PORT = 9999
API_KEY = j.application.config.get('rogerthat.apikey')

redis_client = j.clients.credis.getRedisClient('127.0.0.1', REDIS_PORT)
rogerthat_client = j.clients.rogerthat.get(API_KEY)

ANSWERS = [{'id': 'yes', 'caption': 'Take', 'action': '', 'type': 'button'},]

# _send_message was commented out in the original even though escalateL2 and
# escalateL3 below call it; it is restored here so those paths do not raise
# NameError at runtime.
def _send_message(message, contacts, answers=ANSWERS, alert_flags=6):
    result = rogerthat_client.send_message(message, contacts, answers=answers, alert_flags=alert_flags)
    if result:
        if result['error']:
            j.logger.log('Could not send rogerthat message')
            return
        else:
            message_id = result['result']
            return message_id


def escalateL1(watchdogevent):
    if not j.tools.watchdog.manager.inAlert(watchdogevent):
watchdogevent.escalationstate = 'L1' # contact1 = redis_client.hget('contacts', '1') message = str(watchdogevent) # message_id = _send_message(message, [contact1,]) # watchdogevent.message_id = message_id j.tools.watc
hdog.manager.setAlert(watchdogevent) print "Escalate:%s"%message def escalateL2(watchdogevent): if watchdogevent.escalationstate == 'L1': watchdogevent.escalationstate = 'L2' contacts = redis_client.hgetall('contacts') message = str(watchdogevent) message_id = _send_message(message, [contacts['2'], contacts['3']]) watchdogevent.message_id = message_id j.tools.watchdog.manager.setAlert(watchdogevent) def escalateL3(watchdogevent): if watchdogevent.escalationstate == 'L2': watchdogevent.escalationstate = 'L3' contacts = redis_client.hgetall('contacts')['all'].split(',') message = str(watchdogevent) message_id = _send_message(message, contacts) watchdogevent.message_id = message_id j.tools.watchdog.manager.setAlert(watchdogevent)
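The three functions above form a guarded escalation chain: each level fires only from the previous state. A standalone sketch of that state machine with the JumpScale plumbing stubbed out (names are illustrative):

class Event(object):
    def __init__(self):
        self.escalationstate = None   # None -> 'L1' -> 'L2' -> 'L3'

def escalate(event):
    order = [None, 'L1', 'L2', 'L3']
    idx = order.index(event.escalationstate)
    if idx < len(order) - 1:
        event.escalationstate = order[idx + 1]
    return event.escalationstate

e = Event()
print([escalate(e) for _ in range(4)])   # ['L1', 'L2', 'L3', 'L3']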
zxl200406/minos
owl/machine/management/commands/import_xman.py
Python
apache-2.0
2,543
0.012977
import csv import logging import json import sys import urllib2 from django.conf import settings from django.core.management.base import BaseCommand from machine.models import Machine logger = logging.getLogger(__name__) XMAN_URL = "http://10.180.2.243/api/hostinfo.php?sql=hostname+=+'%s'" IDC_ABBR = { 'shangdi': 'sd', 'lugu': 'lg', 'lugu6': 'lg', 'haihang': 'hh', 'wucaicheng': 'dp', } class Command(BaseCommand): def handle(self, *args, **options): changes = [] for machine in Machine.objects.order_by('hostname'): hostname = machine.hostname url = XMAN_URL % hostname data = json.load(urllib2.urlopen(url)) xman = {} if data and type(data) is dict: k, v = data.popitem() if v and type(v) is dict: try: xman = { 'ip': v['ipaddr'], 'idc': IDC_ABBR[v['site'].lower()], 'rack'
: v['location'].lower(), } except Exception as e: print 'Error on host: %s' % hostname raise if not xman: # the machine doesn't exist in xm
an, delete it later. changes.append((machine, xman, )) else: # check if any field changed. # can't use iteritems as the dict might change. for k, v in xman.items(): if getattr(machine, k) == v: del xman[k] if xman: # some fields changed. changes.append((machine, xman, )) if not changes: print 'Nothing updated from xman, exiting.' else: print 'All changes from xman:' for machine, xman in changes: self.print_change(machine, xman) print print 'Confirm following changes...' answer = None for machine, xman in changes: self.print_change(machine, xman) while answer != 'a': answer = raw_input('Apply this or all following change[s]? ' '<y[es]/n[o]/a[ll]>: ') if answer in ['y', 'n', 'a']: break if answer == 'n': continue # apply change self.apply_change(machine, xman) def print_change(self, machine, xman): if not xman: action = 'host deleted' else: action = ', '.join(['%s: %s ==> %s' % (k, getattr(machine, k), v) for k, v in xman.iteritems()]) print '%s: %s' % (machine.hostname, action) def apply_change(self, machine, xman): if not xman: machine.delete() else: for k, v in xman.iteritems(): setattr(machine, k, v) machine.save()
Maethorin/concept2
migrations/versions/6d8e9e4138bf_.py
Python
mit
760
0.011842
"""empty message Revision ID: 6d8e9e4138bf Revises: 445667ce6268 Create Date: 2016-03-03 10:36:03.205829 """ # revision identifiers, used by Alembic. revision = '6d8e9e4138bf' down_revision = '445667ce6268' from alembic import op import app import sqlalchemy as sa def upgrade(): ### commands auto generated
by Alembic - please adjust! ### op.add_column('provas', sa.Column('data_inicio', sa.DateTime(), nullable=True)) op.add_column('provas', sa.Column('tempo_execucao', sa.Integer(), nullable=True)) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('provas', 'tempo_execucao') op.drop_column('provas', 'da
ta_inicio') ### end Alembic commands ###
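For context, a hedged sketch of driving this revision programmatically; the Config path ('alembic.ini') is an assumption about the project layout, and `alembic upgrade head` on the command line is the usual equivalent:

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')              # project-specific; an assumption here
command.upgrade(cfg, '6d8e9e4138bf')     # runs upgrade(): adds the two columns
command.downgrade(cfg, '445667ce6268')   # runs downgrade(): drops them again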
Kent1/nxpy
nxpy/flow.py
Python
apache-2.0
4,735
0
import re from lxml import etree from nxpy.util import tag_pattern, whitespace_pattern class Flow(object): def __init__(self): self.routes = [] def export(self): flow = etree.Element('flow') if len(self.routes): for route in self.routes: flow.append(route.export()) return flow else: return False def build(self, node): for child in node: nodeName_ = tag_pattern.match(child.tag).groups()[-1] self.buildChildren(child, nodeName_) def buildChildren(self, child_, nodeName_, from_subclass=False): if nodeName_ == 'route': obj_ = Route() obj_.build(child_) self.routes.append(obj_) class Route(object): def __init__(self): self.name = '' self.operation = None self.match = { "destination": [], "source": [], "protocol": [], "port": [], "destination-port": [], "source-port": [], "icmp-code": [], "icmp-type": [], "tcp-flags": [], "packet-length": [], "dscp": [], "fragment": [] } ''' Match is a dict with list values example: self. match = { "destination": [<ip-prefix(es)>], "source": [<ip-prefix(es)>], "protocol": [<numeric-expression(s)>], "port": [<numeric-expression(s)>], "destination-port": [<numeric-expression(s)>] "source-port": [<numeric-expression(s)>], "icmp-code": [<numeric-expression(s)>], "icmp-type": [<numeric-expression(s)>], "tcp-flags": [<bitwise-expression(s)>], "packet-length": [<numeric-expression(s)>], "dscp": [<numeric-expression(s)>], "fragment": [ "dont-fragment" "not-a-fragment" "is-fragment" "first-fragment" "last-fragment" ] ''' self.then = { "accept": False, "discard": False, "community": False, "next-term": False, "rate-limit": False, "sample": False, "routing-instance": False } '''Then is a dict (have to see about this in the future: self.then = { "accept": True/False, "discard": True/False, "community": "<name>"/False, "next-term": True/False, "rate-limit": <rate>/False, "sample": True/False, "routing-instance": "<RouteTarget extended community>" } ''' def export(self): if self.operation: ro = etree.Element('route', {'operation': self.operation}) else: ro = etree.Element('route') if self.name: etree.SubElement(ro, "name").text = self.name match = etree.Element("match") for key in self.match: if self.match[key]: for value in self.match[key]: etree.SubElement(match, key).text = value if match.getchildren(): ro.append(match) then = etree.Element("then") for key in self.then: if self.then[key]: if self.then[key] is not True and self.then[key] is not False: etree.SubElement(then, key).text = self.then[key]
else: etree.SubElement(then, key) if then.getchildren(): ro.append(then) if ro.getchildren(): return ro else: return False def build(self, node): for child in node: nodeName_ = tag_pattern.match(child.tag).groups()[-1] self.buildChildren(child, nodeName_) def buildChi
ldren(self, child_, nodeName_, from_subclass=False): if nodeName_ == 'name': name_ = child_.text name_ = re.sub(whitespace_pattern, " ", name_).strip() self.name = name_ elif nodeName_ == 'match': for grandChild_ in child_: grandChildName_ = tag_pattern.match( grandChild_.tag).groups()[-1] grandChildText = grandChild_.text grandChildText = re.sub( whitespace_pattern, " ", grandChildText).strip() self.match[grandChildName_].append(grandChildText) elif nodeName_ == 'then': for grandChild_ in child_: grandChildName_ = tag_pattern.match( grandChild_.tag).groups()[-1] self.then[grandChildName_] = True
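Assuming the Flow and Route classes above are in scope, a short usage sketch that builds one discard route and serializes it with lxml (addresses and ports are placeholders):

from lxml import etree

route = Route()
route.name = 'block-telnet'
route.match['destination'].append('192.0.2.0/24')
route.match['protocol'].append('tcp')
route.match['destination-port'].append('23')
route.then['discard'] = True

flow = Flow()
flow.routes.append(route)
print(etree.tostring(flow.export(), pretty_print=True).decode())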
willprice/weboob
weboob/applications/radioob/radioob.py
Python
agpl-3.0
15,978
0.001189
# -*- coding: utf-8 -*- # Copyright(C) 2010-2012 Romain Bignon # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function import subprocess import os import re import requests from weboob.capabilities.radio import CapRadio, Radio from weboob.capabilities.audio import CapAudio, BaseAudio, Playlist, Album from weboob.capabilities.base import empty from weboob.tools.application.repl import ReplApplication, defaultcount from weboob.tools.application.media_player import InvalidMediaPlayer, MediaPlayer, MediaPlayerNotFound from weboob.tools.application.formatters.iformatter import PrettyFormatter __all__ = ['Radioob'] class RadioListFormatter(PrettyFormatter): MANDATORY_FIELDS = ('id', 'title') def get_title(self,
obj): return obj.title def get_description(self, obj): result = '' if hasattr(obj, 'description') and not empty(obj.description): result += '%-30s' % obj.description if hasattr(obj, 'current') and not empty(obj.current):
if obj.current.who: result += ' (Current: %s - %s)' % (obj.current.who, obj.current.what) else: result += ' (Current: %s)' % obj.current.what return result class SongListFormatter(PrettyFormatter): MANDATORY_FIELDS = ('id', 'title') def get_title(self, obj): result = obj.title if hasattr(obj, 'author') and not empty(obj.author): result += ' (%s)' % obj.author return result def get_description(self, obj): result = '' if hasattr(obj, 'description') and not empty(obj.description): result += '%-30s' % obj.description return result class AlbumTrackListInfoFormatter(PrettyFormatter): MANDATORY_FIELDS = ('id', 'title', 'tracks_list') def get_title(self, obj): result = obj.title if hasattr(obj, 'author') and not empty(obj.author): result += ' (%s)' % obj.author return result def get_description(self, obj): result = '' for song in obj.tracks_list: result += '- %s%-30s%s ' % (self.BOLD, song.title, self.NC) if hasattr(song, 'duration') and not empty(song.duration): result += '%-10s ' % song.duration else: result += '%-10s ' % ' ' result += '(%s)\r\n\t' % (song.id) return result class PlaylistTrackListInfoFormatter(PrettyFormatter): MANDATORY_FIELDS = ('id', 'title', 'tracks_list') def get_title(self, obj): return obj.title def get_description(self, obj): result = '' for song in obj.tracks_list: result += '- %s%-30s%s ' % (self.BOLD, song.title, self.NC) if hasattr(song, 'author') and not empty(song.author): result += '(%-15s) ' % song.author if hasattr(song, 'duration') and not empty(song.duration): result += '%-10s ' % song.duration else: result += '%-10s ' % ' ' result += '(%s)\r\n\t' % (song.id) return result class Radioob(ReplApplication): APPNAME = 'radioob' VERSION = '1.1' COPYRIGHT = 'Copyright(C) 2010-YEAR Romain Bignon\nCopyright(C) YEAR Pierre Maziere' DESCRIPTION = "Console application allowing to search for web radio stations, listen to them and get information " \ "like the current song." 
SHORT_DESCRIPTION = "search, show or listen to radio stations" CAPS = (CapRadio, CapAudio) EXTRA_FORMATTERS = {'radio_list': RadioListFormatter, 'song_list': SongListFormatter, 'album_tracks_list_info': AlbumTrackListInfoFormatter, 'playlist_tracks_list_info': PlaylistTrackListInfoFormatter, } COMMANDS_FORMATTERS = {'ls': 'radio_list', 'playlist': 'radio_list', } COLLECTION_OBJECTS = (Radio, BaseAudio, ) PLAYLIST = [] def __init__(self, *args, **kwargs): ReplApplication.__init__(self, *args, **kwargs) self.player = MediaPlayer(self.logger) def main(self, argv): self.load_config() return ReplApplication.main(self, argv) def complete_download(self, text, line, *ignored): args = line.split(' ') if len(args) == 2: return self._complete_object() elif len(args) >= 3: return self.path_completer(args[2]) def do_download(self, line): """ download ID [DIRECTORY] Download an audio file """ _id, dest = self.parse_command_args(line, 2, 1) obj = self.retrieve_obj(_id) if obj is None: print('No object matches with this id:', _id, file=self.stderr) return 3 if isinstance(obj, BaseAudio): streams = [obj] else: streams = obj.tracks_list if len(streams) == 0: print('Radio or Audio file not found:', _id, file=self.stderr) return 3 for stream in streams: self.download_file(stream, dest) def download_file(self, audio, dest): _obj = self.get_object(audio.id, 'get_audio', ['url', 'title']) if not _obj: print('Audio file not found: %s' % audio.id, file=self.stderr) return 3 if not _obj.url: print('Error: the direct URL is not available.', file=self.stderr) return 4 audio.url = _obj.url def check_exec(executable): with open('/dev/null', 'w') as devnull: process = subprocess.Popen(['which', executable], stdout=devnull) if process.wait() != 0: print('Please install "%s"' % executable, file=self.stderr) return False return True def audio_to_file(_audio): ext = _audio.ext if not ext: ext = 'audiofile' title = _audio.title if _audio.title else _audio.id return '%s.%s' % (re.sub('[?:/]', '-', title), ext) if dest is not None and os.path.isdir(dest): dest += '/%s' % audio_to_file(audio) if dest is None: dest = audio_to_file(audio) if audio.url.startswith('rtmp'): if not check_exec('rtmpdump'): return 1 args = ('rtmpdump', '-e', '-r', audio.url, '-o', dest) elif audio.url.startswith('mms'): if not check_exec('mimms'): return 1 args = ('mimms', '-r', audio.url, dest) else: if check_exec('wget'): args = ('wget', '-c', audio.url, '-O', dest) elif check_exec('curl'): args = ('curl', '-C', '-', audio.url, '-o', dest) else: return 1 os.spawnlp(os.P_WAIT, args[0], *args) def complete_play(self, text, line, *ignored): args = line.split(' ') if len(args) == 2: return self._complete_object() def do_play(self, line): """ play ID [stream_id] Play a radio or a audio file with a found player (optionnaly specify the wanted stream). """ _id, stream_id = self.parse_command_args(line, 2, 1) if not _id: print('This command takes an argument: %s' % self.get_command_help('play', short=True), file=self.stderr) return
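The download path above picks an external downloader by URL scheme (rtmpdump for rtmp://, mimms for mms://, wget or curl otherwise). A condensed standalone sketch of that dispatch, with the tool-availability checks omitted:

def downloader_args(url, dest):
    # Mirrors the scheme dispatch in download_file() above.
    if url.startswith('rtmp'):
        return ('rtmpdump', '-e', '-r', url, '-o', dest)
    if url.startswith('mms'):
        return ('mimms', '-r', url, dest)
    return ('wget', '-c', url, '-O', dest)

print(downloader_args('mms://example.com/stream', 'out.wma'))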
claudep/pootle
tests/pootle_misc/templatetags.py
Python
gpl-3.0
1,836
0
# -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. import pytest from django.template import Context, Template from pootle.core.delegate import scores def _render_str(string, context=None): context = context or {} context = Context(context) return Template(string).render(context) def test_templatetag_progress_bar(): rendered
= _render_str("{% load common_tags %}{% progress_b
ar 0 0 0 %}") assert "<span class=\'value translated\'>0%</span>" in rendered assert '<span class=\'value fuzzy\'>0%</span>' in rendered assert '<span class=\'value untranslated\'>0%</span>' in rendered rendered = _render_str( "{% load common_tags %}{% progress_bar 123 23 73 %}") assert "<span class=\'value translated\'>59.3%</span>" in rendered assert "<span class=\'value fuzzy\'>18.7%</span>" in rendered assert "<span class=\'value untranslated\'>22.0%</span>" in rendered assert '<td class="translated" style="width: 59.3%">' in rendered assert '<td class="fuzzy" style="width: 18.7%">' in rendered assert '<td class="untranslated" style="width: 22.0%">' in rendered @pytest.mark.django_db def test_inclusion_tag_top_scorers(project_set, member): score_data = scores.get(project_set.__class__)(project_set) rendered = _render_str( "{% load common_tags %}{% top_scorers user score_data %}", context=dict( user=member, score_data=score_data.display())) top_scorer = list(score_data.display())[0] assert top_scorer["public_total_score"] in rendered assert top_scorer["user"].email_hash in rendered
PetukhovVictor/compiler
src/Parser/AST/statements/skip.py
Python
mit
152
0
from ..base import AST CLASS = "statements.skip" class
SkipStatement(AST): def __init__(self): super().__init__(CLASS, "skip_s
tatement")
BillTheBest/MadeiraAgent
bin/madeira.py
Python
bsd-3-clause
8,479
0.034202
#!/usr/bin/env python -u # -------------------------------------------------------------------------- # # Copyright 2011, MadeiraCloud (support@madeiracloud.com) # # -------------------------------------------------------------------------- # """MadeiraCloud Agent """ import os import sys import errno import signal import urllib import logging from optparse import Option, OptionParser from madeiracloud import Log from madeiracloud import Task from madeiracloud import Health from madeiracloud import Watcher from madeiracloud import RTimer __copyright__ = "Copyright 2011, MadeiraCloud (http://www.madeiracloud.com))" __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "MadeiraCloud" __author__ = "dev@madeiracloud.com" __email__ = "support@madeiracloud.com" __status__ = "Production" # ----------------------------------------------------- # Exception # ----------------------------------------------------- class MadeiraAgentException(Exception): """A simple exception class used for MadeiraAgent exceptions""" pass # ---------------------------------------------------------------------------------------------- # MadeiraAgent # ---------------------------------------------------------------------------------------------- class MadeiraAgent(object): log_level = 'INFO' log_dir = '/var/log/madeiracloud/' log_file = log_dir + 'madeiracloud.log' log_rotate = 3 log_size = 10000000 pidfile = '/var/lock/subsys/madeiracloud' endpoint_task = 'https://api.madeiracloud.com/agent/task/' endpoint_health = 'https://api.madeiracloud.com/agent/health/' interval_task = 6 interval_health = 60 url_metadata = 'http://169.254.169.254/latest/meta-data' url_userdata = 'http://169.254.169.254/latest/user-data' def __init__(self, daemon=True, no_task=False, no_health=False): """ Initializes MadeiraAgent. 
""" self.__daemon = daemon self.__no_task = no_task self.__no_health= no_health self.__timer = [] self.__metadata = {} self.__userdata = None self.__distro = None # Log, Daemonize and Signal self._log() if daemon: self._daemonize() signal.signal(signal.SIGTERM, self._signal) signal.signal(signal.SIGINT, self._signal) signal.signal(signal.SIGQUIT, self._signal) signal.signal(signal.SIGHUP, self._signal) signal.signal(signal.SIGCHLD, self._signal) signal.signal(signal.SIGUSR2, self._signal) try: # metadata self.__userdata['instance-id'] = urllib.urlopen("%s/instance-id" % self.url_metadata).read() if self.__userdata['instance-id'].find("404"): raise MadeiraAgentException("Failed to retreive instance instance-id from metadata: %s" % e) # userdata self.__userdata = urllib.urlopen(self.url_userdata).read() # distro f = open('/etc/issue') self.__distro = f.readlines()[0].split(' ')[0].lower() f.close() except Exception, e: logging.fatal("Failed to initialize MadeiraAgent: %s" % e) raise MadeiraAgentException def _log(self): # setup LOG try: level = logging.getLevelName(self.log_level) logging.getLogger().setLevel(level) logger = logging.getLogger() if self.__daemon: # Add the log message handler to the logger if not os.path.exists(MadieraAgent.log_dir): os.makedirs(self.log_dir, 0755) fh = logger.handlers.RotatingFileHandler( filename = self.log_file, maxBytes = self.log_size, backupCount = self.log_rotate ) formatter = Log.LogFormatter(console=False) else: # Set up color if we are in a tty and curses is installed fh = logging.StreamHandler() formatter = Log.LogFormatter(console=True) fh.setFormatter(formatter) logger.addHandler(fh) except OSError, msg: raise MadeiraAgentException def _signal(self, sig, frame): if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT): # exit logging.info('caught signal %s' % sig) self.exit() elif sig == signal.SIGHUP: # reload logging.info('caught signal %s' % sig) self.reload() elif sig == signal.SIGCHLD: # TODO: pass logging.debug('caught signal %s' % sig) elif sig == signal.SIGUSR2: # TODO: pass logging.debug('caught signal %s' % sig) else: logging.warning('caught signal %s' % sig) def _daemonize(self): try: # First fork try: pid = os.fork() if pid > 0: # Exit first
parent sys.exit(0) except OSError, e: logging.error("Cannot run MadeiraAgent in daemon mode: (%d) %s\n" % (e.errno, e.strerror)) raise
MadeiraAgentException # Decouple from parent environment. os.chdir(".") os.umask(0) os.setsid() # Second fork try: pid = os.fork() if pid > 0: # Exit second parent. sys.exit(0) except OSError, e: logging.error("Cannot run MadeiraAgent in daemon mode: (%d) %s\n" % (e.errno, e.strerror)) raise MadeiraAgentException # Open file descriptors and print start message si = file('/dev/null', 'r') so = file('/dev/null', 'a+') se = file('/dev/null', 'a+', 0) pid = os.getpid() sys.stderr.write("\nStarted MadeiraAgent with pid %i\n\n" % pid) sys.stderr.flush() if not os.path.exists(os.path.dirname(self.pidfile)): os.mkdir(os.path.dirname(self.pidfile)) file(self.pidfile,'w+').write("%i\n" % pid) # Redirect standard file descriptors. os.close(sys.stdin.fileno()) os.close(sys.stdout.fileno()) os.close(sys.stderr.fileno()) os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) except OSError, e: logging.error("Cannot run MadeiraAgent as daemon: %s" % e) raise MadeiraAgentException def run(self): logging.info("------------------------- Starting MadeiraAgent -------------------------") logging.info("Log Level: %s" % self.log_level) if self.__daemon: logging.info("Log File: %s" % self.log_file) logging.info("Log Size: %s" % self.log_size) logging.info("Log Rotate: %s" % self.log_rotate) try: logging.info("Endpoint - Task: %s" % self.endpoint_task) logging.info("Endpoint - Health: %s" % self.endpoint_health) logging.info("Interval - Task: %d seconds" % self.interval_task) logging.info("Interval - Health: %d seconds" % self.interval_health) # task & health if not self.__no_task: self.__timer.append(RTimer.RTimer(self.interval_task, Task.run, args=[self.endpoint_task, self.__metadata['instance-id'], self.__distro])) if not self.__no_health: self.__timer.append(RTimer.RTimer(self.interval_health, Health.run, args=[self.endpoint_task, self.__metadata, self.__distro])) for t in self.__timer: t.run() # monitor forever self._monitor() except Exception, e: logging.error(e) raise MadeiraAgentException def reload(self): # TODO pass def exit(self): for t in self.__timer: t.cancel() logging.info("------------------------- MadeiraAgent is stopped -----------------------") exit() def _monitor(self): Watcher.run() ####################### main() ######################### if __name__ == "__main__": # Check if a daemon is already running pidfile = '/var/lock/subsys/madeiracloud' if os.path.exists(pidfile): pf = file(pidfile,'r') pid = int(pf.read().strip()) pf.close() try: os.kill(pid, signal.SIG_DFL) except OSError, (err, msg): if err == errno.ESRCH: # Pidfile is stale. Remove it. os.remove(pidfile) else: msg = "Unexpected error when checking pid file '%s'.\n%s\n" %(pidfile, msg) sys.stderr.write(msg) sys.exit(1) else: msg = "MadeiraAgent is already running (pid %i)\n" % pid sys.stderr.write(msg) sys.exit(1) # options usage = "[-h] [-f] [-t] [-l]" optparser = OptionParser(usage=usage) optparser.add_option(Option("-f", "--fg", action="store_true", dest="foreground", help = "Runs in the foreground. Default is background")) optparser.add_option(Option("-t", "--no-task", action="store_true", dest="no_task", help = "If True, the agent will not try to retrieve any task")) optparser.add_option(Option("-l", "--no-health", action="store_true", dest="no_health", help = "
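The __main__ block above detects a stale pidfile by probing the recorded pid with os.kill and checking for ESRCH. A minimal standalone version of that liveness probe (signal 0 delivers nothing; it only checks existence):

import errno
import os

def pid_running(pid):
    try:
        os.kill(pid, 0)                # signal 0: existence probe, nothing delivered
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False               # no such process: the pidfile is stale
        if e.errno == errno.EPERM:
            return True                # process exists but belongs to another user
        raise
    return True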
MozillaSecurity/FuzzManager
server/crashmanager/tests/test_bugproviders_rest.py
Python
mpl-2.0
3,764
0.001063
# coding: utf-8 from __future__ import unicode_literals import logging import pytest import requests LOG = logging.getLogger("fm.crashmanager.tests.bugproviders.rest") @pytest.mark.parametrize("method", ["delete", "get", "patch", "post", "put"]) def test_rest_bugproviders_no_auth(db, api_client, method): """must yield unauthorized without authentication""" assert getattr(api_client, method)( "/crashmanager/rest/bugproviders/", {} ).status_code == requests.codes['unauthorized'] @pytest.mark.parametrize("method", ["delete", "get", "patch", "post", "put"]) def test_rest_bugproviders_no_perm(user_noperm, api_client, method): """must yield forbidden without permission""" assert getattr(api_client, method)( "/crashmanager/rest/bugproviders/", {} ).status_code == requests.codes['forbidden'] @pytest.mark.parametrize("method, url, user", [ ("delete", "/crashmanager/rest/bugproviders/", "normal"), ("delete", "/crashmanager/rest/bugproviders/", "restricted"), ("patch", "/crashmanager/rest/bugproviders/", "normal"), ("patch", "/crashmanager/rest/bugproviders/", "restricted"), ("post", "/crashmanager/rest/bugproviders/", "normal"), ("post", "/crashmanager/rest/bugproviders/", "restricted"), ("put", "/crashmanager/rest/bugproviders/", "normal"), ("put", "/crashmanager/rest/bugproviders/", "restricted"), ], indirect=["user"]) def test_rest_bugproviders_methods(api_client, user, method, url): """must yield method-not-allowed for unsupported methods""" assert getattr(api_client, method)(url, {}).status_code == requests.codes['method_not_allowed'] @pytest.mark.parametrize("method, url, user", [ ("get", "/crashmanager/rest/bugproviders/1/", "normal"), ("get", "/crashmanager/rest/bugproviders/1/", "restricted"), ("delete", "/crashmanager/rest/bugproviders/1/", "normal"), ("delete", "/crashmanager/rest/bugproviders/1/", "restricted"), ("patch", "/crashmanager/rest/bugproviders/1/", "normal"), ("patch", "/crashmanager/rest/bugproviders/1/", "restricted"), ("post", "/crashmanager/rest/bugproviders/1/", "normal"), ("post", "/crashmanager/rest/bugproviders/1/", "restricted"), ("put", "/crashmanager/rest/bugproviders/1/", "normal"),
("put", "/crashmanager/rest/bugproviders/1/", "restricted"), ], indirect=["user"]) def test_rest_bugproviders_methods_not_found(api_client, user, method, url): """must yield not-found for undeclared methods""" assert get
attr(api_client, method)(url, {}).status_code == requests.codes['not_found'] def _compare_rest_result_to_bugprovider(result, provider): expected_fields = {"id", "classname", "hostname", "urlTemplate"} assert set(result) == expected_fields for key, value in result.items(): assert value == getattr(provider, key) @pytest.mark.parametrize("user", ["normal", "restricted"], indirect=True) def test_rest_bugproviders_list(api_client, user, cm): """test that list returns the right bug providers""" expected = 4 providers = [cm.create_bugprovider(hostname="test-provider%d.com" % (i + 1), urlTemplate="test-provider%d.com/template" % (i + 1)) for i in range(expected)] resp = api_client.get("/crashmanager/rest/bugproviders/") LOG.debug(resp) assert resp.status_code == requests.codes['ok'] resp = resp.json() assert set(resp) == {'count', 'next', 'previous', 'results'} assert resp['count'] == expected assert resp['next'] is None assert resp['previous'] is None assert len(resp['results']) == expected for result, provider in zip(resp['results'], providers[:expected]): _compare_rest_result_to_bugprovider(result, provider)
slovelan/NRAODev
carta/html5/common/skel/source/class/skel/simulation/tLoadImage.py
Python
gpl-2.0
5,677
0.01603
import Util import time import unittest import selectBrowser from selenium import webdriver from flaky import flaky from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By # Test loading images. from flaky import flaky class tLoadImage(unittest.TestCase): def setUp(self): browser = selectBrowser._getBrowser() Util.setUp(self, browser) # Test that an image can be loaded and then closed. def test_load_image(self): driver = self.driver timeout = selectBrowser._getSleep() # Load a specific image. imageWindow = Util.load_image(self, driver, "Default") time.sleep( timeout ) # Click on the Data->Close->Image button to close the image. ActionChains(driver).double_click( imageWindow ).perform() dataButton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//div[text()='Data']/.."))) ActionChains(driver).click( dataButton ).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN).send_keys( Keys.ARROW_RIGHT).send_keys(Keys.ENTER).perform() time.sleep( timeout ) # Test was written in response to Issue 178. Loading a particular image produced # a crash. def test_load_image178(self): driver = self.driver timeout = selectBrowser._getSleep() # Load a specific image. imageWindow = Util.load_image(self, driver, "SI1.fits") time.sleep( timeout ) # Make sure we have not crashed by closing the image ActionChains(driver).double_click( imageWindow ).perform() dataButton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//div[text()='Data']/.."))) ActionChains(driver).click( dataButton ).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN).send_keys( Keys.ARROW_RIGHT).send_keys(Keys.ENTER).perform() time.sleep( timeout ) # Test was written in response to Issue 152. Loading an image and then pressing the # 100% clip button produced and error rather than changing the clip. def test_clip100(self): driver = self.driver timeout = selectBrowser._getSleep() # Load a specific image. imageWindow = Util.load_image(self, driver, "Default") time.sleep( timeout ) # Store the minimum clip value minClipText = driver.find_element_by_xpath("//div[@id='clipMinIntensity']/input") driver.execute_script( "arguments[0].scrollIntoView(true);", minClipText ) minClip = minClipText.get_attribute( "value") print "Min intensity", minClip # Press the 100% clip button ActionChains(driver).double_click( imageWindow ).perform() clippingButton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//div[text()='Clipping']/.."))) ActionChains(driver).click( clippingButton ).send_keys(Keys.ARROW_RIGHT ).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN ).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN ).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN ).send_keys(Keys.ENTER).perform() time.sleep( timeout ) # Make sure the minimum clip value goes down. newMinClip = minClipText.get_attribute( "value") print "New min intensity", newMinClip self.assertTrue( float(newMinClip) < float(minClip), "Minimum clip value did not go down") # Test that we can load a large number of images, one after another def test_load_images(self): driver = self.driver timeout = selectBrowser._getSleep() # Load a specific image. 
imageWindow = Util.load_image(self, driver, "aH.fits") time.sleep( timeout ) Util.load_image( self, driver, "aJ.fits") time.sleep( timeout ) Util.load_image( self, driver, "N15693D.fits") time.sleep( timeout ) Util.load_image( self, driver, "Orion.cont.image.fits") time.sleep( timeout ) Util.load_image( self, driver, "Orion.methanol.cbc.contsub.image.fits") time.sleep( timeout ) Util.load_image( self, driver, "TWHydra_CO2_1line.image.fits") time.sleep( timeout ) Util.load_image( self, driver, "br1202_wide.image") time.sleep( timeout ) Util.load_image( self, driver, "TWHydra_CO3_2line.image") time.sleep( timeout ) Util.load_image( self, driver, "TWHydra_cont1.3mm.image") time.sleep( timeout ) Util.load_image( self, driver, "v2.0_ds2_l000_13pca_map20.fits") time.sleep( timeout ) #Find the image animator and verify that there are 9 images loaded upperBoundText = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//div[@id='ImageUpperBoundSpin']/input"))) driver.execute_script( "arguments[0].scrollIntoView(true);", upperBoundText) imageCount = upperBoundText.get_attribute("value") print "Image Count: ", imageCount self.assertEqual( imageCount, str(9), "Wrong number of images were loaded") def tearDown(self): # Close the browser self.driver.close() # Allow browser to fully close before continuing time.sleep(2) # Close the session and del
ete temporary files self.driver.quit() if __name__
== "__main__": unittest.main()
KaranToor/MA450
google-cloud-sdk/lib/surface/source/__init__.py
Python
apache-2.0
1,960
0.00102
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main command group for cloud source command group.""" from go
oglecloudsdk.api_lib.source import source from googlecloudsdk.api_lib.sourcerepo import sourcerepo from googlecloudsdk.calliope import b
ase from googlecloudsdk.core import properties from googlecloudsdk.core import resolvers from googlecloudsdk.core import resources from googlecloudsdk.core.credentials import store as c_store @base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA, base.ReleaseTrack.GA) class Source(base.Group): """Cloud git repository commands.""" def Filter(self, context, args): """Initialize context for source commands. Args: context: The current context. args: The argparse namespace that was specified on the CLI or API. Returns: The updated context. """ resources.REGISTRY.SetParamDefault( api='source', collection=None, param='projectId', resolver=resolvers.FromProperty(properties.VALUES.core.project)) resources.REGISTRY.SetParamDefault( api='sourcerepo', collection=None, param='projectId', resolver=resolvers.FromProperty(properties.VALUES.core.project)) source.Source.SetResourceParser(resources.REGISTRY) source.Source.SetApiEndpoint() sourcerepo.Source.SetResourceParser(resources.REGISTRY) sourcerepo.Source.SetApiEndpoint()
vlas-sokolov/pyspeckit
pyspeckit/spectrum/headers.py
Python
mit
1,312
0.005335
from __future__ import print_function try: import astropy.io.fits as pyfits except ImportError: import pyfits def intersection(header1, header2, if_conflict=None): """ Return a pyfits Header containing the inter
section of two pyfits Headers *if_conflict* [ '1'/1/'Header1' | '2'/2/'Header2' | None ] Defines behavior if a keyword conflict is found. Default is to remove the key """ newheader = pyfits.Header() for key,value in header1.items(): if key in header2: try: if value == header2[key]: newheader[key] = value
elif if_conflict in ('1', 1, 'Header1'):
                    newheader[key] = value
                elif if_conflict in ('2', 2, 'Header2'):
                    # fixed: was `Header2[key]`, a NameError (the parameter is `header2`)
                    newheader[key] = header2[key]
            except KeyError:
                """
                Assume pyfits doesn't want you to have that keyword (because
                it shouldn't be possible to get here otherwise)
                """
                pass
        else:
            try:
                newheader[key] = value
            except KeyError:
                """
                Assume pyfits doesn't want you to have that keyword (because
                it shouldn't be possible to get here otherwise)
                """
                pass

    return newheader
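A usage sketch of intersection(), assuming the function above is in scope, showing the three documented conflict behaviours (by default a conflicting key is dropped):

import astropy.io.fits as pyfits

h1 = pyfits.Header([('NAXIS', 2), ('OBJECT', 'M31')])
h2 = pyfits.Header([('NAXIS', 2), ('OBJECT', 'M33')])

print(dict(intersection(h1, h2)))                  # {'NAXIS': 2}: OBJECT conflict dropped
print(dict(intersection(h1, h2, if_conflict=1)))   # OBJECT kept from h1
print(dict(intersection(h1, h2, if_conflict=2)))   # OBJECT kept from h2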
jpzk/evopy
evopy/examples/problems/SchwefelsProblem26/ORIDSESAlignedSVC.py
Python
gpl-3.0
2,196
0.023679
''' This file is part of evopy. Copyright 2012, Jendrik Poloczek evopy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. evopy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with evopy. If not, see <http://www.gnu.org/licenses/>. ''' from sys import path path.append("../../../..") from numpy import matrix from sklearn.cross_validation import KFold from evopy.strategies.ori_dses_aligned_svc import
ORIDSESAlignedSVC from evopy.problems.tr_problem import TRProblem from evopy.problems.schwefels_problem_26 import SchwefelsProblem26 from evopy.simulators.simulator import Simulator from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear from evopy.operators.termination.accuracy import Accuracy def ge
t_method(): sklearn_cv = SVCCVSkGridLinear(\ C_range = [2 ** i for i in range(-1, 14, 2)], cv_method = KFold(20, 5)) meta_model = DSESSVCLinearMetaModel(\ window_size = 10, scaling = ScalingStandardscore(), crossvalidation = sklearn_cv, repair_mode = 'mirror') method = ORIDSESAlignedSVC(\ mu = 15, lambd = 100, theta = 0.3, pi = 70, initial_sigma = matrix([[4.5, 4.5]]), delta = 4.5, tau0 = 0.5, tau1 = 0.6, initial_pos = matrix([[10.0, 10.0]]), beta = 1.0, meta_model = meta_model) return method if __name__ == "__main__": optimizer = get_method() problem = SchwefelsProblem26() optfit = problem.optimum_fitness() sim = Simulator(optimizer, problem, Accuracy(optfit, 10**(-6))) results = sim.simulate()
Azure/azure-sdk-for-python
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/aio/operations/_app_service_plans_operations.py
Python
mit
83,276
0.004731
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Gene
rator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, AsyncIterable, Ca
llable, Dict, Generic, List, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._app_service_plans_operations import build_create_or_update_request_initial, build_create_or_update_vnet_route_request, build_delete_hybrid_connection_request, build_delete_request, build_delete_vnet_route_request, build_get_hybrid_connection_plan_limit_request, build_get_hybrid_connection_request, build_get_request, build_get_route_for_vnet_request, build_get_server_farm_skus_request, build_get_vnet_from_server_farm_request, build_get_vnet_gateway_request, build_list_by_resource_group_request, build_list_capabilities_request, build_list_hybrid_connection_keys_request, build_list_hybrid_connections_request, build_list_request, build_list_routes_for_vnet_request, build_list_usages_request, build_list_vnets_request, build_list_web_apps_by_hybrid_connection_request, build_list_web_apps_request, build_reboot_worker_request, build_restart_web_apps_request, build_update_request, build_update_vnet_gateway_request, build_update_vnet_route_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class AppServicePlansOperations: """AppServicePlansOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.web.v2021_01_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list( self, detailed: Optional[bool] = None, **kwargs: Any ) -> AsyncIterable["_models.AppServicePlanCollection"]: """Get all App Service plans for a subscription. Description for Get all App Service plans for a subscription. :param detailed: Specify :code:`<code>true</code>` to return all App Service plan properties. The default is :code:`<code>false</code>`, which returns a subset of the properties. Retrieval of all properties may increase the API latency. 
:type detailed: bool :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AppServicePlanCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_01.models.AppServicePlanCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlanCollection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, detailed=detailed, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( subscription_id=self._config.subscription_id, detailed=detailed, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("AppServicePlanCollection", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms'} # type: ignore @distributed_trace def list_by_resource_group( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.AppServicePlanCollection"]: """Get all App Service plans in a resource group. Description for Get all App Service plans in a resource group. :param resource_group_name: Name of the resource group to which the resource belongs. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AppServicePlanCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_01.models.AppServicePlanCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlanCollection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_resource_group_request( resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, template_url=self.list_by_resource_group.metadata['url'], ) request = _convert_request(request)
renatahodovan/fuzzinator
fuzzinator/ui/tui/table.py
Python
bsd-3-clause
18,857
0.000955
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss. # # Licensed under the BSD 3-Clause License # <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>. # This file may not be copied, modified, or distributed except # according to those terms. from functools import cmp_to_key from urwid import * from .decor_widgets import PatternBox from .graphics import fz_box_pattern class TableRowsListWalker(ListWalker): def __init__(self, table, sort=None): self.table = table self.sort = sort self.focus = 0 self.rows = [] super().__init__() def __getitem__(self, position): if position < 0 or position >= len(self.rows): raise IndexError return self.rows[position] def __delitem__(self, index): if -1 < index < len(self.rows): del self.rows[index] self._modified() def __len__(self): return len(self.rows) def add(self, item): self.rows.append(item) self._modified() def insert(self, *args): self.rows.insert(*args) self._modified() def clear(self): self.focus = 0 del self.rows[:] def remove(self, value): self.rows.remove(value) def next_position(self, position): index = position + 1 if position >= len(self.rows): raise IndexError return index def prev_position(self, position): index = position - 1 if position < 0: raise IndexError return index def set_focus(self, position): self.rows[self.focus].unhighlight() self.focus = position self.rows[self.focus].highlight() def set_sort_column(self, column, **kwargs): self._modified() # It contains two columns: the content of the rows and the scrollbar (at least the original version). class ScrollingListBox(WidgetWrap): signals = ['select', 'load_more'] def __init__(self, body, infinite=False): self.infinite = infinite self.requery = False self.height = 0 self.listbox = ListBox(body)
self.body = self.listbox.body self.ends_visible = self.listbox.ends_visible super().__init__(self.listbox) def keypress(self, size, key): if key == 'home': if self.body: # len(self.body) != 0 self.focus_position = 0 self._invalidate() return key if key == 'end': if self.body: # len(self.body) != 0
self.focus_position = len(self.body) - 1 self._invalidate() return key if key in ['page down', 'down'] and self.infinite and self.focus_position == len(self.body) - 1: self.requery = True self._invalidate() return None if key == 'enter': if self.body: # len(self.body) != 0 emit_signal(self, 'select', self, self.selection) return None if key == 'left': return None return super().keypress(size, key) def render(self, size, focus=False): maxcol, maxrow = size if self.requery and 'bottom' in self.ends_visible((maxcol, maxrow)): self.requery = False emit_signal(self, 'load_more', len(self.body)) self.height = maxrow return super().render((maxcol, maxrow), focus) @property def focus(self): return self.listbox.focus @property def focus_position(self): if self.listbox.body: # len(self.listbox.body) != 0 return self.listbox.focus_position return 0 @focus_position.setter def focus_position(self, value): self.listbox.focus_position = value self.listbox._invalidate() @property def row_count(self): return len(self.listbox.body) @property def selection(self): if self.body: # len(self.body) != 0 return self.body[self.focus_position] return None class TableColumn(object): align = 'left' wrap = 'space' padding = None def __init__(self, name, label=None, width=('weight', 1), format_fn=None, sort_key=None, sort_fn=None, sort_reverse=False): self.name = name self.label = label if label else name self.format_fn = format_fn self.sort_key = sort_key self.sort_fn = sort_fn self.sort_reverse = sort_reverse self.sizing, self.width = width def _format(self, v): if isinstance(v, str): return Text(v, align=self.align, wrap=self.wrap) # First, call the format function for the column, if there is one if self.format_fn: try: v = self.format_fn(v) except TypeError: return Text('', align=self.align, wrap=self.wrap) return self.format(v) def format(self, v): # Do our best to make the value into something presentable if v is None: v = '' elif isinstance(v, int): v = '%d' % v elif isinstance(v, float): v = '%.03f' % v # If v doesn't match any of the previous options then it might be a Widget.
if not isinstance(v, Widget): return Text(v, align=self.align, wrap=self.wrap) return v class HeaderColumns(Columns): def __init__(self, contents): self.selected_column = None super().__init__(contents) def __setitem__(self, i, v): self.contents[i * 2] = (v, self.contents[i * 2][1]) class BodyColumns(Columns): def __init__(self, contents, header=None): self.header = header super().__init__(contents) @property def selected_column(self): return self.header.selected_column @selected_column.setter def selected_column(self, value): self.header.selected_column = value class TableCell(WidgetWrap): signals = ['click', 'select'] def __init__(self, table, column, row, value): self.table = table self.column = column self.row = row self.value = value self.contents = self.column._format(self.value) padding = self.column.padding or self.table.padding self.padding = Padding(self.contents, left=padding, right=padding) self.attr = AttrMap(self.padding, attr_map=row.attr_map, focus_map=row.focus_map) super().__init__(self.attr) def selectable(self): return isinstance(self.row, TableBodyRow) def highlight(self): self.attr.set_attr_map(self.row.focus_map) def unhighlight(self): self.attr.set_attr_map(self.row.attr_map) def set_attr_map(self, attr_map): self.attr.set_attr_map(attr_map) def set_focus_map(self, focus_map): self.attr.set_focus_map(focus_map) def keypress(self, size, key): if key == 'enter': emit_signal(self, 'select') return key # Override the mouse_event method (param list is fixed). def mouse_event(self, size, event, button, col, row, focus): if event == 'mouse press': emit_signal(self, 'click') class TableRow(WidgetWrap): attr_map = {} focus_map = {} border_char = ' ' column_class = Columns # To be redefined by subclasses. decorate = True _selectable = True def __init__(self, table, data, header=None, cell_click=None, cell_select=None, attr_map=None, focus_map=None): self.table = table if isinstance(data, (list, tuple)): self.data = dict(zip([c.name for c in self.table.columns], data)) elif isinstance(data, dict): self.data = data self.header = header self.cell_click = cell_click self.cell_select = cell_select self.contents = [] if self.decorate: if attr_map: self.attr_map = attr_map elif table.attr_map: self.attr_map.update(table.attr_map) if focus_map: self.fo
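A quick sketch of the TableColumn formatting rules above (assumes urwid is installed; the column name is arbitrary): ints go through '%d', floats through '%.03f', and a failing format_fn falls back to empty text.

col = TableColumn('ratio', format_fn=lambda v: v * 100)
print(col._format(0.042).text)  # '4.200' -- format_fn runs first, then float formatting
print(col.format(7).text)       # '7'
print(col.format(None).text)    # ''  -- None renders as an empty cell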
naresh21/synergetics-edx-platform
lms/djangoapps/branding/tests/test_models.py
Python
agpl-3.0
1,936
0.001033
""" Tests for the Video Branding configuration. """ from django.test import TestCase from django.core.exceptions import ValidationError from nose.plugins.attrib import attr from branding.models import BrandingInfoConfig @attr(shard=1) class BrandingInfoConfigTest(TestCase): """ Test the BrandingInfoConfig model. """ def setUp(self): super(BrandingInfoConfigTest, self).setUp() self.configuration_string = """{ "CN": { "url": "http://www.xuetangx.com", "logo_src": "http://www.xuetangx.com/static/images/logo.png", "logo_tag": "Video hosted by XuetangX.com" } }""" self.config = BrandingInfoConfig(configuration=self.configuration_string) def test_create(self): """ Tests creation of configuration. """ self.config.save() self.assertEquals(self.config.configuration, self.configuration_string) def test_clean_bad_json(self): """ Tests if bad Json string was given. """ self.config = BrandingInfoConfig(configuration='{"bad":"test"') self.assertRaises(ValidationError, self.config.clean) def test_get(self):
""" Tests get configuration from saved string. """ self.config.enabled = True self.config.save() expected_config = { "CN": { "url": "http://www.xuetangx.com", "logo_src": "http://www.xuetangx.com/static/images/logo.png", "logo_tag": "Video hosted by XuetangX.com" } } self.assertEquals(self.config.get_config(), expected_config) def
test_get_not_enabled(self): """ Tests get configuration that is not enabled. """ self.config.enabled = False self.config.save() self.assertEquals(self.config.get_config(), {})
UTC-Coding/Benji-s-Python
Rob/Odd or Even.py
Python
gpl-3.0
238
0.037815
odd = [1,3,5,7,9] even = [2,4,6,8,10] answer = input("Please enter your number! \n") answer = int(answer) if answer in odd: print("That is quite odd!") elif answer in even: print("That's even") else: print("You are broken!")
wqferr/AniMathors
01.py
Python
mit
2,630
0.004563
import numpy as np import colorsys from numpy import sin, cos, tan import core from core.anim import Animation from core.obj import Point, Line, Vector, Curve def update_p1(p1, t, tmax): p1.x = np.cos(t) def update_p2(p2, t, tmax): p2.y = np.sin(t/3) def update_p3(p3, t, tmax): p3.pos = (p1.x, p2.y) c = colorsys.rgb_to_hsv(*p3.color) c = ((c[0]+anim.dt/(2*np.pi)) % 1, c[1], c[2]) p3.color = colorsys.hsv_to_rgb(*c) def update_line(l, t, tmax):
l.p1 = (np.cos(t)/2, np.sin(t)/2) l.p2 = (-np.cos(t)/2, -np.sin(t)/2) def update_v(v, t, tmax): r2 = np.sqrt(2)/4 c = r2 * cos(2*t) s = r2/3 * sin(2*t) v.hx = s - c v.hy = c + s def update_c(c, t, tmax): c.set_params( tmin=min(v.hx, p3.x), tmax=max(v.hx, p3.x) ) def update_seg1(s, t, tmax): s.p1 = c.p1 s.p2 = (c.p1[0], 0) def update_seg2(s, t, tmax):
s.p1 = c.p2 s.p2 = (c.p2[0], 0) def update_seg3(s, t, tmax): s.p1 = seg1.p2 s.p2 = seg2.p2 def update_circumf(c, t, tmax): c.set_params( tmin=c.tmin+anim.dt, tmax=c.tmax+anim.dt ) col = colorsys.rgb_to_hsv(*c.color) col = ((col[0]+anim.dt/(2*np.pi)) % 1, col[1], col[2]) c.color = colorsys.hsv_to_rgb(*col) def init(anim): global p1, p2, p3, v, c, seg1, seg2 p1 = anim.create( Point, 0, 0, color='g', size=10, update=update_p1 ) p2 = anim.create( Point, 0, 0, color='b', size=10, update=update_p2 ) p3 = anim.create( Point, 0, 0, color='r', size=7, update=update_p3 ) anim.create( Line, 0, 0, 0, 0, color='r', update=update_line ) v = anim.create( Vector, -.05, -.25, color='b', update=update_v ) c = anim.create( Curve, lambda t: (t, sin(np.pi*t)), -1, 1, color='w', update=update_c ) seg1 = anim.create( Line, 0, 0, 0, 0, color='w', lw=1, update=update_seg1 ) seg2 = anim.create( Line, 0, 0, 0, 0, color='w', lw=1, update=update_seg2 ) anim.create( Line, 0, 0, 0, 0, color='w', lw=1, update=update_seg3 ) anim.create( Curve, lambda t: (cos(t), sin(t)), 0, np.pi/3, color='g', update=update_circumf ) if __name__ == '__main__': global anim anim = Animation(dt=0.01, length=6*np.pi, init_func=init, repeat=True) anim.play()
caio1982/capomastro
jenkins/management/commands/import_jenkinsserver.py
Python
mit
960
0
from __future__ import unicode_literals from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.db import transaction from jenkins.management.helpers import import_jenkinsserver # TODO: implement optional field updating... class Command(BaseCommand): help = "Import or update a JenkinsServer" args = "[name] [url] [username] [password]"
option_list = BaseCommand.option_list + ( make_option( "--update", action="store_true", dest="update", default=False, help="Update if server already exists."), ) def handle(self, *args, **options):
if len(args) != 4: raise CommandError("must provide all parameters") name, url, username, password = args import_jenkinsserver( name, url, username, password, update=options["update"], stdout=self.stdout) transaction.commit_unless_managed()
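A usage sketch with made-up values -- besides the CLI form (python manage.py import_jenkinsserver name url username password --update), optparse-style commands like this one can be driven from code:

from django.core.management import call_command

# positional args map to [name] [url] [username] [password]; update maps to --update
call_command('import_jenkinsserver', 'ci-main', 'http://jenkins.example.com/', 'admin', 's3cret', update=True)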
windskyer/weibo
weibo/test/sleep.py
Python
apache-2.0
300
0.003333
#!/usr/bin/env python
# coding: utf-8 # Copyright (c) 2013 # Gmail:liuzheng712 # import time def task(): print "task ..." def timer(n): while True: print time.strftime('%Y-%m-%d %X', time.localtime()) task() time.sleep(n) if __name__ == '__main__': timer(5)
chop-dbhi/ehb-service
ehb_service/apps/core/encryption/encryptionFields.py
Python
bsd-2-clause
8,789
0.003186
import datetime import re import binascii import random import string import logging from django import forms from django.db import models from django.forms import fields from django.utils.encoding import force_text, smart_bytes import sys from core.encryption.Factories import FactoryEncryptionServices as efac from core.encryption.encryptionFieldsBase import encryptionBaseMethods as ebm log = logging.getLogger(__name__) class BaseField(models.Field): def __init__(self, *args, **kwargs): # Get the active encryption and key management services, if any self.use_encryption = efac.use_encryption() self.aes = efac.active_encryption_service() if self.use_encryption else None self.akms = efac.active_key_management_service() if self.use_encryption else None self.block_size = self.aes.block_size() if self.use_encryption else None # Need to adjust the max length supplied in the user's field args to account for # cipher block size and padding if self.use_encryption: user_specified_length = kwargs.get('max_length', 20) unique = kwargs.get('unique', False) max_length, usl = ebm._max_db_length(unique, user_specified_length, self.block_size, self.aes) self.user_specified_max_length = usl kwargs['max_length'] = max_length models.Field.__init__(self, *args, **kwargs) def _is_encrypted(self, value, key, iv):
''' If value contains any non hex symbols or its length is odd, then it was not encrypted because the encrypted values are all converted to ascii hex before storing in db using the binascii.a2b_hex method which only operates on even length values ''' hexValues = True # test to see if value is a hexadecimal # get rid of extra spaces
value = value.strip() try: int(value, 16) except ValueError: hexValues = False if hexValues == False or (len(value) % 2) != 0: return False else: # Have the encryption service verify if this is encrypted return self.aes.is_encrypted(binascii.a2b_hex(value), key, iv) def get_decrypted_value(self, value): """Converts the input value into the expected Python data type by dehexifying and decrypting the value. It raises django.core.exceptions.ValidationError if the data can't be converted. Returns the converted value. """ if len(value.strip()) == 0: return value if self.use_encryption: key = self.akms.get_key() iv = self.akms.get_iv() if self._is_encrypted(value, key, iv): # dehexify and decrypt decrypted_value = self.aes.decrypt(binascii.a2b_hex(value), key, iv) # get rid of extra bytes decrypted_value = decrypted_value.split(ebm._split_byte()) # forcing to string text decrypted_value = force_text(decrypted_value[0]) return decrypted_value else: return value else: return value def get_encrypted_value(self, value, connection=None, prepared=False): ''' Perform preliminary non-db specific value checks and conversions: convert value from unicode to full byte, encrypted string, otherwise encryption service may fail according to django docs this is different than str(value) and necessary to django internals https://docs.djangoproject.com/en/dev/ref/unicode/ ''' if value is None: return value if len(value.strip()) == 0: return value # convert string value to bytes value = smart_bytes(value, encoding='utf-8', strings_only=False, errors='strict') if self.use_encryption: key = self.akms.get_key() iv = self.akms.get_iv() if value and not self._is_encrypted(value, key, iv): if len(value) > self.user_specified_max_length: raise ValueError( "Field value longer than max allowed: {0} > {1}".format( str(len(value)), self.user_specified_max_length ) ) pad_length = ebm._padding_length(value, self.block_size) if pad_length > 0: value += ebm._split_byte() + ebm._semi_random_padding_string(pad_length-1) value = self.aes.encrypt(value, key, iv) if len(value) % 2 != 0: # Some encryption services add a checksum byte which throws off the pad_length value += ebm._split_byte() value = binascii.b2a_hex(value) # need to decode to string to store in database value = value.decode("utf8") return value class EncryptCharField(BaseField): # from_db_value is called in all circumstances when # the data is loaded from the database def from_db_value(self, value, expression, connection, context): if value is None: return value return self.get_decrypted_value(value) def get_internal_type(self): return 'CharField' def deconstruct(self): name, path, args, kwargs = super(EncryptCharField, self).deconstruct() kwargs["max_length"] = 255 return name, path, args, kwargs def formfield(self, **kwargs): "Returns a django.forms.Field instance for this database Field."
defaults = {'max_length': self.max_length} defaults.update(kwargs) return super(EncryptCharField, self).formfield(**defaults) # method to convert data to encrypted format before they are stored in database def get_db_prep_value(self, value, connection=None, prepared=False): if self.use_encryption: key = self.akms.get_key() iv = self.akms.get_iv() if value and not self._is_encrypted(value, key, iv): if len(value) > self.user_specified_max_length: raise ValueError( "Field value longer than max allowed: {0} > {1}".format( str(len(value)), self.user_specified_max_length ) ) return self.get_encrypted_value(value, connection=connection, prepared=prepared) class EncryptDateField(BaseField): def __init__(self, *args, **kwargs): kwargs['max_length'] = 10 # YYYY:MM:DD format super(EncryptDateField, self).__init__(*args, **kwargs) # from_db_value is called in all circumstances # when the data is loaded from the database def from_db_value(self, value, expression, connection, context): dv = None if value in fields.EMPTY_VALUES: dv = value elif isinstance(value, datetime.date): dv = value else: input_text = self.get_decrypted_value(value) try: dv = datetime.date(*[int(x) for x in input_text.split(':')]) except ValueError: log.error("Decryption failed - old ehb values need to be updated") return dv def deconstruct(self): name, path, args, kwargs = super(EncryptDateField, self).deconstruct() kwargs["max_length"] = 10 return name, path, args, kwargs def get_internal_type(self): return 'CharField' def formfield(self, **kwargs): defaults = {'widget': forms.DateInput, 'form_class': forms.DateField} defaults.update(kwargs) return super(EncryptDateField, self).formfield(**defaults) # for django custom fields, to_python() is called by deserialization # and during the clean() method used from forms def to_python(self, value): dv = None if value in fields.EMPTY_VALUES: dv = value elif isinstance(value, datetime.date): dv = value els
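A standalone sketch of the plaintext-vs-ciphertext test described in the _is_encrypted docstring above: stored ciphertext is b2a_hex output, so anything with non-hex symbols or odd length cannot be encrypted (the helper name is invented for illustration).

def looks_like_hex_ciphertext(value):
    value = value.strip()
    try:
        int(value, 16)      # non-hex symbols raise ValueError
    except ValueError:
        return False
    return len(value) % 2 == 0  # a2b_hex needs an even number of digits

print(looks_like_hex_ciphertext('6162'))  # True
print(looks_like_hex_ciphertext('abc'))   # False (odd length)
print(looks_like_hex_ciphertext('hi!'))   # False (not hex)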
Mariaanisimova/pythonintask
PMIa/2015/Donkor_A_H/task_3_10.py
Python
apache-2.0
928
0.018018
# Task 3. Variant 10. # Write a program that prints the name "Igor Vasilyevich Lotarev" and asks for his pseudonym. The program must concatenate these two strings and print the resulting string, separating the name and the pseudonym with a dash. # Donkor A.H. # 14.04.2016 print("So, today's guest is the Russian poet - Igor Vasilyevich Lotarev") print("Under what pseudonym do we know this person?") input("\n\nYour answer: ") print("\nQuite right: Igor Vasilyevich Lotarev - Severyanin, Igor Vasilyevich") input("\nPress Enter to finish")
saberman888/Archiver
archive.py
Python
mit
5,894
0.023583
from __future__ import print_function import sys, time import requests, urllib import demjson, shelve import os.path
class Archiver: def __init__(self): """ A class for archiving URLS into the wayback machine """ self._machine = "http://archive.org/wayback/available?url=" self._arch = "https://web.archive.org/save/" self.archived_urls = [] # load data if os.path.isfile("archived_urls.dat"): self.archived_urls = self.load_data() def available(self, url, silent=False): """ :param: url :param: silent=False Checks if the given URL exists in the wayback machine. The silent argument if set True does not print anything to the console """ print("[Checking]: %s\n" % url) if silent == False else 0 data = demjson.decode(requests.get(self._machine+url).text)["archived_snapshots"] if "closest" in data: print(self.print_item(data)) if silent == False else 0 return (data["closest"])["available"] return False def load_data(self): """ Loads the archived URLS from a file called archived_urls.dat """ return shelve.open("archived_urls.dat")["main"] def out_text(self, filename): """ :param: filename Outputs a list of archived urls into text format """ map(open(filename, 'w').write, map(lambda x : x+"\n",self.archived_urls)) print("Done.") def save_data(self): """ Saves the archived urls into archived_urls.dat """ shelve.open("archived_urls.dat")["main"] = self.archived_urls def archive(self, url): """ :param: url Archives a url into the wayback machine. """ l = requests.get(self._arch+url) print("Archiving...") self.archived_urls.append(url) self.save_data() def print_item(self, data): """ :param: data Print function for json data for archive data """ dat = data["closest"] stamp = "Archived:%s\nAvailable:%s\nURL:%s\nStatus:%s" % (dat["timestamp"], dat['available'], dat['url'], dat['status']) return stamp def save_webpage(self, url, filename): """ :param: url :param: filename Saves a webpage """ print("[OK]: Saving webpage..") if not os.path.isdir(os.getcwd()+"\\saved_webpages"): os.mkdir("saved_webpages") open(os.getcwd()+"\\saved_webpages\\"+filename, 'w').write((requests.get(url).text).encode("utf-8")) if os.path.isfile(os.getcwd()+"\\saved_webpages\\"+filename): print("Done.")
Help = \ " \ Usage: archive.py [option] [option2]\n \ \ Options:\n \ -CH/ch [url] - Check if a URL already exists in the wayback machine and return its information if it does\n \ -ARCH/arch [url] - Archive a URL\n \ -CHARCH/charch [url] - Archive a url if it doesn't already exist\n \ -OUTT/outt [filename] - Output a list of archived urls in text format\n \ -H/h - Print this help message\n \ -LARCH/larch - print out a list of urls you archived\n \ -SAVE/save [url] [filename] - Save a url into a file" def main(): global Help A = Archiver() args = map(lambda x : x.lower(), sys.argv[1:len(sys.argv)]) print(args) if len(args) == 2: print(args[0]) if args[0] == "-ch": if A.available(args[1]) is True: print("URL found.") else: print("URL not found in wayback machine.") sys.exit(0) elif args[0] == "-arch": A.archive(args[1]) if A.available(args[1], True) is True: print("[Success]: Archiving is successful") else: print("[Error]: Archiving failed!") b = list(A.archived_urls[len(A.archived_urls)-1]) A.archived_urls.remove(A.archived_urls[len(A.archived_urls)-1]) b.insert(0, "FAILED TO ARCHIVE: ") A.archived_urls.append(b) sys.exit(0) elif args[0] == "-charch": main = A.available(args[1]) if main is True or main == "True": print("URL exists.") elif main is False: print("URL does not exist.") A.archive(args[1]) sys.exit(0) elif args[0] == "-outt": A.out_text(args[1]) sys.exit(0) elif len(args) == 3: if args[0] == "-save": A.save_webpage(args[1], args[2]) sys.exit(0) elif len(args) == 1: if args[0] == "-h": print("-h") print(Help) sys.exit(0) elif args[0] == "-larch": print("-larch") map(lambda x : print(x), A.archived_urls) sys.exit(0) else: print("[Error]: Unknown
argument \'%s\'" % args[0]) sys.exit(0) else: print("Archiver: No arguments found.\n Type '-h' for help") sys.exit(0) if __name__ == "__main__": main()
ExaScience/smurff
python/smurff/smurff.py
Python
mit
3,868
0.009566
from .trainsession import TrainSession from .helper import FixedNoise class SmurffSession(TrainSession): def __init__(self, Ytrain, priors, is_scarce = True, Ytest=None, side_info=None, direct=True, *args, **kwargs): TrainSession.__init__(self, priors=priors, *args, **kwargs) self.addTrainAndTest(Ytrain, Ytest, is_scarce = is_scarce) if side_info is not None: nmodes = len(Ytrain.shape) assert len(side_info) == nmodes, "Too many side info, got %d, expected %d" % (len(side_info), nmodes) for mode, si in enumerate(side_info): if si is not None: self.addSideInfo(mode, si, direct=direct) class MacauSession(SmurffSession): """A train session specialized for use with the Macau algorithm Attributes ---------- Ytrain : :class: `numpy.ndarray`, :mod:`scipy.sparse` matrix or :class: `SparseTensor` Train matrix/tensor Ytest : :mod:`scipy.sparse` matrix or :class: `SparseTensor` Test matrix/tensor. Mainly used for calculating RMSE. side_info : list of :class: `numpy.ndarray`, :mod:`scipy.sparse` matrix or None Side info matrix/tensor for each dimension If there is no side info for a certain mode, pass `None`.
Each side info should have as many rows as you have elements in corresponding dimension of `Ytrain`. direct : bool Use Cholesky instead of CG solver univariate : bool Use univariate or multivariate sampling. \*\*args: Extra arguments are passed to the :class:`TrainSession` """ def __init__(self, Ytrain, is_scarce = True, Ytest=None, side_info=None, univariate=False, direct=True, *args, **kwargs): nmodes = len(Ytrain.shape) priors = ['normal'] * nmodes if side_info is not None: assert len(side_info) == nmodes for d in range(nmodes): if side_info[d] is not None: priors[d] = 'macau' if univariate: priors = [p + "one" for p in priors] SmurffSession.__init__(self, Ytrain, priors, is_scarce, Ytest, side_info, direct, *args, **kwargs) class BPMFSession(MacauSession): """A train session specialized for use with the BPMF algorithm Attributes ---------- Ytrain : :class: `numpy.ndarray`, :mod:`scipy.sparse` matrix or :class: `SparseTensor` Train matrix/tensor Ytest : :mod:`scipy.sparse` matrix or :class: `SparseTensor` Test matrix/tensor. Mainly used for calculating RMSE. univariate : bool Use univariate or multivariate sampling. \*\*args: Extra arguments are passed to the :class:`TrainSession` """ def __init__(self, Ytrain, is_scarce = True, Ytest=None, univariate=False, *args, **kwargs): MacauSession.__init__(self, Ytrain, is_scarce, Ytest, None, univariate, *args, **kwargs) class GFASession(SmurffSession): def __init__(self, Views, Ytest=None, *args, noise = FixedNoise(), **kwargs): Ytrain = Views[0] nmodes = len(Ytrain.shape) assert nmodes == 2 priors = ['normal', 'spikeandslab'] TrainSession.__init__(self, priors=priors, *args, **kwargs) self.addTrainAndTest(Ytrain, Ytest, noise = noise) for p in range(1, len(Views)): self.addData([0, p], Views[p], noise = noise) # old API -- for compatibility reasons def smurff(*args, **kwargs): return SmurffSession(*args, **kwargs).run() def bpmf(*args, **kwargs): return BPMFSession(*args, **kwargs).run() def macau(*args, **kwargs): return MacauSession(*args, **kwargs).run() def gfa(*args, **kwargs): return GFASession(*args, **kwargs).run()
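An illustrative sketch of the MacauSession API documented above (assumes smurff, numpy and scipy are installed; the shapes and sampler settings such as num_latent/burnin/nsamples are invented for the example):

import numpy as np
import scipy.sparse
from smurff import MacauSession

Ytrain = scipy.sparse.random(50, 40, density=0.2)  # scarce train matrix
side_info = [np.random.randn(50, 5), None]         # side info for mode 0 only
session = MacauSession(Ytrain, side_info=side_info, univariate=False,
                       num_latent=8, burnin=20, nsamples=20)
predictions = session.run()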
iti-luebeck/BEEP
Software/catkin_ws/src/beep_imu/ir_distance_zusmoro.py
Python
bsd-3-clause
1,517
0.039552
#!/usr/bin/env python from __future__ import division from std_msgs.msg import Int32 import roslib; roslib.load_manifest('beep_imu') import rospy import smbus import time bus = smbus.SMBus(1) irAddress = 0x55 #pub_mag_xsens = rospy.Publisher('imu/mag', Vector3Stamped) def decFrom2Compl(val, bitlen): if val & (1 << bitlen - 1): val = val - (1 << bitlen) return val #returns the val scaled from an old range into a new continouse range def scaleToRange(val, oldMin, oldMax, newMin, newMax): val -= oldMin val /= oldMax - oldMin val *= newMax - newMin val += newMin return val def init(): bus.write_byte_data(irAddress, 0x80, 0x01) # start unit bus.write_byte_data(irAddress, 0x81, 0xD0) # pulses bus.write_byte_data(irAddress, 0x82, 0x20) # start messurement # reading imu data
and publishing Imu msg to topic Imu def talker(): rospy.init_node('IRNode') rospy.loginfo('starting IR_Node') while not rospy.is_shutdown(): storeDistance() rospy.sleep(0.05) rospy.loginfo('IRNode shut down') def storeDistance(): #read current linear accelerations #msg = decFrom2Compl(msg,12) #update acceleration in msg #msg = scaleToRange(dist, -512, 511, -19.6133, 19.6133) pub = rospy.Publisher('topic/IR3', Int32)
pub2 = rospy.Publisher('topic/IR0', Int32) msg = Int32() msg.data = bus.read_byte_data(irAddress, 0x85) msg.data += bus.read_byte_data(irAddress, 0x86) * 0x100 pub.publish(msg) pub2.publish(msg) print msg if __name__ == '__main__': init() talker() pass
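A quick check of the scaleToRange arithmetic above, since the commented-out call depends on it: the function is a plain linear remap, so the midpoint of the old range lands on the midpoint of the new one.

print(scaleToRange(5, 0, 10, 0, 100))                   # 50.0
print(scaleToRange(511, -512, 511, -19.6133, 19.6133))  # 19.6133 (top of the sensor range)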
ubaldino/pyloop
prueba3.py
Python
agpl-3.0
369
0.01897
#encoding:utf-8
import Queue import threading import urllib2 # called by each thread def get_url(q, url): q.put(urllib2.urlopen(url).read()) theurls = '''http://google.com http://yahoo.com'''.split() print theurls q = Queue.Queue() for u in theurls: t = threading.Thread(target=get_url, args = (q,u)) t.daemon = True t.start() s = q.get() print s
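The same fan-out pattern, sketched for Python 3 where Queue and urllib2 were renamed (the URLs are the script's own examples):

import queue
import threading
import urllib.request

def get_url(q, url):
    q.put(urllib.request.urlopen(url).read())

q = queue.Queue()
for u in ('http://google.com', 'http://yahoo.com'):
    threading.Thread(target=get_url, args=(q, u), daemon=True).start()
print(q.get()[:60])  # whichever response arrives first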
pepeportela/edx-platform
lms/djangoapps/instructor/features/common.py
Python
agpl-3.0
4,560
0.001316
""" Define common steps for instructor dashboard acceptance tests. """ # pylint: disable=missing-docstring # pylint: disable=redefined-outer-name from __future__ import absolute_import from lettuce import step, world from mock import patch from nose.tools import assert_in from courseware.tests.factories import InstructorFactory, StaffFactory @step(u'Given I am "([^"]*)" for a very large course') def make_staff_or_instructor_for_large_course(step, role): make_large_course(step, role) @patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0}) def make_large_course(step, role): i_am_staff_or_instructor(step, role) @step(u'Given I am "([^"]*)" for a course') def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument ## In summary: makes a test course, makes a new Staff or Instructor user ## (depending on `role`), and logs that user in to the course # Store the role assert_in(role, ['instructor', 'staff']) # Clear existing courses to avoid conflicts world.clear_courses() # Create a new course course = world.CourseFactory.create( org='edx', number='999', display_name='Test Course' ) world.course_key = course.id world.role = 'instructor' # Log in as the an instructor or staff for the course if role == 'instructor': # Make & register an instructor for the course world.instructor = InstructorFactory(course_key=world.course_key) world.enroll_user(world.instructor, world.course_key) world.log_in( username=world.instructor.username, password='test', email=world.instructor.email, name=world.instructor.profile.name ) else: world.role = 'staff' # Make & register a staff member world.staff = StaffFactory(course_key=world.course_key) world.enroll_user(world.staff, world.course_key) world.log_in( username=world.staff.username, password='test', email=world.staff.email, name=world.staff.profile.name ) def go_to_section(section_name): # section name should be one of # course_info, membership, student_admin, data_download, analytics, send_email world.visit(u'/courses/{}'.format(world.course_key)) world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key)) world.css_click('[data-section="{0}"]'.format(section_name)) @step(u'I click "([^"]*)"') def click_a_button(step, button): # pylint: disable=unused-argument if button == "Generate Grade Report": # Go to the data download section of the instructor dash go_to_section("data_download") # Click generate grade report button world.css_click('input[name="calculate-grades-csv"]') # Expect to see a message that grade report is being generated expected_msg = "The grade report is being created." \ " To view the status of the report, see" \ " Pending Tasks below." world.wait_for_visible('#report-request-response') assert_in( expected_msg, world.css_text('#report-request-response'), msg="Could not find grade report generation success message." 
) elif button == "Grading Configuration": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="dump-gradeconf"]') elif button == "List enrolled students' profile information": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="list-profiles"]') elif button == "Download profile information as a CSV": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="list-profiles-csv"]') else: raise ValueError("Unrecognized button option " + button) @step(u'I visit the "([^"]*)" tab') def click_a_button(step, tab_name): # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email tab_name_dict = { 'Course Info': 'course_info', 'Membership': 'membership', 'Student Admin': 'student_admin', 'Data Download': 'data_download',
'Analytics': 'analytics', 'Email': 'send_email', } go_to_section(tab_name_dict[tab_name])
Distrotech/scons
test/Script-import.py
Python
mit
2,878
0.000695
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Test that a module that we import into an SConscript file can itself easily
import the global SCons variables, and a handful of other variables directly from SCons.Script modules. """ import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ import m1 """) test.write("m1.py", """\ from SCons.Script import * SConscript('SConscript') """) test.write('SConscript', """\ import m2 import m3 import m4 """) test.write("m2.py", """\ from SCons.Script import * Command("file.out", "file.in", Copy("$TARGET", "$SOURCE")) """) test.write("m3.py", """\ import SCons.Script SCons.Script.BuildTask SCons.Script.CleanTask SCons.Script.QuestionTask old_SCons_Script_variables = [ 'PrintHelp', 'OptParser', 'keep_going_on_error', 'print_explanations', 'print_includes', 'print_objects', 'print_time', 'memory_stats', 'ignore_errors', 'repositories', 'print_dtree', 'print_tree', 'sconscript_time', 'command_time', 'exit_status', 'profiling', ] for var in old_SCons_Script_variables: try: getattr(SCons.Script, var) except AttributeError: pass else: raise Exception("unexpected variable SCons.Script.%s" % var) """) test.write("m4.py", """\ import SCons.Script.SConscript SCons.Script.SConscript.Arguments SCons.Script.SConscript.ArgList SCons.Script.SConscript.BuildTargets SCons.Script.SConscript.CommandLineTargets SCons.Script.SConscript.DefaultTargets """) test.write("file.in", "file.in\n") test.run(arguments = '.') test.must_match("file.out", "file.in\n") test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
bozzzzo/quark
quarkc/test/emit/expected/py/defaulted-methods/defaulted_methods.py
Python
apache-2.0
87
0
import defaulted_methods if __name__ == "__main__": defaulted_methods.call_main()
everyevery/programming_study
leetcode/404-SumOfLeftLeaves/sum_of_left_leaves.py
Python
mit
719
0.002782
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object):
def sumOfLeftLeavesRec(self, root, left): if root is None: return 0 elif root.left is None and root.right is None: if left: return root.val else: return 0 else: return self.sumOfLeftLeavesRec(root.left, True) + self.sumOfLeftLeavesRec(root.right, False)
def sumOfLeftLeaves(self, root): """ :type root: TreeNode :rtype: int """ return self.sumOfLeftLeavesRec(root, False)
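A quick sanity check of the recursion (the TreeNode stub matches the commented definition above): in the tree below the left leaves are 9 and 15, so the expected sum is 24.

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
print(Solution().sumOfLeftLeaves(root))  # 24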
bharathramh92/easy-ecom
accounts/constants.py
Python
apache-2.0
102
0.009804
__author__ = 'bharathramh' EMAIL_VERIFICATION_EXPIRATION_DAYS = 1 FORGOT_PASSWORD_EXPIRATION_DAYS = 1
zitkino/backend
zitkino/spiders/praha_premierecinemas.py
Python
agpl-3.0
229
0
# -*- coding: utf-8 -*- from .base_premierecinemas import BasePremierecinemasCinemaSpider class Spider(BasePremierecinemasCinemaSpider): name = 'praha-premierecinemas' calendar_url = 'http://www.premierecinemas.cz/'
hainm/pythran
pythran/tests/openmp.legacy/omp_parallel_sections_reduction.py
Python
bsd-3-clause
8,373
0.003583
def omp_parallel_sections_reduction(): import math dt = 0.5 rounding_error = 1.E-9 sum = 7 dsum = 0 dt = 1. / 3. result = True product = 1 logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 i = 0 exclusiv_bit_or = 0 known_sum = (1000 * 999) / 2 + 7 if 'omp parallel sections private(i) reduction(+:sum)': if 'omp section': for i in xrange(1,300): sum += i if 'omp section': for i in xrange(300,700): sum += i if 'omp section': for i in xrange(700,1000): sum += i if known_sum != sum: print "E: reduction(+:sum)" result = False diff = (1000 * 999) / 2 if 'omp parallel sections private(i) reduction(-:diff)': if 'omp section': for i in xrange(1,300): diff -= i if 'omp section': for i in xrange(300,700): diff -= i if 'omp section': for i in xrange(700,1000): diff -= i if diff != 0: print "E: reduction(-:diff)" result = False dsum = 0 dpt = 0 for i in xrange(0, 20): dpt *= dt dknown_sum = (1 - dpt) / (1 - dt) if 'omp parallel sections private(i) reduction(+:dsum)': if 'omp section': for i in xrange(0,7): dsum += math.pow(dt, i) if 'omp section': for i in xrange(7,14): dsum += math.pow(dt, i) if 'omp section': for i in xrange(14,20): dsum += math.pow(dt, i) if abs(dsum-dknown_sum) > rounding_error: print "E: reduction(+:dsum)" result = False dsum = 0 dpt = 0 for i in xrange(0, 20): dpt *= dt ddiff = (1 - dpt) / (1 - dt) if 'omp parallel sections private(i) reduction(-:ddiff)': if 'omp section': for i in xrange(0,6): ddiff -= math.pow(dt, i) if 'omp section': for i in xrange(6,12): ddiff -= math.pow(dt, i) if 'omp section': for i in xrange(12,20): ddiff -= math.pow(dt, i) if abs(ddiff) > rounding_error: print "E: reduction(-:ddiff)" result = False if 'omp parallel sections private(i) reduction(*:product)': if 'omp section': for i in xrange(1,3): product *= i if 'omp section': for i in xrange(3,6): product *= i if 'omp section': for i in xrange(6,11): product *= i known_product = 3628800 if known_product != product: print "E: reduction(*:product)" result = False logics = [1 for i in xrange(0,1000)] if 'omp parallel sections private(i) reduction(&&:logic_and)': if 'omp section': for i in xrange(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in xrange(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in xrange(700, 1000): logic_and = (logic_and and logics[i]) if not logic_and: print "E: reduction(&&:logic_and)" result = False logic_and = 1; logics[1000/2]=0 if 'omp parallel sections private(i) reduction(&&:logic_and)': if 'omp section': for i in xrange(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in xrange(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in xrange(700, 1000): logic_and = (logic_and and logics[i]) if logic_and: print "E: reduction(&&:logic_and) with logics[1000/2]=0" result = False logics = [0 for i in xrange(0,1000)] if 'omp parallel sections private(i) reduction(||:logic_or)': if 'omp section': for i in xrange(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in xrange(300, 700): logic_or = (logic_or or logics[i]) if 'omp section': for i in xrange(700, 1000): logic_or = (logic_or or logics[i]) if logic_or: print "E: reduction(||:logic_or)" result = False logic_or = 0; logics[1000/2]=1 if 'omp parallel sections private(i) reduction(||:logic_or)': if 'omp section': for i in xrange(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in xrange(300, 700): logic_or = (logic_or or logics[i]) if 'omp section': for i in xrange(700, 1000): logic_or = (logic_or or 
logics[i]) if not logic_or: print "E: reduction(||:logic_or) with logics[1000/2]=1" result = False logics = [1 for i in xrange(0,1000)] if 'omp parallel sections private(i) reduction(&:bit_and)': if 'omp section': for i in xrange(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in xrange(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in xrange(700, 1000): bit_and = (bit_and & logics[i]) if not bit_and: print "E: reduction(&:bit_and)" result = False bit_and = 1; logics[1000/2]=0 if 'omp parallel sections private(i) reduction(&:bit_and)': if 'omp section': for i in xrange(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in xrange(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in xrange(700, 1000): bit_and = (bit_and & logics[i]) if bit_and: print "E: reduction(&:bit_and) with logics[1000/2]=0" result = False logics = [0 for i in xrange(0,1000)] if 'omp parallel sections private(i) reduction(|:bit_or)': if 'omp section': for i in xrange(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i in xrange(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in xrange(700, 1000): bit_or = (bit_or | logics[i]) if bit_or: print "E: reduction(|:bit_or)" result = False bit_or = 0; logics[1000/2]=1 if 'omp parallel sections private(i) reduction(|:bit_or)': if 'omp section': for i in xrange(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i in xrange(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in xrange(700, 1000): bit_or = (bit_or | logics[i]) if not bit_or: print "E: reduction(|:bit_or) with logics[1000/2]=1" result = False logics = [0 for i in xrange(0,1000)] if 'omp parallel sections private(i) reduction(^:exclusiv_bit_or)':
if 'omp section': for i in xrange(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in xrange(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in xrange(700, 1000):
exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if exclusiv_bit_or: print "E: reduction(^:exclusiv_bit_or)" result = False exclusiv_bit_or = 0; logics[1000/2]=1 if 'omp parallel sections private(i) reduction(^:exclusiv_bit_or)': if 'omp section': for i in xrange(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in xrange(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in xrange(700, 1000):
SINGROUP/pycp2k
pycp2k/classes/_each126.py
Python
lgpl-3.0
1,114
0.001795
from pycp2k.inputsection import InputSection class _each126(InputSection): def __init__(self): InputSection.__init__(self) self.Just_energy = None
self.Powell_opt = None self.Qs_scf = None self.Xas_scf = None self.Md = None self.Pint = None self.Metadynamics = None self.Geo_opt = None self.Rot_opt = None self.Cell_opt = None self.Band = None self.Ep_lin_solver = None self.Spline_find_coeffs = None self.Replica_eval = None self.Bsse = None self.Shell_opt = None
self.Tddft_scf = None self._name = "EACH" self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
osgcc/ryzom
nel/tools/build_gamedata/processes/pacs_prim_list/0_setup.py
Python
agpl-3.0
1,786
0.006719
#!/usr/bin/python # # \file 0_setup.py # \brief setup pacs_prim_list # \date 2011-09-28 7:22GMT # \author Jan Boon (Kaetemi) # Python port of game data build pipeline. # Setup pacs_prim_list # # NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import time, sys, os, shutil, subprocess, distutils.dir_util sys.path.append("../../configuration") if os.path.isfile("log.log"): os.remove("log.log") log = open("log.log", "w") from scripts import * from buildsite import * from process import * from tools import * from directories import * printLog(log, "") printLog(log, "-------") printLog(log, "--- Setup pacs_prim_list") printLog(log, "-------") printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time()))) printLog(log, "") # Setup source directories printLog(log, ">>> Setup source directories <<<") for dir in PacsPrimExportSourceDirectories: mkPath(log, ExportBuildDirectory + "/" + dir) # Setup build directories printLog(log, ">>> Setup build directories <<<") mkPath(log, DataCommonDirectory) # no choice log.close() # end of file
pixunil/Cinnamon
files/usr/share/cinnamon/cinnamon-settings/modules/cs_effects.py
Python
gpl-2.0
10,983
0.004826
#!/usr/bin/env python2 from GSettingsWidgets import * from ChooserButtonWidgets import TweenChooserButton, EffectChooserButton EFFECT_SETS = { "cinnamon": ("traditional", "traditional", "traditional", "none", "none", "none"), "scale": ("scale", "scale", "scale", "scale", "scale", "scale"), "fade": ("fade", "fade", "fade", "scale", "scale", "scale"), "blend": ("blend", "blend", "blend", "scale", "scale", "scale"), "move": ("move", "move", "move", "scale", "scale", "scale"), "flyUp": ("flyUp", "flyDown", "flyDown", "scale", "scale", "scale"), "flyDown": ("flyDown", "flyUp", "flyUp", "scale", "scale", "scale"), "default": ("scale", "scale", "none", "none", "none", "none") } TRANSITIONS_SETS = { "cinnamon": ("easeOutQuad", "easeOutQuad", "easeInQuad", "easeInExpo", "easeNone", "easeInQuad"), "normal": ("easeOutSine", "easeInBack", "easeInSine", "easeInBack", "easeOutBounce", "easeInBack"), "extra": ("easeOutElastic", "easeOutBounce", "easeOutExpo", "easeInExpo", "easeOutElastic", "easeInExpo"), "fade": ("easeOutQuart", "easeInQuart", "easeInQuart", "easeInBack", "easeOutBounce", "easeInBack") } TIME_SETS = { "cinnamon": (175, 175, 200, 100, 100, 100), "slow": (400, 400, 400, 100, 100, 100), "normal": (250, 250, 250, 100, 100, 100), "fast": (100, 100, 100, 100, 100, 100), "default": (250, 250, 150, 400, 400, 400) } COMBINATIONS = { # name effect transition time "cinnamon": ("cinnamon", "cinnamon", "cinnamon"), "scale": ("scale", "normal", "normal"), "fancyScale": ("scale", "extra", "slow"), "fade": ("fade", "fade", "normal"), "blend": ("blend", "fade", "normal"), "move": ("move", "normal", "fast"), "flyUp": ("flyUp", "normal", "fast"), "flyDown": ("flyDown", "normal", "fast"), #for previous versions "default": ("default", "normal", "default") } OPTIONS = ( ("cinnamon", _("Cinnamon")), ("scale", _("Scale")), ("fancyScale", _("Fancy Scale")), ("fade", _("Fade")), ("blend", _("Blend")), ("move", _("Move")), ("flyUp", _("Fly up, down")), ("flyDown", _("Fly down, up")), #for previous versions ("default", _("Default")) ) TYPES = ("map", "close", "minimize", "maximize", "unmaximize", "tile") SCHEMA = "org.cinnamon" DEP_PATH = "org.cinnamon/desktop-effects" KEY_TEMPLATE = "desktop-effects-%s-%s" class GSettingsTweenChooserButton(TweenChooserButton, CSGSettingsBackend): def __init__(self, schema, key, dep_key): self.key = key self.bind_prop = "tween" self.bind_dir = Gio.SettingsBindFlags.DEFAULT self.bind_object = self if schema not in settings_objects.keys(): settings_objects[schema] = Gio.Settings.new(schema) self.settings = settings_objects[schema] super(GSettingsTweenChooserButton, self).__init__() self.bind_settings() class GSettingsEffectChooserButton(EffectChooserButton, CSGSettingsBackend): def __init__(self, schema, key, dep_key, options): self.key = key self.bind_prop = "effect" self.bind_dir = Gio.SettingsBindFlags.DEFAULT self.bind_object = self if schema not in settings_objects.keys(): settings_objects[
schema] = Gio.Settings.new(schema) self.settings = settings_objects[schema] super(GSettingsEffectChooserButton, self).__i
nit__(options) self.bind_settings() class Module: name = "effects" category = "appear" comment = _("Control Cinnamon visual effects.") def __init__(self, content_box): keywords = _("effects, fancy, window") sidePage = SidePage(_("Effects"), "cs-desktop-effects", keywords, content_box, module=self) self.sidePage = sidePage def on_module_selected(self): if not self.loaded: print "Loading Effects module" self.sidePage.stack = SettingsStack() self.sidePage.add_widget(self.sidePage.stack) self.schema = Gio.Settings(SCHEMA) self.effect_sets = {} for name, sets in COMBINATIONS.items(): self.effect_sets[name] = (EFFECT_SETS[sets[0]], TRANSITIONS_SETS[sets[1]], TIME_SETS[sets[2]]) # Enable effects page = SettingsPage() self.sidePage.stack.add_titled(page, "effects", _("Enable effects")) settings = page.add_section(_("Enable Effects")) widget = GSettingsSwitch(_("Window effects"), "org.cinnamon", "desktop-effects") settings.add_row(widget) widget = GSettingsSwitch(_("Effects on dialog boxes"), "org.cinnamon", "desktop-effects-on-dialogs") settings.add_reveal_row(widget, "org.cinnamon", "desktop-effects") widget = GSettingsSwitch(_("Effects on menus"), "org.cinnamon", "desktop-effects-on-menus") settings.add_reveal_row(widget, "org.cinnamon", "desktop-effects") self.chooser = GSettingsComboBox(_("Effects style"), "org.cinnamon", "desktop-effects-style", OPTIONS) self.chooser.content_widget.connect("changed", self.on_value_changed) settings.add_reveal_row(self.chooser, "org.cinnamon", "desktop-effects") widget = GSettingsSwitch(_("Fade effect on Cinnamon scrollboxes (like the Menu application list)"), "org.cinnamon", "enable-vfade") settings.add_row(widget) widget = GSettingsSwitch(_("Session startup animation"), "org.cinnamon", "startup-animation") settings.add_row(widget) if Gtk.get_major_version() == 3 and Gtk.get_minor_version() >= 16: widget = GSettingsSwitch(_("Overlay scroll bars (logout required)"), "org.cinnamon.desktop.interface", "gtk-overlay-scrollbars") settings.add_row(widget) self.schema.connect("changed::desktop-effects", self.on_desktop_effects_enabled_changed) # Customize page = SettingsPage() self.sidePage.stack.add_titled(page, "customize", _("Customize")) box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL) label = Gtk.Label() label.set_markup("<b>%s</b>" % _("Customize settings")) box.pack_start(label, False, False, 0) self.custom_switch = Gtk.Switch(active = self.is_custom()) box.pack_end(self.custom_switch, False, False, 0) self.custom_switch.connect("notify::active", self.update_effects) page.add(box) self.revealer = Gtk.Revealer() self.revealer.set_transition_type(Gtk.RevealerTransitionType.SLIDE_DOWN) self.revealer.set_transition_duration(150) page.add(self.revealer) settings = SettingsBox(_("Effect")) self.revealer.add(settings) self.size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL) effects = ["none", "scale", "fade", "blend", "move", "flyUp", "flyDown", "traditional"] # MAPPING WINDOWS widget = self.make_effect_group(_("Mapping windows"), "map", effects) settings.add_row(widget) # CLOSING WINDOWS widget = self.make_effect_group(_("Closing windows"), "close", effects) settings.add_row(widget) # MINIMIZING WINDOWS widget = self.make_effect_group(_("Minimizing windows"), "minimize", effects) settings.add_row(widget) # MAXIMIZING WINDOWS # effects = ["none", _("None")], ["scale", _("Scale")]] widget = self.make_effect_group(_("Maximizing windows"), "maximize") settings.add_row(widget) # UNMAXIMIZING WINDOWS widget = self.make_effect_group(_("Unmaximizing windows"), 
"unmaximize") settings.add_row(widget)
lordamit/youtube-dl-gui
youtube-dl-gui.py
Python
gpl-3.0
3,654
0.001368
#!/usr/bin/python # Youtube-dl-GUI provides a front-end GUI to youtube-dl # Copyright (C) 2013 Amit Seal Ami # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see {http://www.gnu.org/licenses/}. # from PySide.QtGui import QMessageBox from PySide import QtGui from ui.main_window import Ui_MainWindow import sys from os import system from urllib2 import urlopen from urllib2 import HTTPError class MyApplication(QtGui.QMainWindow): format_selected = 35 def __init__(self, parent=None): """Initializes""" QtGui.QMainWindow.__init__(self, parent) self.ui = Ui_MainWindow() self.ui.setupUi(self) self.ui.comboBoxFormats.activated[str].connect(self.combo_formats) self.ui.btnDownload.clicked.connect(self.download_button_pressed) def download_button_pressed(self): if self.ui.textEditDownload is not None: if self.check_url(self.ui.textEditDownload.toPlainText()): #subprocess.Popen(self.return_youtube_dl_cmd()) system(self.return_youtube_dl_cmd()) else: msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Critical) msgBox.setText("Error in URL") msgBox.setInformativeText("Please check the URL you provided.") msgBox.setStandardButtons(QMessageBox.Ok) msgBox.exec_()
def check_url(self, url_tobe_checked): """ :param: url_tobe_checked: :return: """ try: code = urlopen(url_tobe_checked).code except ValueError: return False except HTTPError: return False
if (code / 100) >= 4: return False else: return True def return_youtube_dl_cmd(self): from os.path import expanduser home = expanduser("~") cmd = "gnome-terminal -e " cmd += '"youtube-dl -f{0} -c -o {1}/Downloads/%(title)s-%(id)s.%(ext)s {2}"'.format( self.format_selected, home, self.ui.textEditDownload.toPlainText() ) return cmd def combo_formats(self, text): """ checks the selected option @param text: the selected option's text """ if text == 'H264 MP4 1080p': self.format_selected = 37 if text == 'H264 MP4 720p': self.format_selected = 22 if text == 'WebM 720p': self.format_selected = 45 if text == 'WebM 480p': self.format_selected = 43 if text == 'H264 MP4 480p': self.format_selected = 18 if text == 'H264 FLV 480p': self.format_selected = 35 if text == 'H264 FLV 360p': self.format_selected = 34 if text == 'H263 240p': self.format_selected = 5 if text == '3GP video': self.format_selected = 17 if __name__ == "__main__": APP = QtGui.QApplication(sys.argv) WINDOW = MyApplication() WINDOW.show() sys.exit(APP.exec_())
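The if-chain in combo_formats could equally be a table lookup; a sketch with the same label-to-itag pairs (FORMAT_CODES is an invented name), falling back to 35 like the class default:

FORMAT_CODES = {
    'H264 MP4 1080p': 37, 'H264 MP4 720p': 22, 'WebM 720p': 45,
    'WebM 480p': 43, 'H264 MP4 480p': 18, 'H264 FLV 480p': 35,
    'H264 FLV 360p': 34, 'H263 240p': 5, '3GP video': 17,
}
text = 'H264 MP4 720p'                        # example selection
format_selected = FORMAT_CODES.get(text, 35)  # 22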
technomaniac/trelliopg
trelliopg/__init__.py
Python
mit
118
0.008475
from .sql import * __all__ = ['DBAdapter', 'get_db_adapter', 'async_atomic', 'async_atomic_func', 'get_db_settings']
lambdal/envois
test/test_envois.py
Python
mit
1,000
0.004
#-*- coding: utf-8 -*- """ envois.test ~~~~~~
~~~~~
~ nosetests for the envois pkg :copyright: (c) 2012 by Mek :license: BSD, see LICENSE for more details. """ import os import json import unittest from envois import invoice jsn = {"seller": {"name": "Lambda Labs, Inc.", "address": {"street": "857 Clay St. Suite 206", "city": "San Francisco", "state": "CA", "zip": "94108", "phone": "(555) 555-5555", "email": "some@email.com" }, "account": {"swift": "...", "number": "...", "name": "Lambda Labs Inc.", "same_address": True}}, "buyer": {"name": "Foo Corp", "address": {"street": "88 Foo Road, Foo Place", "city": "Fooville", "state": "BA", "zip": "31337"}, "logo": "http://lambdal.com/images/lambda-labs-logo.png"}, "items": [{"description": "Facial Detection & Landmark Recognition Perpetual License", "qty": 1, "unit_price": 32768}], "terms": {"days": 30, "string": ""}} class Envois_Test(unittest.TestCase): def test_invoice(self): invoice.make_invoice(jsn)
jasonwee/asus-rt-n14uhp-mrtg
src/lesson_text/re_fullmatch.py
Python
apache-2.0
247
0.004049
import re text = 'This is some text -- with punctuation.' pattern = 'is' print('Text :', text) print('Pattern :', pattern) m = re.search(pattern, text) print('Search :', m) s = re.fullmatch(pattern, text) print('Full match :', s)
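A short follow-up on the search/fullmatch contrast shown above: fullmatch only succeeds when the pattern spans the entire string, so s is None here, while an anchored whole-string pattern matches.

import re
text = 'This is some text -- with punctuation.'
print(re.fullmatch('is', text))                      # None
print(re.fullmatch(r'This .* punctuation\.', text))  # <re.Match ...>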
yland/coala
tests/parsing/StringProcessing/PositionIsEscapedTest.py
Python
agpl-3.0
2,282
0
from coalib.parsing.StringProcessing import position_is_escaped from tests.parsing.StringProcessing.StringProcessingTestBase import ( StringProcessingTestBase) class PositionIsEscapedTest(StringProcessingTestBase): # Test the position_is_escaped() function. def test_basic(self): expected_results = [ 30 * [False] + [True] + 7 * [False], 30 * [False] + [True] + 7 * [False], 30 * [False] + [True] + 7 * [False], 28 * [False] + [True, False, True] + 7 * [False], 31 * [False] + [True] + 6 * [False],
31 * [False] + [True] + 6 * [False], 38 * [False], 6 * [Fals
e] + [True] + 31 * [False], 6 * [False] + [True, False, True] + 29 * [False], 6 * [False] + [True] + 31 * [False], 6 * [False] + [True, False, True] + 29 * [False], 14 * [False] + [True] + 23 * [False], 12 * [False] + [True, False, True] + 23 * [False], 38 * [False], [], 14 * [False], [False], [False, True]] self.assertResultsEqual( position_is_escaped, {(test_string, position): result for test_string, string_result in zip(self.test_strings, expected_results) for position, result in zip(range(len(test_string)), string_result)}) # Test position_is_escaped() with a more special test string. def test_extended(self): test_string = r"\\\\\abcabccba###\\13q4ujsabbc\+'**'ac###.#.####-ba" result_dict = { 0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: False, 17: False, 18: True, 19: False, 30: False, 31: True, 50: False, 51: False, 6666666: False, -1: False, -20: True, -21: False} self.assertResultsEqual( position_is_escaped, {(test_string, position): result for position, result in result_dict.items()})
jeremiahyan/odoo
addons/l10n_cl/models/account_move.py
Python
gpl-3.0
9,118
0.005485
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo.exceptions import ValidationError from odoo import models, fields, api, _ from odoo.osv import expression SII_VAT = '60805000-0' class AccountMove(models.Model): _inherit = "account.move" partner_id_vat = fields.Char(related='partner_id.vat', string='VAT No') l10n_latam_internal_type = fields.Selection( related='l10n_latam_document_type_id.internal_type', string='L10n Latam Internal Type') def _get_l10n_latam_documents_domain(self): self.ensure_one() if self.journal_id.company_id.account_fiscal_country_id != self.env.ref('base.cl') or not \ self.journal_id.l10n_latam_use_documents: return super()._get_l10n_latam_documents_domain() if self.journal_id.type == 'sale': domain = [('country_id.code', '=', "CL"), ('internal_type', '!=', 'invoice_in')] if self.company_id.partner_id.l10n_cl_sii_taxpayer_type == '1': domain += [('code', '!=', '71')] # Companies with VAT Affected doesn't have "Boleta de honorarios Electrónica" return domain domain = [ ('country_id.code', '=', 'CL'), ('internal_type', 'in', ['invoice', 'debit_note', 'credit_note', 'invoice_in'])] if self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat != '60805000-0': domain += [('code', 'not in', ['39', '70', '71', '914', '911'])] elif self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat == '60805000-0': domain += [('code', 'not in', ['39', '70', '71'])] if self.move_type == 'in_invoice': domain += [('internal_type', '!=', 'credit_note')] elif self.partner_id.l10n_cl_sii_taxpayer_type == '2': domain += [('code', 'in', ['70', '71', '56', '61'])] elif self.partner_id.l10n_cl_sii_taxpayer_type == '3': domain += [('code', 'in', ['35', '38', '39', '41', '56', '61'])] elif not self.partner_id.l10n_cl_sii_taxpayer_type or self.partner_id.country_id != self.env.ref( 'base.cl') or self.partner_id.l10n_cl_sii_taxpayer_type == '4': domain += [('code', 'in', [])] return domain def _check_document_types_post(self): for rec in self.filtered( lambda r: r.company_id.account_fiscal_country_id.code == "CL" and r.journal_id.type in ['sale', 'purchase']): tax_payer_type = rec.partner_id.l10n_cl_sii_taxpayer_type vat = rec.partner_id.vat country_id = rec.partner_id.country_id latam_document_type_code = rec.l10n_latam_document_type_id.code if (not tax_payer_type or not vat) and (country_id.code == "CL" and latam_document_type_code and latam_document_type_code not in ['35', '38', '39', '41']): raise ValidationError(_('Tax payer type and vat number are mandatory for this type of ' 'document. 
Please set the current tax payer type of this customer')) if rec.journal_id.type == 'sale' and rec.journal_id.l10n_latam_use_documents: if country_id.code != "CL": if not ((tax_payer_type == '4' and latam_document_type_code in ['110', '111', '112']) or ( tax_payer_type == '3' and latam_document_type_code in ['39', '41', '61', '56'])): raise ValidationError(_( 'Document types for foreign customers must be export type (codes 110, 111 or 112) or you \ should define the customer as an end consumer and use receipts (codes 39 or 41)')) if rec.journal_id.type == 'purchase' and rec.journal_id.l10n_latam_use_documents: if vat != SII_VAT and latam_document_type_code == '914': raise ValidationError(_('The DIN document is intended to be used only with RUT 60805000-0' ' (Tesorería General de La República)')) if not tax_payer_type or not vat: if country_id.code == "CL" and latam_document_type_code not in [ '35', '38', '39', '41']: raise ValidationError(_('Tax payer type and vat number are mandatory for this type of ' 'document. Please set the current tax payer type of this supplier')) if tax_payer_type == '2' and latam_document_type_code not in ['70', '71', '56', '61']: raise ValidationError(_('The tax payer type of this supplier is incorrect for the selected type' ' of document.')) if tax_payer_type in ['1', '3']: if latam_document_type_code in ['70', '71']: raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver ' 'fees documents')) if latam_document_type_code in ['110', '111', '112']: raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver ' 'imports documents')) if tax_payer_type == '4' or country_id.code != "CL": raise ValidationError(_('You need a journal without the use of documents for foreign ' 'suppliers')) @api.onchange('journal_id') def _l10n_cl_onchange_journal(self): if self.company_id.country_id.code == 'CL': self.l10n_latam_document_type_id = False def _post(self, soft=True): self._check_document_types_post() return super()._post(soft) def _l10n_cl_get_formatted_sequence(self, number=0): return '%s %06d' % (self.l10n_latam_document_type_id.doc_code_prefix, number) def _get_starting_sequence(self): """ If use documents then will create a new starting sequence using the document type code prefix and the journal document number with a 6 padding number """ if self.journal_id.l10n_latam_use_documents and self.env.company.account_fiscal_country_id.code == "CL": if self.l10n_latam_document_type_id: return self._l10n_cl_get_formatted_sequence() return super()._get_starting_sequence() def _get_last_sequence_domain(self, relaxed=False): where_string, param = super(AccountMove, self)._get_last_sequence_domain(relaxed) if self.company_id.account_fiscal_country_id.code == "CL" and self.l10n_latam_use_documents: where_string = where_string.replace('journal_id = %(journal_id)s AND', '') where_string += ' AND l10n_latam_document_type_id = %
(l10n_latam_document_type_id)s AND ' \ 'company_id = %(company_id)s AND move_type IN %(move_type)s' param['company_id'] = self.company_id.id or False param['l10n_latam_document_type_id'] = self.l10
n_latam_document_type_id.id or 0 param['move_type'] = (('in_invoice', 'in_refund') if self.l10n_latam_document_type_id._is_doc_type_vendor() else ('out_invoice', 'out_refund')) return where_string, param def _get_name_invoice_report(self): self.ensure_one() if self.l10n_latam_use_documents and self.company_id.account_fiscal_country_id.code == 'CL': return 'l10n_cl.report_invoice_document' return super()._get_name_invoice_report() def _l10n_cl_get_invoice_totals_for_report(self): self.ensure_one() tax_ids_filter = tax_line_id_filter = None include_sii = self._l10n_cl_include_sii() if include_sii: tax_ids_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14)) tax_line_id_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14)) tax_lines_data = sel
hankcs/HanLP
plugins/hanlp_common/hanlp_common/visualization.py
Python
apache-2.0
10,303
0.000883
# -*- coding:utf-8 -*- # Modified from https://github.com/tylerneylon/explacy import io from collections import defaultdict from pprint import pprint from phrasetree.tree import Tree def make_table(rows, insert_header=False): col_widths = [max(len(s) for s in col) for col in zip(*rows[1:])] rows[0] = [x[:l] for x, l in zip(rows[0], col_widths)] fmt = '\t'.join('%%-%ds' % width for width in col_widths) if insert_header: rows.insert(1, ['─' * width for width in col_widths]) return '\n'.join(fmt % tuple(row) for row in rows) def _start_end(arrow): start, end = arrow['from'], arrow['to'] mn = min(start, end) mx = max(start, end) return start, end, mn, mx def pretty_tree_horizontal(arrows, _do_print_debug_info=False): """Print the dependency tree horizontally Args: arrows: _do_print_debug_info: (Default value = False) Returns: """ # Set the base height; these may increase to allow room for arrowheads after this. arrows_with_deps = defaultdict(set) for i, arrow in enumerate(arrows): arrow['underset'] = set() if _do_print_debug_info: print('Arrow %d: "%s" -> "%s"' % (i, arrow['from'], arrow['to'])) num_deps = 0 start, end, mn, mx = _start_end(arrow) for j, other in enumerate(arrows): if arrow is other: continue o_start, o_end, o_mn, o_mx = _start_end(other) if ((start == o_start and mn <= o_end <= mx) or (start != o_start and mn <= o_start <= mx)): num_deps += 1 if _do_print_debug_info: print('%d is over %d' % (i, j)) arrow['underset'].add(j) arrow['num_deps_left'] = arrow['num_deps'] = num_deps arrows_with_deps[num_deps].add(i) if _do_print_debug_info: print('') print('arrows:') pprint(arrows) print('') print('arrows_with_deps:') pprint(arrows_with_deps) # Render the arrows in characters. Some heights will be raised to make room for arrowheads. sent_len = (max([max(arrow['from'], arrow['to']) for arrow in arrows]) if arrows else 0) + 1 lines = [[] for i in range(sent_len)] num_arrows_left = len(arrows) while num_arrows_left > 0: assert len(arrows_with_deps[0]) arrow_index = arrows_with_deps[0].pop() arrow = arrows[arrow_index] src, dst, mn, mx = _start_end(arrow) # Check the height needed. height = 3 if arrow['underset']: height = max(arrows[i]['height'] for i in arrow['underset']) + 1 height = max(height, 3, len(lines[dst]) + 3) arrow['height'] = height if _do_print_debug_info: print('') print('Rendering arrow %d: "%s" -> "%s"' % (arrow_index, arrow['from'], arrow['to'])) print(' height = %d' % height) goes_up = src > dst # Draw the outgoing src line. if lines[src] and len(lines[src]) < height: lines[src][-1].add('w') while len(lines[src]) < height - 1: lines[src].append(set(['e', 'w'])) if len(lines[src]) < height: lines[src].append({'e'}) lines[src][height - 1].add('n' if goes_up else 's') # Draw the incoming dst line. lines[dst].append(u'►') while len(lines[dst]) < height: lines[dst].append(set(['e', 'w'])) lines[dst][-1] = set(['e', 's']) if goes_up else set(['e', 'n']) # Draw the adjoining vertical line. for i in range(mn + 1, mx): while len(lines[i]) < height - 1: lines[i].append(' ') lines[i].append(set(['n', 's'])) # Update arrows_with_deps. for arr_i, arr in enumerate(arrows): if arrow_index in arr['underset']: arrows_with_deps[arr['num_deps_left']].remove(arr_i) arr['num_deps_left'] -= 1 arrows_with_deps[arr['num_deps_left']].add(arr_i) num_arrows_left -= 1 return render_arrows(lines) def render_arrows(lines): arr_chars = {'ew': u'─', 'ns': u'│', 'en': u'└', 'es': u'┌', 'enw': u'┴', 'ensw': u'┼', 'ens': u'├', 'esw': u'┬'} # Convert the character lists into strings. 
max_len = max(len(line) for line in lines) for i in range(len(lines)): lines[i] = [arr_chars[''.join(sorted(ch))] if type(ch) is set else ch for ch in lines[i]] lines[i] = ''.join(reversed(lines[i])) lines[i] = ' ' * (max_len - len(lines[i])) + lines[i] return lines def render_span(begin, end, unidirectional=False): if end - begin == 1: return ['───►'] elif end - begin == 2: return [ '──┐', '──┴►', ] if unidirectional else [ '◄─┐', '◄─┴►', ] rows = [] for i in range(begin, end): if i == (end - begin) // 2 + begin: rows.append(' ├►') elif i == begin: rows.append('──┐' if unidirectional else '◄─┐') elif i == end - 1: rows.append('──┘' if unidirectional else '◄─┘') else: rows.append(' │') return rows def tree_to_list(T): return [T.label(), [tree_to_list(t) if isinstance(t, Tree) else t for t in T]] def list_to_tree(L): if isinstance(L, str): return L return Tree(L[0], [list_to_tree(child) for child in L[1]]) def render_labeled_span(b, e, spans, labels, label, offset, unidirectional=False): spans.extend([''] * (b - offset)) spans.extend(render_span(b, e, unidirectional)) center = b + (e - b) // 2 labels.extend([''] * (center - offset)) labels.append(label) labels.extend([''] * (e - center - 1)) def main(): # arrows = [{'from': 1, 'to': 0}, {'from': 2, 'to': 1}, {'from': 2, 'to': 4}, {'from': 2, 'to': 5}, # {'from': 4, 'to': 3}] # lines = pretty_tree_horizontal(arrows) # print('\n'.join(lines)) # print('\n'.join([ # '◄─┐', # ' │', # ' ├►', # ' │', # '◄─┘', # ])) print('\n'.join(render_span(7, 12))) if __name__ == '__main__': main() left_rule = {'<': ':', '^': ':', '>': '-'} right_rule = {'<': '-', '^': ':', '>': ':'} def evalute_field(record, field_spec): """Evalute a field of a record using the type of the field_spec as a guide. Args: record: field_spec: Returns: """ if type(field_s
pec) is int: return str(record[field_spec]) elif type(field_spec) is str: return str(getattr(record, field_spec)) else: return str(field_spec(record)) def markdown_table(headings, records, fields=None, alignment=None, file=None): """Ge
nerate a Doxygen-flavor Markdown table from records. See https://stackoverflow.com/questions/13394140/generate-markdown-tables file -- Any object with a 'write' method that takes a single string parameter. records -- Iterable. Rows will be generated from this. fields -- List of fields for each row. Each entry may be an integer, string or a function. If the entry is an integer, it is assumed to be an index of each record. If the entry is a string, it is assumed to be a field of each record. If the entry is a function, it is called with the record and its return value is taken as the value of the field. headings -- List of column headings. alignment - List of pairs alignment characters. The first of the pair specifies the alignment of the header, (Doxygen won't respect this, but it might look good, the second specifies the alignment of the cells in the column. Possible alignment characters are: '<' = Left align '>' = Right align (default for cells) '^' = Center (default for column
natj/bender
sweep.py
Python
mit
8,380
0.024105
import sys sys.path.append('/Users/natj/projects/arcmancer/lib/') import pyarcmancer as pyac from img import Imgplane from visualize_polar import Visualize from lineprofile import * import units import numpy as np import matplotlib as mpl from pylab import * import os from matplotlib import cm import scipy.interpolate as interp #from joblib import Parallel, delayed #import multiprocessing outdir = 'out/lines2/' ################################################## # Set up figure & layout fig = figure(figsize=(6,10)) mpl.rc('font', family='serif') mpl.rc('xtick', labelsize='x-small') mpl.rc('ytick', labelsize='x-small') mpl.rcParams['image.cmap'] = 'inferno' #num_cores = multiprocessing.cpu_count() #print "num of cores {}", num_cores #Setup pyarcmancer ################################################## conf = pyac.Configuration() conf.absolute_tolerance = 1e-12 conf.relative_tolerance = 1e-12 conf.henon_tolerance = 1e-8 conf.sampling_interval = 1e-3 conf.minimum_stepsize = 1e-10 conf.maximum_steps = 10000 conf.enforce_maximum_stepsize = False conf.enforce_minimum_stepsize = True conf.enforce_maximum_steps = True conf.store_only_endpoints = True #pyac.Log.set_console() pyac.Log.set_file() ################################################## # Star parameters #R = 12.0 #M = 1.6 freq = 700.0 #incl = 15.0 #for M in [1.5, 1.1, 1.8]: for M in [1.4]: print "##################################################" print "M = ", M for R in [10.0]: print "##################################################" print " R = ", R #for incl in [90, 80, 70, 60, 50, 40, 30, 20, 15, 10, 5, 1]: #for incl in [9, 8, 7, 6, 4, 3, 2, 0.5]: for incl in [20.0]: print "##################################################" print " i = ",incl fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format( np.int(freq), np.int(R), M, np.int(incl)) if os.path.isfile( outdir+fname ): continue # Variables in units of solar mass are derived here # and typically presented with full name mass = M radius = R * units.solar_mass_per_km / mass angvel = freq * 2.0*np.pi / units.solar_mass_per_s * mass imgscale = (mass/units.solar_mass_per_km*1.0e5)**2 #cm^2/Msun compactness = np.sqrt(1 - 2/radius) #isotropic radius compactness conf.absolute_tolerance = 1e-12 * radius conf.minimum_stepsize = 1e-10 * radius ################################################## #Define metric and surfaces of the spacetime #S+D metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole) ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.spherical) #Oblate Sch #WORKS #metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole) #ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm_no_quadrupole) #Full AGM + oblate #metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_standard) #ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm) surfaces = [ ns_surface ] # Build and configure image plane by hand img = Imgplane(conf, metric, surfaces) img.verbose = 1 img.incl = np.deg2rad(incl) #set inclination img.distance = 100000.0*mass #set distance #Locate star edges img.find_boundaries(Nedge=50, reltol=1.0e-4, max_iterations=30) #Build internal coarse grid for the interpolation routines img.generate_internal_grid(Nrad = 80, Nchi = 50 ) img.dissect_geos() #Construct output xy image plane from img object ################################################## ion() visz = Visualize() visz.gs.update(hspace = 0.5) 
visz.compactness = compactness visz.plot(img) #prepare line profile axis object visz.axs[6] = subplot( visz.gs[3,:] ) visz.axs[6].minorticks_on() visz.axs[6].set_xlabel(r'Energy') visz.axs[6].set_ylabel(r'Flux') #Construct image #visz.star(img, spot) #visz.polar(img, spot) visz.dissect(img) visz.star_plot(0.0) visz.polar_dissect(img) visz.polar_plot(0.0) ################################################## # Compute line profile es, yy2 = lineprofile(visz.redshift**4, visz.redshift) #largest deviation from the compactness; builtin max, since np.max(a, b) would treat b as an axis argument dE = max(np.abs(es[0] - compactness), np.abs(compactness - es[-1])) ################################################## #Save redshift into a file fname = 'reds_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format( np.int(freq), np.int(R), M, np.int(incl), ) print 'Saving to a file: '+fname np.savetxt(outdir+fname, visz.redshift.flatten(), delimiter=',', fmt = '%10.9e' ) #Save thetas into a file fname = 'thetas_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format( np.int(freq), np.int(R), M, np.int(incl), ) print 'Saving to a file: '+fname np.savetxt(outdir+fname, visz.thetas.flatten(), delimiter=',', fmt = '%10.9e' ) #Save phi into a file fname = 'phis_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format( np.int(freq), np.int(R), M, np.int(incl), ) print 'Saving to a file: '+fname np.savetxt(outdir+fname, visz.phis.flatten(), delimiter=',', fmt = '%10.9e' ) #redshift limits vmin = compactness - dE vmax = compactness + dE # Line p
rofile ################################################## #ax = subplot(gs[2,2]) #ax.set_xlim(0.8, 1.2) visz.axs[6].plot(e
s, yy2, "b-") pause(1.0) fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format( np.int(freq), np.int(R), M, np.int(incl), ) savefig(outdir+fname) #save lineprofile ################################################## #Finally save to file fname = 'lineprofile_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format( np.int(freq), np.int(R), M, np.int(incl), ) print 'Saving to a file: '+fname np.savetxt(outdir+fname
ganga-devs/ganga
ganga/GangaND280/Tasks/ND280Transform_CSVEvtList.py
Python
gpl-3.0
4,112
0.020185
from GangaCore.GPIDev.Schema import * from GangaCore.GPIDev.Lib.Tasks.common import * from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Job.Job import JobError from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy from GangaCore.Core.exceptions import ApplicationConfigurationError from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy from GangaCore.Utility.logging import getLogger from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset from GangaND280.ND280Splitter import splitCSVFile import GangaCore.GPI as GPI import os logger = getLogger() class ND280Transform_CSVEvtList(ITransform): _schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({ 'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'), }.items()))) _category = 'transforms' _name = 'ND280Transform_CSVEvtList' _exportmethods = ITransform._exportmethods + [ ] def __init__(self): super(ND280Transform_CSVEvtList,self).__init__() def createUnits(self): """Create new units if required given the inputdata""" # call parent for chaining super(ND280Transform_CSVEvtList,self).createUnits() # Look at the application schema and check if there is a csvfile variable try: csvfile = self.application.csvfile except AttributeError: logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !') return subsets = splitCSVFile(self.application.csvfile, self.nbevents) for s,sub in enumerate(subsets): # check if this data is being run over by checking all the names listed ok = False for unit in self.units: if unit.subpartid == s: ok = True if ok: continue # new unit required for this dataset unit = ND280Unit_CSVEvtList() unit.name = "Unit %d" % len(self.units) unit.subpartid = s unit.eventswanted = sub unit.inputdata = self.inputdata[0] self.addUnitT
oTRF( unit ) def createChainUnit( self, parent_units, use_copy_output = True ): """Create a chained unit using the output data from the given units""" # check all parent units for copy_output copy_output_ok = True for parent in parent_units: if not parent.copy_output: copy_output_ok = False # all parent units must be c
ompleted so the outputfiles are filled correctly for parent in parent_units: if parent.status != "completed": return None if not use_copy_output or not copy_output_ok: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # loop over the output files and add them to the ND280LocalDataset - THIS MIGHT NEED SOME WORK! job = GPI.jobs(parent.active_job_ids[0]) for f in job.outputfiles: # should check for different file types and add them as appropriate to the dataset # self.inputdata (== TaskChainInput).include/exclude_file_mask could help with this # This will be A LOT easier with Ganga 6.1 as you can easily map outputfiles -> inputfiles! unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) ) else: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # unit needs to have completed and downloaded before we can get file list if parent.status != "completed": return None # we should be OK so copy all output to the dataset for f in parent.copy_output.files: unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) ) return unit
flgiordano/netcash
+/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_projection_spec.py
Python
bsd-3-clause
10,147
0.00542
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A class that creates resource projection specification.""" import sys from googlecloudsdk.third_party.py27 import py27_copy as copy PROJECTION_ARG_DOC = ' projection: The parent ProjectionSpec.' ALIGN_DEFAULT = 'left' ALIGNMENTS = {'left': lambda s, w: s.ljust(w), 'center': lambda s, w: s.center(w), 'right': lambda s, w: s.rjust(w)} class ProjectionSpec(object): """Creates a resource projection specification. A resource projection is an expression string that contains a list of resource keys with optional attributes. A projector is a method that takes a projection specification and a resource object as input and produces a new JSON-serializable object containing only the values corresponding to the keys in the projection specification. Optional projection key attributes may transform the values in the output JSON-serializable object. Cloud SDK projection attributes are used for output formatting. A default or empty projection expression still produces a projector that converts a resource to a JSON-serializable object. This class is used by the resource projection expression parser to create a resource projection specification from a projection expression string. Attributes: aliases: The short key name alias dictionary. _active: The transform active level. Incremented each time Defaults() is called. Used to determine active transforms. attributes: Projection attributes dict indexed by attribute name. _columns: A list of (key,_Attribute) tuples used to project a resource to a list of columns. _compiler: The projection compiler method for nested projections. _empty: An empty projection _Tree used by Projector(). _name: The projection name from the expression string. _tree: The projection _Tree root, used by resource_projector.Evaluate() to efficiently project each resource. symbols: Default and caller-defined transform function dict indexed by function name. """ DEFAULT = 0 # _Attribute default node flag. INNER = 1 # _Attribute inner node flag. PROJECT = 2 # _Attribute project node flag. class _Column(object): """Column key and transform attribute for self._columns. Attributes: key: The column key. attribute: The column key _Attribute. """ def __init__(self, key, attribute): self.key = key self.attribute = attribute def __init__(self, defaults=None, symbols=None, compiler=None): """Initializes a projection. Args: defaults: resource_projection_spec.ProjectionSpec defaults. symbols: Transform function symbol table dict indexed by function name. compiler: The projection compiler method for nested projections. 
""" self.aliases = {} self.attributes = {} self._columns = [] self._compiler = compiler self._empty = None self._name = None self._snake_headings = {} self._snake_re = None if defaults: self._active = defaults.active self._tree = copy.deepcopy(defaults.GetRoot()) self.Defaults() if defaults.symbols: self.symbols = copy.deepcopy(defaults.symbols) if symbols: self.symbols.update(symbols) else: self.symbols = symbols if symbols else {} self.aliases.update(defaults.aliases) else: self._active = 0 self._tree = None self.symbols = symbols @property def active(self): """Gets the transform active level.""" return self._active @property def compiler(self): """Returns the projection compiler method for nested projections.""" return self._compiler def _Defaults(self, projection): """Defaults() helper -- converts a projection to a default projection. Args: projection: A node in the original projection _Tree. """ projection
.attribute.flag = self.DEFAULT for node in projection.tree.values(): self._Defaults(node) def _Print(self, projection, out, level): """Print() helper -- prints projection node p and its children. Args: projection: A _Tree node in the original projection. out: The output stream. level: The nesting level counting from 1 at the root. """ for key in projection.tree: out.write('{indent} {key} : {attribute}\n'.format(
indent=' ' * level, key=key, attribute=projection.tree[key].attribute)) self._Print(projection.tree[key], out, level + 1) def AddAttribute(self, name, value): """Adds name=value to the attributes. Args: name: The attribute name. value: The attribute value """ self.attributes[name] = value def DelAttribute(self, name): """Deletes name from the attributes if it is in the attributes. Args: name: The attribute name. """ if name in self.attributes: del self.attributes[name] def AddAlias(self, name, key): """Adds name as an alias for key to the projection. Args: name: The short (no dots) alias name for key. key: The parsed key to add. """ self.aliases[name] = key def AddKey(self, key, attribute): """Adds key and attribute to the projection. Args: key: The parsed key to add. attribute: Parsed _Attribute to add. """ self._columns.append(self._Column(key, attribute)) def SetName(self, name): """Sets the projection name. The projection name is the rightmost of the names in the expression. Args: name: The projection name. """ if self._name: # Reset the name-specific attributes. self.attributes = {} self._name = name def GetRoot(self): """Returns the projection root node. Returns: The resource_projector_parser._Tree root node. """ return self._tree def SetRoot(self, root): """Sets the projection root node. Args: root: The resource_projector_parser._Tree root node. """ self._tree = root def GetEmpty(self): """Returns the projector resource_projector_parser._Tree empty node. Returns: The projector resource_projector_parser._Tree empty node. """ return self._empty def SetEmpty(self, node): """Sets the projector resource_projector_parser._Tree empty node. The empty node is used by to apply [] empty slice projections. Args: node: The projector resource_projector_parser._Tree empty node. """ self._empty = node def Columns(self): """Returns the projection columns. Returns: The columns in the projection, None if the entire resource is projected. """ return self._columns def ColumnCount(self): """Returns the number of columns in the projection. Returns: The number of columns in the projection, 0 if the entire resource is projected. """ return len(self._columns) def Defaults(self): """Converts the projection to a default projection. A default projection provides defaults for attribute values and function symbols. An explicit non-default projection value always overrides the corresponding default value. """ if self._tree: self._Defaults(self._tree) self._columns = [] self._active += 1 def Aliases(self): """Returns the short key name alias dictionary. This dictionary maps short (no dots) names to parsed keys. Returns: The short key name alias dictionary. """ return self.aliases def Attributes(self): """Returns the projection _Attribute dictionary.
danrschlosser/eventum
tests/test_text.py
Python
mit
790
0
import pytest from eventum.lib.text import clean_markdown @pytest.mark.parametrize(["markdown", "output"], [ ('**Bold** text is unbolded.', 'Bold text is unbolded.'), ('So is *underlined* text.', 'So is underlined text.'), ('An [](http://empty-link).', 'An.'), ('A [test](https://adicu.com)', 'A test (
https://adicu.com)'), ('A [test](http://adicu.com)', 'A test (http://adicu.com)'), ('A [test](garbage) passes.', 'A test passes.'), ('An ![image](http://anything) gets removed.', 'An gets removed.'), ('An ![image](garbage), [link](http://adicu.com), and an ' '[![image in a link](imgurl)](http://adicu.com).', 'An, link (http://adicu.com), and an.'), ]) def test_clean_markdown(markdown, output):
assert clean_markdown(markdown) == output
jedie/pypyjs-standalone
website/js/pypy.js-0.3.0/lib/modules/distutils/sysconfig_pypy.py
Python
mit
4,713
0.001485
"""Provide access to Python's configuration information. This is actually PyPy's minimal configuration information. The specific configuration variables available depend heavily on the platform and configuration. The values may be retrieved using get_config_var(name), and the list of variables is available via get_config_vars().keys(). Additional convenience functions are also available. """ __revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import sys import os import shlex from distutils.errors import DistutilsPlatformError PREFIX = os.path.normpath(sys.prefix) EXEC_PREFIX = os.path.normpath(sys.exec_prefix) project_base = os.path.dirname(os.path.abspath(sys.executable)) python_build = False def get_python_inc(plat_specific=0, prefix=None): from os.path import join as j return j(sys.prefix, 'include') def get_python_version(): """Return a string containing the major and minor Python version, leaving off the patchlevel. Sample return values could be '1.5' or '2.2'. """ return sys.version[:3] def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): """Return the directory containing the Python library (standard or site additions). If 'plat_specific' is true, return the directory containing platform-specific modules, i.e. any module from a non-pure-Python module distribution; otherwise, return the platform-shared library directory. If 'standard_lib' is true, return the directory containing standard Python library modules; otherwise, return the directory for site-specific modules. If 'prefix' is supplied, use it instead of sys.prefix or sys.exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: prefix = PREFIX if standard_lib: return os.path.join(prefix, "lib-python", get_python_version()) return os.path.join(prefix, 'site-packages') _config_vars = None def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check global _config_vars _config_vars = g def _init_nt(): """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars _config_vars = g def get_config_vars(*args): """With no arguments,
return a dictionary of all configuration variables relevant for the current platform. Generally th
is includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's installed Makefile; on Windows and Mac OS it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _config_vars if _config_vars is None: func = globals().get("_init_" + os.name) if func: func() else: _config_vars = {} _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX if args: vals = [] for name in args: vals.append(_config_vars.get(name)) return vals else: return _config_vars def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ return get_config_vars().get(name) def customize_compiler(compiler): """Dummy method to let some easy_install packages that have optional C speedup components. """ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CPPFLAGS" in os.environ: cppflags = shlex.split(os.environ["CPPFLAGS"]) compiler.compiler.extend(cppflags) compiler.compiler_so.extend(cppflags) compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) if "LDFLAGS" in os.environ: ldflags = shlex.split(os.environ["LDFLAGS"]) compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( parse_makefile, _variable_rx, expand_makefile_vars)
projectbuendia/server-status
libraries/Adafruit_Nokia_LCD/setup.py
Python
apache-2.0
591
0.099831
from ez_setup import use
_setuptools use_setuptools() from setuptools import setup, find_packages setup(name = 'Adafruit_Nokia_LCD', version = '0.1.0', author = 'Tony DiCola', author_email = 'tdicola@adafruit.com', description = 'Library to display images on the Nokia 5110/3110 LCD.', license = 'MIT', url = 'https://github.com/adafruit/Adafruit_Nokia_LCD/', dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.1.0'],
install_requires = ['Adafruit-GPIO>=0.1.0'], packages = find_packages())
zmlabe/IceVarFigs
Scripts/SeaIce/JAXA_seaice_movinglines.py
Python
mit
6,992
0.034611
""" Plots Arctic sea ice extent from June 2002-present using JAXA metadata Website : https://ads.nipr.ac.jp/vishop/vishop-extent.html Author : Zachary M. Labe Date : 4 August 2016 """ ### Import modules import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation import matplotlib import datetime import urllib.request import urllib as UL ### Directory and time directory = './Figures/' source = 'twitter' now = datetime.datetime.now() currentmn = str(now.month) currentdy = str(now.day-1) currentyr = str(now.year) currenttime = currentmn + '_' + currentdy + '_' + currentyr ### Load url url = 'https://ads.nipr.ac.jp/vishop.ver1/data/graph/plot_extent_n_v2.csv' ### Read file raw_data = UL.request.urlopen(url) dataset = np.genfromtxt(raw_data, skip_header=0,delimiter=",",) ### Set missing data to nan dataset[np.where(dataset==-9999)] = np.nan ### Variables month = dataset[1:,0] # 1-12, nan as month[0] day = dataset[1:,1] # 1-31, nan as day[0] mean1980 = dataset[1:,2] # km^2, nan as mean1980[0] mean1990 = dataset[1:,3] # km^2, nan as mean1990[0] mean2000 = dataset[1:,4] # km^2, nan as mean2000[0] years = dataset[1:,5:] doy = np.arange(0,len(day),1) ### Change units to million km^2 years = years/1e6 ### Recent day of current year currentyear = years[:,-1] lastday = now.timetuple().tm_yday -1 currentice = currentyear[lastday] currentanom = currentice - (mean1980[lastday]/1e6) ### Leap
year currentyear[59] = currentyear[58] ### Changes weekchange = currentice - currentyear[lastday-7
] daychange = currentice - currentyear[lastday-1] ### Make plot matplotlib.rc('savefig', facecolor='black') matplotlib.rc('axes', edgecolor='white') matplotlib.rc('xtick', color='white') matplotlib.rc('ytick', color='white') matplotlib.rc('axes', labelcolor='white') matplotlib.rc('axes', facecolor='black') plt.rc('text',usetex=True) plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) fig = plt.figure() ax = plt.subplot(111) ### Adjust axes in time series plots def adjust_spines(ax, spines): for loc, spine in ax.spines.items(): if loc in spines: spine.set_position(('outward', 5)) else: spine.set_color('none') if 'left' in spines: ax.yaxis.set_ticks_position('left') else: ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') else: ax.xaxis.set_ticks([]) plt.plot(doy,mean1980/1e6,linewidth=1,linestyle='--', color='darkmagenta',label=r'1980s Mean',zorder=1) plt.plot(doy,mean1990/1e6,linewidth=1,linestyle='--', color='c',label=r'1990s Mean',zorder=1) plt.plot(doy,mean2000/1e6,linewidth=1,linestyle='--', color='dodgerblue',label=r'2000s Mean',zorder=1) bar4, = ax.plot(doy,years[:,-2],color='salmon',label=r'Year 2017',linewidth=1.8, alpha=1,zorder=3) bar2, = ax.plot(doy,years[:,-7],color='gold',label=r'Year 2012',linewidth=1.8, alpha=1,zorder=3) bar3, = ax.plot(doy,years[:,5],color='white',label=r'Year 2007',linewidth=1.8, alpha=1,zorder=2) bar, = ax.plot(doy,currentyear,linewidth=2.5,zorder=4,color='r') plt.scatter(doy[lastday],currentyear[lastday], s=25,color='r',zorder=4) xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul', r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan'] strmonth = xlabels[int(currentmn)-1] asof = strmonth + ' ' + currentdy + ', ' + currentyr plt.text(0.6,3.9,r'\textbf{DATA:} JAXA (Arctic Data archive System, NIPR)', fontsize=6,rotation='horizontal',ha='left',color='w',alpha=0.6) plt.text(0.6,3.5,r'\textbf{SOURCE:} https://ads.nipr.ac.jp/vishop/vishop-extent.html', fontsize=6,rotation='horizontal',ha='left',color='w',alpha=0.6) plt.text(0.6,3.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)', fontsize=6,rotation='horizontal',ha='left',color='w',alpha=0.6) ### Insert sea ice text if lastday <= 365: xcord = 120 ycord = 10 plt.text(xcord-4,ycord-0.65,r'\textbf{%s}' '\n' r'\textbf{%s} \textbf{km}$^2$' \ % (asof,format(currentice*1e6,",f")[:-7]),fontsize=10, rotation='horizontal',ha='right',color='w',alpha=0.6) if lastday <= 365: plt.text(xcord-4,ycord-2.5,r'\textbf{7--day change}'\ '\n' r'\textbf{%s} \textbf{km}$^2$'\ % (format(weekchange*1e6,",f")[:-7]),fontsize=10, rotation='horizontal',ha='right',color='w',alpha=0.6) plt.text(xcord-4,ycord-4,r'\textbf{1--day change}' \ '\n' r'\textbf{%s} \textbf{km}$^2$'\ % (format((daychange*1e6),",f")[:-7]),fontsize=10, rotation='horizontal',ha='right',color='w',alpha=0.6) adjust_spines(ax, ['left', 'bottom']) ax.spines['top'].set_color('none') ax.spines['right'].set_color('none') ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) plt.ylabel(r'\textbf{Extent [$\bf{\times}$10$^{6}$\ \textbf{km}$^2$]}', fontsize=15,alpha=0.6) l = plt.legend(shadow=False,fontsize=6,loc='upper left', bbox_to_anchor=(0.655, 1.013),fancybox=True,ncol=2) for text in l.get_texts(): text.set_color('w') plt.xticks(np.arange(0,361,30),xlabels,rotation=0,fontsize=10) ylabels = map(str,np.arange(2,18,1)) plt.yticks(np.arange(2,18,1),ylabels,fontsize=10) plt.ylim([3,16]) plt.xlim([0,360]) ax.yaxis.grid(zorder=1,color='w',alpha=0.35) fig.suptitle(r'\textbf{ARCTIC SEA ICE}', fontsize=33,color='w',alpha=0.6) 
ax.tick_params('both',length=5.5,width=2,which='major') year2012 = years[:,-7] year2007 = years[:,5] year2017 = years[:,-2] def update(num,doy,currentyear,year2017,year2012,year2007,bar,bar2,bar4): bar.set_data(doy[:num+1],currentyear[:num+1]) bar.axes.axis([0,360,3,16]) bar2.set_data(doy[:num+1],year2012[:num+1]) bar2.axes.axis([0,360,3,16]) bar3.set_data(doy[:num+1],year2007[:num+1]) bar3.axes.axis([0,360,3,16]) bar4.set_data(doy[:num+1],year2017[:num+1]) bar4.axes.axis([0,360,3,16]) return bar, ani = animation.FuncAnimation(fig,update,370,fargs=[doy,currentyear,year2017,year2012,year2007,bar,bar2,bar4], interval=.001,blit=True) ani.save(directory + 'moving_SIE_JAXA.gif',dpi=150) print('\n') print('JAXA Sea Ice Loss Missing Days') print('Day 5 Loss = %s km^2' % ((currentyear[lastday-4] - currentyear[lastday-5])*1e6)) print('Day 4 Loss = %s km^2' % ((currentyear[lastday-3] - currentyear[lastday-4])*1e6)) print('Day 3 Loss = %s km^2' % ((currentyear[lastday-2] - currentyear[lastday-3])*1e6)) print('Day 2 Loss = %s km^2' % ((currentyear[lastday-1] - currentyear[lastday-2])*1e6)) print('Day 1 Loss = %s km^2' % ((currentyear[lastday] - currentyear[lastday-1])*1e6)) print('\n' 'Total 5-day Loss = %s km^2' % ((currentyear[lastday]-currentyear[lastday-5])*1e6)) print('\n')