Dataset columns (name, dtype, observed value range or class count):

column            dtype     values / range
repo_name         string    lengths 5 to 92
path              string    lengths 4 to 221
copies            string    19 distinct values
size              string    lengths 4 to 6
content           string    lengths 766 to 896k
license           string    15 distinct values
hash              int64     -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64   6.51 to 99.9
line_max          int64     32 to 997
alpha_frac        float64   0.25 to 0.96
autogenerated     bool      1 class
ratio             float64   1.5 to 13.6
config_test       bool      2 classes
has_no_keywords   bool      2 classes
few_assignments   bool      1 class
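Each record below pairs one source file (the content field, flattened to a single line) with its repository metadata and quality heuristics. As a minimal sketch of how records with this schema might be loaded and filtered, assuming the Hugging Face `datasets` library is available; the dataset identifier below is a placeholder, since this preview does not name the dataset:

from datasets import load_dataset

# "someuser/code-dataset" is a hypothetical identifier; substitute the real
# dataset name or a local path, neither of which is given in this preview.
ds = load_dataset("someuser/code-dataset", split="train")

# Filter on the metadata columns: permissive licenses, human-written files,
# and a bounded maximum line length.
subset = ds.filter(
    lambda row: row["license"] in {"mit", "apache-2.0", "bsd-3-clause"}
    and not row["autogenerated"]
    and row["line_max"] <= 200
)
print(len(subset), subset[0]["repo_name"], subset[0]["path"])

Note that size is typed as a string in this schema, so numeric comparisons on it would need an int(...) cast inside the filter.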
Ircam-Web/mezzanine-organization
organization/agenda/migrations/0033_dynamicmultimediaevent.py
1
1459
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-11-30 10:33 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import mezzanine.core.fields class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0002_remove_content_type_name'), ('mezzanine_agenda', '0028_auto_20180926_1235'), ('organization-agenda', '0032_auto_20181108_1636'), ] operations = [ migrations.CreateModel( name='DynamicMultimediaEvent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('_order', mezzanine.core.fields.OrderField(null=True, verbose_name='Order')), ('object_id', models.PositiveIntegerField(editable=False, null=True, verbose_name='related object')), ('content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='content type')), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dynamic_multimedia', to='mezzanine_agenda.Event', verbose_name='event')), ], options={ 'verbose_name': 'Multimedia', 'ordering': ('_order',), }, ), ]
agpl-3.0
7,624,932,403,393,662,000
43.212121
199
0.626456
false
3.90107
false
false
false
JamieCressey/apt-s3
apts3/__init__.py
1
6562
# Copyright 2016 Jamie Cressey # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os.path import boto3 import botocore.exceptions import logging import sys import json import pwd import os import apt.resources from datetime import datetime, timedelta from time import sleep __author__ = 'Jamie Cressey' __version__ = '0.9.0' class AptS3(object): def __init__(self, args): self.log = self._logger() self.args = args self.debs = args.files.split() if args.action == 'upload': self.upload_debs() elif args.action == 'delete': self.delete_debs() else: self.log.error('Unknown command: {}'.format(args.action)) def _logger(self): log = logging.getLogger('apt-s3') log.setLevel(logging.INFO) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( '%(asctime)s %(levelname)-8s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S')) log.addHandler(handler) return log def _s3_conn(self): boto3.setup_default_session( profile_name=self.args.profile, region_name=self.args.region) self.s3 = boto3.client('s3') def _check_debs_exist(self, deb): if not os.path.isfile(deb): self.log.error('File {0} doesn\'t exist'.format(deb)) exit(1) def _check_lock(self, arch): if self.args.lock: lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format( self.args.codename, self.args.component, arch) ts_now = datetime.utcnow() ts_stop = ts_now + timedelta(seconds=self.args.lock_timeout) while ts_now < ts_stop: try: lock = self.s3.get_object( Bucket=self.args.bucket, Key=lockfile) lock_body = json.loads(lock['Body'].read()) self.log.info( "Repository is locked by another user: {0}@{1}".format( lock_body['user'], lock_body['host'])) ts_now = datetime.utcnow() ts_lock = lock['LastModified'].replace(tzinfo=None) ts_diff = ts_now - ts_lock if ts_diff.seconds > self.args.lock_timeout: self.log.error( 'Repository lock is too old: {}. Please investigate.'.format(ts_diff)) exit(1) sleep(10) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'NoSuchKey': break else: raise self.log.info("Attempting to obtain a lock") lock_body = json.dumps({ "user": pwd.getpwuid(os.getuid()).pw_name, "host": os.uname()[1] }) self.s3.put_object( Body=lock_body, Bucket=self.args.bucket, Key=lockfile) self.log.info("Locked repository for updates") def _delete_lock(self, arch): if self.args.lock: self.log.info('Removing lockfile') lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format( self.args.codename, self.args.component, arch) self.s3.delete_object( Bucket=self.args.bucket, Key=lockfile) def _parse_manifest(self, arch): self.manifests[arch] = apt.resources.Manifest( bucket=self.args.bucket, codename=self.args.codename, component=self.args.component, architecture=arch, visibility=self.args.visibility, s3=self.s3) def _parse_package(self, deb): self.log.info("Examining package file {}".format(deb)) pkg = apt.resources.Package(deb) if self.args.arch: arch = self.args.arch elif pkg.architecture: arch = pkg.architecture else: self.log.error( "No architecture given and unable to determine one for {0}. Please specify one with --arch [i386|amd64].".format(deb)) exit(1) if arch == 'all' and len(self.manifests) == 0: self.log.error( 'Package {0} had architecture "all" however no existing package lists exist. This can often happen if the first package you are adding to a new repository is an "all" architecture file. Please use --arch [i386|amd64] or another platform type to upload the file.'.format(deb)) exit(1) if arch not in self.manifests: self._parse_manifest(arch) self.manifests[arch].add(pkg) if arch == 'all': self.packages_arch_all.append(pkg) def _update_manifests(self): for arch, manifest in self.manifests.iteritems(): if arch == 'all': continue for pkg in self.packages_arch_all: manifest.add(pkg) def _upload_manifests(self): self.log.info('Uploading packages and new manifests to S3') for arch, manifest in self.manifests.iteritems(): self._check_lock(arch) manifest.write_to_s3() self.release.update_manifest(manifest) self.log.info('Update complete.') self._delete_lock(arch) def upload_debs(self): if not self.debs: self.log.error('You must specify at least one file to upload') exit(1) map(self._check_debs_exist, self.debs) self._s3_conn() self.log.info("Retrieving existing manifests") self.release = apt.resources.Release(self.args) self.manifests = {} map(self._parse_manifest, self.release['architectures']) self.packages_arch_all = [] map(self._parse_package, self.debs)
apache-2.0
-2,980,966,164,605,318,700
32.141414
287
0.556995
false
4.015912
false
false
false
noam09/deluge-telegramer
telegramer/include/telegram/passport/credentials.py
1
17262
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. try: import ujson as json except ImportError: import json from base64 import b64decode from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import OAEP, MGF1 from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers.algorithms import AES from cryptography.hazmat.primitives.ciphers.modes import CBC from cryptography.hazmat.primitives.hashes import SHA512, SHA256, Hash, SHA1 from future.utils import bord from telegram import TelegramObject, TelegramError class TelegramDecryptionError(TelegramError): """ Something went wrong with decryption. """ def __init__(self, message): super(TelegramDecryptionError, self).__init__("TelegramDecryptionError: " "{}".format(message)) def decrypt(secret, hash, data): """ Decrypt per telegram docs at https://core.telegram.org/passport. Args: secret (:obj:`str` or :obj:`bytes`): The encryption secret, either as bytes or as a base64 encoded string. hash (:obj:`str` or :obj:`bytes`): The hash, either as bytes or as a base64 encoded string. data (:obj:`str` or :obj:`bytes`): The data to decrypt, either as bytes or as a base64 encoded string. file (:obj:`bool`): Force data to be treated as raw data, instead of trying to b64decode it. Raises: :class:`TelegramDecryptionError`: Given hash does not match hash of decrypted data. Returns: :obj:`bytes`: The decrypted data as bytes. """ # Make a SHA512 hash of secret + update digest = Hash(SHA512(), backend=default_backend()) digest.update(secret + hash) secret_hash_hash = digest.finalize() # First 32 chars is our key, next 16 is the initialisation vector key, iv = secret_hash_hash[:32], secret_hash_hash[32:32 + 16] # Init an AES-CBC cipher and decrypt the data cipher = Cipher(AES(key), CBC(iv), backend=default_backend()) decryptor = cipher.decryptor() data = decryptor.update(data) + decryptor.finalize() # Calculate SHA256 hash of the decrypted data digest = Hash(SHA256(), backend=default_backend()) digest.update(data) data_hash = digest.finalize() # If the newly calculated hash did not match the one telegram gave us if data_hash != hash: # Raise an error that is caught inside telegram.PassportData and transformed into a warning raise TelegramDecryptionError("Hashes are not equal! {} != {}".format(data_hash, hash)) # Return data without padding return data[bord(data[0]):] def decrypt_json(secret, hash, data): """Decrypts data using secret and hash and then decodes utf-8 string and loads json""" return json.loads(decrypt(secret, hash, data).decode('utf-8')) class EncryptedCredentials(TelegramObject): """Contains data required for decrypting and authenticating EncryptedPassportElement. See the Telegram Passport Documentation for a complete description of the data decryption and authentication processes. Attributes: data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's nonce, data hashes and secrets used for EncryptedPassportElement decryption and authentication or base64 encrypted data. hash (:obj:`str`): Base64-encoded data hash for data authentication. secret (:obj:`str`): Decrypted or encrypted secret used for decryption. Args: data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's nonce, data hashes and secrets used for EncryptedPassportElement decryption and authentication or base64 encrypted data. hash (:obj:`str`): Base64-encoded data hash for data authentication. secret (:obj:`str`): Decrypted or encrypted secret used for decryption. **kwargs (:obj:`dict`): Arbitrary keyword arguments. Note: This object is decrypted only when originating from :obj:`telegram.PassportData.decrypted_credentials`. """ def __init__(self, data, hash, secret, bot=None, **kwargs): # Required self.data = data self.hash = hash self.secret = secret self._id_attrs = (self.data, self.hash, self.secret) self.bot = bot self._decrypted_secret = None self._decrypted_data = None @classmethod def de_json(cls, data, bot): if not data: return None data = super(EncryptedCredentials, cls).de_json(data, bot) return cls(bot=bot, **data) @property def decrypted_secret(self): """ :obj:`str`: Lazily decrypt and return secret. Raises: telegram.TelegramDecryptionError: Decryption failed. Usually due to bad private/public key but can also suggest malformed/tampered data. """ if self._decrypted_secret is None: # Try decrypting according to step 1 at # https://core.telegram.org/passport#decrypting-data # We make sure to base64 decode the secret first. # Telegram says to use OAEP padding so we do that. The Mask Generation Function # is the default for OAEP, the algorithm is the default for PHP which is what # Telegram's backend servers run. try: self._decrypted_secret = self.bot.private_key.decrypt(b64decode(self.secret), OAEP( mgf=MGF1(algorithm=SHA1()), algorithm=SHA1(), label=None )) except ValueError as e: # If decryption fails raise exception raise TelegramDecryptionError(e) return self._decrypted_secret @property def decrypted_data(self): """ :class:`telegram.Credentials`: Lazily decrypt and return credentials data. This object also contains the user specified nonce as `decrypted_data.nonce`. Raises: telegram.TelegramDecryptionError: Decryption failed. Usually due to bad private/public key but can also suggest malformed/tampered data. """ if self._decrypted_data is None: self._decrypted_data = Credentials.de_json(decrypt_json(self.decrypted_secret, b64decode(self.hash), b64decode(self.data)), self.bot) return self._decrypted_data class Credentials(TelegramObject): """ Attributes: secure_data (:class:`telegram.SecureData`): Credentials for encrypted data nonce (:obj:`str`): Bot-specified nonce """ def __init__(self, secure_data, nonce, bot=None, **kwargs): # Required self.secure_data = secure_data self.nonce = nonce self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['secure_data'] = SecureData.de_json(data.get('secure_data'), bot=bot) return cls(bot=bot, **data) class SecureData(TelegramObject): """ This object represents the credentials that were used to decrypt the encrypted data. All fields are optional and depend on fields that were requested. Attributes: personal_details (:class:`telegram.SecureValue`, optional): Credentials for encrypted personal details. passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted passport. internal_passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted internal passport. driver_license (:class:`telegram.SecureValue`, optional): Credentials for encrypted driver license. identity_card (:class:`telegram.SecureValue`, optional): Credentials for encrypted ID card address (:class:`telegram.SecureValue`, optional): Credentials for encrypted residential address. utility_bill (:class:`telegram.SecureValue`, optional): Credentials for encrypted utility bill. bank_statement (:class:`telegram.SecureValue`, optional): Credentials for encrypted bank statement. rental_agreement (:class:`telegram.SecureValue`, optional): Credentials for encrypted rental agreement. passport_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted registration from internal passport. temporary_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted temporary registration. """ def __init__(self, personal_details=None, passport=None, internal_passport=None, driver_license=None, identity_card=None, address=None, utility_bill=None, bank_statement=None, rental_agreement=None, passport_registration=None, temporary_registration=None, bot=None, **kwargs): # Optionals self.temporary_registration = temporary_registration self.passport_registration = passport_registration self.rental_agreement = rental_agreement self.bank_statement = bank_statement self.utility_bill = utility_bill self.address = address self.identity_card = identity_card self.driver_license = driver_license self.internal_passport = internal_passport self.passport = passport self.personal_details = personal_details self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['temporary_registration'] = SecureValue.de_json(data.get('temporary_registration'), bot=bot) data['passport_registration'] = SecureValue.de_json(data.get('passport_registration'), bot=bot) data['rental_agreement'] = SecureValue.de_json(data.get('rental_agreement'), bot=bot) data['bank_statement'] = SecureValue.de_json(data.get('bank_statement'), bot=bot) data['utility_bill'] = SecureValue.de_json(data.get('utility_bill'), bot=bot) data['address'] = SecureValue.de_json(data.get('address'), bot=bot) data['identity_card'] = SecureValue.de_json(data.get('identity_card'), bot=bot) data['driver_license'] = SecureValue.de_json(data.get('driver_license'), bot=bot) data['internal_passport'] = SecureValue.de_json(data.get('internal_passport'), bot=bot) data['passport'] = SecureValue.de_json(data.get('passport'), bot=bot) data['personal_details'] = SecureValue.de_json(data.get('personal_details'), bot=bot) return cls(bot=bot, **data) class SecureValue(TelegramObject): """ This object represents the credentials that were used to decrypt the encrypted value. All fields are optional and depend on the type of field. Attributes: data (:class:`telegram.DataCredentials`, optional): Credentials for encrypted Telegram Passport data. Available for "personal_details", "passport", "driver_license", "identity_card", "identity_passport" and "address" types. front_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted document's front side. Available for "passport", "driver_license", "identity_card" and "internal_passport". reverse_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted document's reverse side. Available for "driver_license" and "identity_card". selfie (:class:`telegram.FileCredentials`, optional): Credentials for encrypted selfie of the user with a document. Can be available for "passport", "driver_license", "identity_card" and "internal_passport". translation (List[:class:`telegram.FileCredentials`], optional): Credentials for an encrypted translation of the document. Available for "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration". files (List[:class:`telegram.FileCredentials`], optional): Credentials for encrypted files. Available for "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration" types. """ def __init__(self, data=None, front_side=None, reverse_side=None, selfie=None, files=None, translation=None, bot=None, **kwargs): self.data = data self.front_side = front_side self.reverse_side = reverse_side self.selfie = selfie self.files = files self.translation = translation self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['data'] = DataCredentials.de_json(data.get('data'), bot=bot) data['front_side'] = FileCredentials.de_json(data.get('front_side'), bot=bot) data['reverse_side'] = FileCredentials.de_json(data.get('reverse_side'), bot=bot) data['selfie'] = FileCredentials.de_json(data.get('selfie'), bot=bot) data['files'] = FileCredentials.de_list(data.get('files'), bot=bot) data['translation'] = FileCredentials.de_list(data.get('translation'), bot=bot) return cls(bot=bot, **data) def to_dict(self): data = super(SecureValue, self).to_dict() data['files'] = [p.to_dict() for p in self.files] data['translation'] = [p.to_dict() for p in self.translation] return data class _CredentialsBase(TelegramObject): """Base class for DataCredentials and FileCredentials.""" def __init__(self, hash, secret, bot=None, **kwargs): self.hash = hash self.secret = secret # Aliases just to be sure self.file_hash = self.hash self.data_hash = self.hash self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None return cls(bot=bot, **data) @classmethod def de_list(cls, data, bot): if not data: return [] credentials = list() for c in data: credentials.append(cls.de_json(c, bot=bot)) return credentials class DataCredentials(_CredentialsBase): """ These credentials can be used to decrypt encrypted data from the data field in EncryptedPassportData. Args: data_hash (:obj:`str`): Checksum of encrypted data secret (:obj:`str`): Secret of encrypted data Attributes: hash (:obj:`str`): Checksum of encrypted data secret (:obj:`str`): Secret of encrypted data """ def __init__(self, data_hash, secret, **kwargs): super(DataCredentials, self).__init__(data_hash, secret, **kwargs) def to_dict(self): data = super(DataCredentials, self).to_dict() del data['file_hash'] del data['hash'] return data class FileCredentials(_CredentialsBase): """ These credentials can be used to decrypt encrypted files from the front_side, reverse_side, selfie and files fields in EncryptedPassportData. Args: file_hash (:obj:`str`): Checksum of encrypted file secret (:obj:`str`): Secret of encrypted file Attributes: hash (:obj:`str`): Checksum of encrypted file secret (:obj:`str`): Secret of encrypted file """ def __init__(self, file_hash, secret, **kwargs): super(FileCredentials, self).__init__(file_hash, secret, **kwargs) def to_dict(self): data = super(FileCredentials, self).to_dict() del data['data_hash'] del data['hash'] return data
gpl-3.0
-7,892,456,958,463,140,000
38.321185
99
0.630344
false
4.321983
false
false
false
ngsutils/ngsutils
ngsutils/gtf/add_xref.py
1
3246
#!/usr/bin/env python ## category General ## desc Appends name annotation from UCSC Xref file '''Adds gene name annotations to a GTF file (xref) This adds gene name annotations based upon the KnownGene annotations from the UCSC Genome Browser. Gene names will be taken from the kgXref table. This table must be downloaded separately from the UCSC Genome Browser. This assumes that the file will be in tab-delimited format and that there is one line for each transcript. You may specify which column represents the gene name. In the standard "kgXref.txt" file, this is column #5. This will add the following attributes: gene_name ''' import sys import os from ngsutils.support import gzip_reader def gtf_add_xref(gtf, xref, column=4, out=sys.stdout, quiet=False): gene_names = {} if not quiet: sys.stderr.write('Reading xref...\n') for line in gzip_reader(xref): if line[0] == '#': continue cols = line.rstrip().split('\t') gene_names[cols[0]] = cols[column] if not quiet: sys.stderr.write('Reading/writing GTF...\n') for line in gzip_reader(gtf): try: comment = None idx = line.find('#') if idx > -1: if idx == 0: sys.stdout.write(line) continue comment = line[idx:] line = line[:idx] chrom, source, feature, start, end, score, strand, frame, attrs = line.rstrip().split('\t') transcript_id = None for key, val in [x.split(' ') for x in [x.strip() for x in attrs.split(';')] if x]: if val[0] == '"' and val[-1] == '"': val = val[1:-1] if key == 'transcript_id': transcript_id = val if attrs[-1] != ';': attrs = '%s;' % attrs if transcript_id in gene_names: attrs = '%s gene_name "%s";' % (attrs, gene_names[transcript_id]) out.write('\t'.join([chrom, source, feature, start, end, score, strand, frame, attrs])) if comment: out.write('\t%s' % comment) out.write('\n') except: import traceback sys.stderr.write('Error parsing line:\n%s\n' % line) traceback.print_exc() sys.exit(1) def usage(msg=None): if msg: print msg print __doc__ print '''\ Usage: gtfutils add_xref {-col num} filename.gtf kgXref.txt Options: -col num The gene name is stored in column {num} (1-based) (default:5) ''' sys.exit(1) if __name__ == '__main__': gtf = None xref = None column = 4 last = None for arg in sys.argv[1:]: if last == '-col': column = int(arg) - 1 last = None elif not gtf and (os.path.exists(arg) or arg == '-'): gtf = arg elif not xref and (os.path.exists(arg) or arg == '-'): xref = arg elif arg in ['-col']: last = arg if not gtf or not xref: usage() if gtf == '-' and xref == '-': usage('Both GTF and Xref files can not be from stdin') gtf_add_xref(gtf, xref, column)
bsd-3-clause
-469,859,513,389,635,600
29.336449
103
0.541282
false
3.743945
false
false
false
Seegnify/Elasticcrawler
lib/curlheaders.py
1
1702
""" API to extract bits and pieces from CURL (command line utility) headers file. The headers can be obtained by calling: curl -D 'headers' 'url'. Currenlty supported formats are for protocols: HTTP, HTTPS. """ class Curlheaders: # response codes and headers container reponses = list() def __init__(self, headers = None): if headers is not None: self.load(headers) def load(self, headers): # read headers with open(headers) as f: lines = [line.strip() for line in f] # create response list resps = list() line_iter = iter(lines) # consume response code line = next(line_iter, None) resp = dict() resp['code'] = line.split()[1] resp['head'] = dict() # iterate over headers for line in line_iter: if len(line) is 0: # append last response resps.append(resp) # consume response code line = next(line_iter, None) if line is None: break resp = dict() resp['code'] = line.split()[1] resp['head'] = dict() else: # consume response header head = line.find(': ') name = line[0:head].lower() val = line[head+2:len(line)] resp['head'][name] = val # update loaded reponses self.responses = resps def response_count(self): return len(self.responses) def http_code(self, response_index): return self.responses[response_index]['code'] def http_header(self, response_index, header_name): header_name = header_name.lower() try: return self.responses[response_index]['head'][header_name] except KeyError: return None
bsd-3-clause
1,003,504,171,861,701,200
25.59375
77
0.59342
false
3.894737
false
false
false
raviii/ravii
items/fields.py
1
1881
from django.db.models.fields.files import ImageField, ImageFieldFile from PIL import Image import os def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb' """ parts = s.split(".") parts.insert(-1, "thumb") if parts[-1].lower() not in ['jpeg', 'jpg']: parts[-1] = 'jpg' return ".".join(parts) class ThumbnailImageFieldFile(ImageFieldFile): def _get_thumb_path(self): return _add_thumb(self.path) thumb_path = property(_get_thumb_path) def _get_thumb_url(self): return _add_thumb(self.url) thumb_url = property(_get_thumb_url) def save(self, name, content, save=True): super(ThumbnailImageFieldFile, self).save(name, content, save) img = Image.open(self.path) img.thumbnail( (self.field.thumb_width, self.field.thumb_height), Image.ANTIALIAS ) img.save(self.thumb_path, 'JPEG') def delete(self, save=True): if os.path.exists(self.thumb_path): os.remove(self.thumb_path) super(ThumbnailImageFieldFile, self).delete(save) class ThumbnailImageField(ImageField): """ Behaves like a regular ImageField, but stores an extra (JPEG) thumbnail image, providing FIELD.thumb_url and FIELD.thumb_path. Accepts two additional, optional arguments: thumb_width and thumb_height, both defaulting to 128 (pixels). Resizing will preserve aspect ratio while staying inside the requested dimensions; see PIL's Image.thumbnail() method documentation for details. """ attr_class = ThumbnailImageFieldFile def __init__(self, thumb_width=128, thumb_height=128, *args, **kwargs): self.thumb_width = thumb_width self.thumb_height = thumb_height super(ThumbnailImageField, self).__init__(*args, **kwargs)
bsd-3-clause
6,572,445,099,955,995,000
33.2
78
0.654971
false
3.769539
false
false
false
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/tests/test_language.py
1
1657
'''tests for language detection transform .. This software is released under an MIT/X11 open source license. Copyright 2012-2015 Diffeo, Inc. ''' from __future__ import absolute_import import os import pytest import streamcorpus_pipeline from streamcorpus_pipeline._clean_html import clean_html from streamcorpus_pipeline._language import language from streamcorpus import make_stream_item, ContentItem @pytest.mark.skipif(True, reason='no longer used') def test_language(test_data_dir): path = os.path.join(test_data_dir, 'test/raw-unicode-issues.html') si = make_stream_item(None, 'test') si.body = ContentItem(raw=open(path).read()) context = {} lang = language(config={}) lang(si, context) assert si.body.language.name == 'Japanese' assert si.body.language.code == 'ja' @pytest.mark.skipif(True, reason='no longer used') @pytest.mark.parametrize('with_clean_html', [(True,), (False,)]) def test_language_unreliable_on_raw(test_data_dir, with_clean_html): path = os.path.join(test_data_dir, 'test/unreliable-language-detect-on-raw.html') si = make_stream_item(None, 'http://bbs.sjtu.edu.cn/bbsanc?path=%2Fgroups%2FGROUP_0%2Fmessage%2FD4EFC2634%2FD7AC8E3A8%2FG.1092960050.A') raw = open(path).read() #raw = raw.decode('GB2312', 'ignore').encode('utf8') si.body = ContentItem(raw=raw) si.body.encoding = 'GB2312' si.body.media_type = 'text/html' context = {} if with_clean_html: ch = clean_html(config={}) ch(si, context) lang = language(config={}) lang(si, context) assert si.body.language.name == 'Chinese' assert si.body.language.code == 'zh'
mit
-3,371,838,387,927,223,000
35.021739
140
0.694629
false
3.132325
true
false
false
MechanisM/musicdb
musicdb/common/management/commands/initial_import_fixups.py
1
3570
from django.core.management.base import NoArgsCommand from musicdb.classical.models import * class Command(NoArgsCommand): def handle_noargs(self, **options): work_pairs = ( ('felix-mendelssohn', ('string-quartet-in-e-flat', 'string-quartet-in-e-flat-1')), ('ludvig-van-beethoven', ('piano-trio-in-e-flat-triosatz', 'piano-trio-in-e-flat-triosatz-1')), ('fryderyk-chopin', ('ballade-no-4-op-52-in-f-minor', 'ballade-no-4-op-52-in-f-minor-1')), ) for a, (b, c) in work_pairs: try: Work.objects.get(composer__slug=a, slug=b).merge_from( Work.objects.get(composer__slug=a, slug=c) ) except Work.DoesNotExist: print "W: Skipping", a, b, c ensemble_pairs = ( ('chamber-orchestra-of-europe', 'chamber-orchestra-of-europe-1'), ('orquestra-sinfonica-haydn-de-bolzano-e-trento', 'orquestra-sinfonica-haydn-de-bolzano-e-trento-1'), ('i-solisti-veneti', 'i-solisti-veneti-1'), ('london-symphony-orchestra', 'london-symphony-orchestra-principals'), ('vienna-philharmonic-orchestra', 'wiener-philharmoniker'), ) for a, b in ensemble_pairs: try: Ensemble.objects.get(slug=a).merge_from(Ensemble.objects.get(slug=b)) except Ensemble.DoesNotExist: print "W: Skipping", a, b relationships = { 'arrangement': ( ('orchesographie', 'capriol-suite-for-string-orchestra'), ), 'revision': ( ('brandenburg-concerto-no-5-early-version-bwv-1050a-in-d', 'brandenburg-concerto-no-5-bwv-1050-in-d'), ('brandenburg-concerto-no-1-early-version-bwv-1046a-in-f', 'brandenburg-concerto-no-1-bwv-1046-in-f'), ), 'variations': ( ('twelve-variations-on-ah-vous-dirai-je-maman-k-265-in-c', 'romantic-piece-op-18'), ), 'transcription': ( ('brandenburg-concerto-no-4-bwv-1049-in-g', 'concerto-for-harpsichord-and-two-recorders-transcription-of-brandenburg-concerto-no-4-bwv-1057'), ('violin-concerto-bwv-1041-in-a-minor', 'harpsichord-concerto-bwv-1058r-in-g-minor'), ('violin-concerto-bwv-1042-in-e', 'harpsichord-concerto-bwv-1054-in-d'), ('concerto-for-oboe-and-violin-bwv-1060r-in-g-minor', 'concerto-for-two-harpsichords-bwv-1060-in-c-minor'), ('double-violin-concerto-bwv-1043-in-d-minor', 'concerto-for-two-harpsichords-bwv-1062-in-c-minor'), ('concerto-for-three-violins-bwv-1064r-in-d', 'concerto-for-three-harpsichords-bwv-1064-in-c'), ('concerto-for-four-violins-op-3-no-10-rv-580-in-b-minor', 'concerto-for-three-harpsichords-bwv-1064-in-c'), ('concerto-for-oboe-damore-bwv-1055r-in-a', 'harpsichord-concerto-bwv-1055-in-a'), ) } for nature, data in relationships.items(): for x, y in data: WorkRelationship.objects.create( source=Work.objects.get(slug=x), derived=Work.objects.get(slug=y), nature=nature, ) to_delete = () for klass, pks in to_delete: for pk in pks: try: klass.objects.get(pk=pk).delete() except klass.DoesNotExist: print "W: Skipping deletion of", klass, pk
agpl-3.0
4,716,751,957,606,330,000
47.243243
158
0.564986
false
2.862871
false
false
false
michaelBenin/sqlalchemy
lib/sqlalchemy/sql/naming.py
1
5728
# sqlalchemy/naming.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Establish constraint and index naming conventions. """ from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \ UniqueConstraint, CheckConstraint, Index, Table, Column from .. import event, events from .. import exc from .elements import _truncated_label import re class conv(_truncated_label): """Mark a string indicating that a name has already been converted by a naming convention. This is a string subclass that indicates a name that should not be subject to any further naming conventions. E.g. when we create a :class:`.Constraint` using a naming convention as follows:: m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}) t = Table('t', m, Column('x', Integer), CheckConstraint('x > 5', name='x5')) The name of the above constraint will be rendered as ``"ck_t_x5"``. That is, the existing name ``x5`` is used in the naming convention as the ``constraint_name`` token. In some situations, such as in migration scripts, we may be rendering the above :class:`.CheckConstraint` with a name that's already been converted. In order to make sure the name isn't double-modified, the new name is applied using the :func:`.schema.conv` marker. We can use this explicitly as follows:: m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}) t = Table('t', m, Column('x', Integer), CheckConstraint('x > 5', name=conv('ck_t_x5'))) Where above, the :func:`.schema.conv` marker indicates that the constraint name here is final, and the name will render as ``"ck_t_x5"`` and not ``"ck_t_ck_t_x5"`` .. versionadded:: 0.9.4 .. seealso:: :ref:`constraint_naming_conventions` """ class ConventionDict(object): def __init__(self, const, table, convention): self.const = const self._is_fk = isinstance(const, ForeignKeyConstraint) self.table = table self.convention = convention self._const_name = const.name def _key_table_name(self): return self.table.name def _column_X(self, idx): if self._is_fk: fk = self.const.elements[idx] return fk.parent else: return list(self.const.columns)[idx] def _key_constraint_name(self): if not self._const_name: raise exc.InvalidRequestError( "Naming convention including " "%(constraint_name)s token requires that " "constraint is explicitly named." ) if not isinstance(self._const_name, conv): self.const.name = None return self._const_name def _key_column_X_name(self, idx): return self._column_X(idx).name def _key_column_X_label(self, idx): return self._column_X(idx)._label def _key_referred_table_name(self): fk = self.const.elements[0] refs = fk.target_fullname.split(".") if len(refs) == 3: refschema, reftable, refcol = refs else: reftable, refcol = refs return reftable def _key_referred_column_X_name(self, idx): fk = self.const.elements[idx] refs = fk.target_fullname.split(".") if len(refs) == 3: refschema, reftable, refcol = refs else: reftable, refcol = refs return refcol def __getitem__(self, key): if key in self.convention: return self.convention[key](self.const, self.table) elif hasattr(self, '_key_%s' % key): return getattr(self, '_key_%s' % key)() else: col_template = re.match(r".*_?column_(\d+)_.+", key) if col_template: idx = col_template.group(1) attr = "_key_" + key.replace(idx, "X") idx = int(idx) if hasattr(self, attr): return getattr(self, attr)(idx) raise KeyError(key) _prefix_dict = { Index: "ix", PrimaryKeyConstraint: "pk", CheckConstraint: "ck", UniqueConstraint: "uq", ForeignKeyConstraint: "fk" } def _get_convention(dict_, key): for super_ in key.__mro__: if super_ in _prefix_dict and _prefix_dict[super_] in dict_: return dict_[_prefix_dict[super_]] elif super_ in dict_: return dict_[super_] else: return None @event.listens_for(Constraint, "after_parent_attach") @event.listens_for(Index, "after_parent_attach") def _constraint_name(const, table): if isinstance(table, Column): # for column-attached constraint, set another event # to link the column attached to the table as this constraint # associated with the table. event.listen(table, "after_parent_attach", lambda col, table: _constraint_name(const, table) ) elif isinstance(table, Table): metadata = table.metadata convention = _get_convention(metadata.naming_convention, type(const)) if convention is not None: if const.name is None or "constraint_name" in convention: newname = conv( convention % ConventionDict(const, table, metadata.naming_convention) ) if const.name is None: const.name = newname
mit
-7,769,484,997,395,845,000
33.506024
97
0.597416
false
4.022472
false
false
false
jn2840/bitcoin
share/qt/extract_strings_qt.py
1
1875
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt4 stringdefs so that they can be picked up by Qt linguist. ''' from subprocess import Popen, PIPE import glob import operator import os import sys OUT_CPP="qt/bitcoinstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = sys.argv[1:] # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out) f = open(OUT_CPP, 'w') f.write(""" #include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *bitcoin_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("beardcoin-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
mit
1,085,203,892,940,296,700
23.038462
82
0.578667
false
3.517824
false
false
false
anthonyfok/frescobaldi
frescobaldi_app/logtool/__init__.py
1
3820
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/ # # Copyright (c) 2008 - 2014 by Wilbert Berendsen # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # See http://www.gnu.org/licenses/ for more information. """ The log dockwindow. """ from PyQt5.QtCore import QSettings, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QAction import actioncollection import actioncollectionmanager import app import panel class LogTool(panel.Panel): """A dockwidget showing the log of running Jobs.""" def __init__(self, mainwindow): super(LogTool, self).__init__(mainwindow) self.hide() self.toggleViewAction().setShortcut(QKeySequence("Meta+Alt+L")) ac = self.actionCollection = Actions() ac.log_next_error.triggered.connect(self.slotNextError) ac.log_previous_error.triggered.connect(self.slotPreviousError) actioncollectionmanager.manager(mainwindow).addActionCollection(ac) mainwindow.addDockWidget(Qt.BottomDockWidgetArea, self) app.jobStarted.connect(self.slotJobStarted) app.jobFinished.connect(self.slotJobFinished) def translateUI(self): self.setWindowTitle(_("LilyPond Log")) self.toggleViewAction().setText(_("LilyPond &Log")) def createWidget(self): from . import logwidget return logwidget.LogWidget(self) def slotJobStarted(self, doc, job): """Called whenever job starts, decides whether to follow it and show the log.""" import jobattributes jattrs = jobattributes.get(job) if doc == self.mainwindow().currentDocument() or self.mainwindow() == jattrs.mainwindow: self.widget().switchDocument(doc) if not jattrs.hidden and QSettings().value("log/show_on_start", True, bool): self.show() def slotJobFinished(self, document, job, success): import jobattributes if (not success and not job.is_aborted() and not jobattributes.get(job).hidden and document == self.mainwindow().currentDocument()): self.show() def slotNextError(self): """Jumps to the position pointed to by the next error message.""" self.activate() self.widget().gotoError(1) def slotPreviousError(self): """Jumps to the position pointed to by the previous error message.""" self.activate() self.widget().gotoError(-1) class Actions(actioncollection.ActionCollection): name = "logtool" def createActions(self, parent=None): self.log_next_error = QAction(parent) self.log_previous_error = QAction(parent) self.log_next_error.setShortcut(QKeySequence("Ctrl+E")) self.log_previous_error.setShortcut(QKeySequence("Ctrl+Shift+E")) def translateUI(self): self.log_next_error.setText(_("Next Error Message")) self.log_previous_error.setText(_("Previous Error Message")) # log errors by initializing Errors instance @app.jobStarted.connect def _log_errors(document): from . import errors errors.errors(document)
gpl-2.0
-6,433,061,246,334,416,000
36.087379
96
0.685079
false
4.042328
false
false
false
joshgeller/PyPardot
pypardot/objects/opportunities.py
1
3806
class Opportunities(object): """ A class to query and use Pardot opportunities. Opportunity field reference: http://developer.pardot.com/kb/api-version-3/object-field-references/#opportunity """ def __init__(self, client): self.client = client def query(self, **kwargs): """ Returns the opportunities matching the specified criteria parameters. Supported search criteria: http://developer.pardot.com/kb/api-version-3/opportunities/#supported-search-criteria """ response = self._get(path='/do/query', params=kwargs) # Ensure result['opportunity'] is a list, no matter what. result = response.get('result') if result['total_results'] == 0: result['opportunity'] = [] elif result['total_results'] == 1: result['opportunity'] = [result['opportunity']] return result def create_by_email(self, prospect_email=None, name=None, value=None, probability=None, **kwargs): """ Creates a new opportunity using the specified data. <prospect_email> must correspond to an existing prospect. """ kwargs.update({'name': name, 'value': value, 'probability': probability}) response = self._post( path='/do/create/prospect_email/{prospect_email}'.format(prospect_email=prospect_email), params=kwargs) return response def create_by_id(self, prospect_id=None, name=None, value=None, probability=None, **kwargs): """ Creates a new opportunity using the specified data. <prospect_id> must correspond to an existing prospect. """ kwargs.update({'name': name, 'value': value, 'probability': probability}) response = self._post( path='/do/create/prospect_id/{prospect_id}'.format(prospect_id=prospect_id), params=kwargs) return response def read(self, id=None): """ Returns the data for the opportunity specified by <id>, including campaign assignment and associated visitor activities. <id> is the Pardot ID for the target opportunity. """ response = self._post(path='/do/read/id/{id}'.format(id=id)) return response def update(self, id=None): """ Updates the provided data for the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Fields that are not updated by the request remain unchanged. Returns an updated version of the opportunity. """ response = self._post(path='/do/update/id/{id}'.format(id=id)) return response def delete(self, id=None): """ Deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no response on success. """ response = self._post(path='/do/delete/id/{id}'.format(id=id)) return response def undelete(self, id=None): """ Un-deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no response on success. """ response = self._post(path='/do/undelete/id/{id}'.format(id=id)) return response def _get(self, object_name='opportunity', path=None, params=None): """GET requests for the Opportunity object.""" if params is None: params = {} response = self.client.get(object_name=object_name, path=path, params=params) return response def _post(self, object_name='opportunity', path=None, params=None): """POST requests for the Opportunity object.""" if params is None: params = {} response = self.client.post(object_name=object_name, path=path, params=params) return response
mit
-5,657,057,192,978,973,000
40.824176
120
0.625854
false
4.219512
false
false
false
dmsimard/ansible
lib/ansible/plugins/lookup/first_found.py
1
7109
# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ name: first_found author: Seth Vidal (!UNKNOWN) <skvidal@fedoraproject.org> version_added: historical short_description: return first file found from list description: - this lookup checks a list of files and paths and returns the full path to the first combination found. - As all lookups, when fed relative paths it will try to use the current task's location first and go up the chain to the containing role/play/include/etc's location. - The list of files has precedence over the paths searched. i.e., if a task in a role has a 'file1' in the play's relative path, this will be used, 'file2' in the role's relative path will not. - Either a list of files C(_terms) or a key `files` with a list of files is required for this plugin to operate. notes: - This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths). options: _terms: description: list of file names files: description: list of file names type: list default: [] paths: description: list of paths in which to look for the files type: list default: [] skip: type: boolean default: False description: Return an empty list if no file is found, instead of an error. """ EXAMPLES = """ - name: show first existing file or ignore if none do debug: msg={{lookup('first_found', findme, errors='ignore')}} vars: findme: - "/path/to/foo.txt" - "bar.txt" # will be looked in files/ dir relative to role and/or play - "/path/to/biz.txt" - name: | include tasks only if files exist. Note the use of query() to return a blank list for the loop if no files are found. import_tasks: '{{ item }}' vars: params: files: - path/tasks.yaml - path/other_tasks.yaml loop: "{{ query('first_found', params, errors='ignore') }}" - name: | copy first existing file found to /some/file, looking in relative directories from where the task is defined and including any play objects that contain it copy: src={{lookup('first_found', findme)}} dest=/some/file vars: findme: - foo - "{{inventory_hostname}}" - bar - name: same copy but specific paths copy: src={{lookup('first_found', params)}} dest=/some/file vars: params: files: - foo - "{{inventory_hostname}}" - bar paths: - /tmp/production - /tmp/staging - name: INTERFACES | Create Ansible header for /etc/network/interfaces template: src: "{{ lookup('first_found', findme)}}" dest: "/etc/foo.conf" vars: findme: - "{{ ansible_virtualization_type }}_foo.conf" - "default_foo.conf" - name: read vars from first file found, use 'vars/' relative subdir include_vars: "{{lookup('first_found', params)}}" vars: params: files: - '{{ansible_distribution}}.yml' - '{{ansible_os_family}}.yml' - default.yml paths: - 'vars' """ RETURN = """ _raw: description: - path to file found type: list elements: path """ import os from jinja2.exceptions import UndefinedError from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable from ansible.module_utils.common._collections_compat import Mapping, Sequence from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase def _split_on(terms, spliters=','): # TODO: fix as it does not allow spaces in names termlist = [] if isinstance(terms, string_types): for spliter in spliters: terms = terms.replace(spliter, ' ') termlist = terms.split(' ') else: # added since options will already listify for t in terms: termlist.extend(_split_on(t, spliters)) return termlist class LookupModule(LookupBase): def _process_terms(self, terms, variables, kwargs): total_search = [] skip = False # can use a dict instead of list item to pass inline config for term in terms: if isinstance(term, Mapping): self.set_options(var_options=variables, direct=term) elif isinstance(term, string_types): self.set_options(var_options=variables, direct=kwargs) elif isinstance(term, Sequence): partial, skip = self._process_terms(term, variables, kwargs) total_search.extend(partial) continue else: raise AnsibleLookupError("Invalid term supplied, can handle string, mapping or list of strings but got: %s for %s" % (type(term), term)) files = self.get_option('files') paths = self.get_option('paths') # NOTE: this is used as 'global' but can be set many times?!?!? skip = self.get_option('skip') # magic extra splitting to create lists filelist = _split_on(files, ',;') pathlist = _split_on(paths, ',:;') # create search structure if pathlist: for path in pathlist: for fn in filelist: f = os.path.join(path, fn) total_search.append(f) elif filelist: # NOTE: this seems wrong, should be 'extend' as any option/entry can clobber all total_search = filelist else: total_search.append(term) return total_search, skip def run(self, terms, variables, **kwargs): total_search, skip = self._process_terms(terms, variables, kwargs) # NOTE: during refactor noticed that the 'using a dict' as term # is designed to only work with 'one' otherwise inconsistencies will appear. # see other notes below. # actually search subdir = getattr(self, '_subdir', 'files') path = None for fn in total_search: try: fn = self._templar.template(fn) except (AnsibleUndefinedVariable, UndefinedError): continue # get subdir if set by task executor, default to files otherwise path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True) # exit if we find one! if path is not None: return [path] # if we get here, no file was found if skip: # NOTE: global skip won't matter, only last 'skip' value in dict term return [] raise AnsibleLookupError("No file was found when using first_found. Use errors='ignore' to allow this task to be skipped if no files are found")
gpl-3.0
-7,768,599,136,003,885,000
33.014354
152
0.608524
false
4.116387
false
false
false
chrys87/orca-beep
src/orca/speechdispatcherfactory.py
1
20607
# Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s. # # Author: Tomas Cerha <cerha@brailcom.org> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., Franklin Street, Fifth Floor, # Boston MA 02110-1301 USA. # # [[[TODO: richb - Pylint is giving us a bunch of warnings along these # lines throughout this file: # # W0142:202:SpeechServer._send_command: Used * or ** magic # # So for now, we just disable these warnings in this module.]]] # # pylint: disable-msg=W0142 """Provides an Orca speech server for Speech Dispatcher backend.""" __id__ = "$Id$" __version__ = "$Revision$" __date__ = "$Date$" __author__ = "Tomas Cerha <cerha@brailcom.org>" __copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s." __license__ = "LGPL" from gi.repository import GLib import re import time from . import chnames from . import debug from . import guilabels from . import messages from . import speechserver from . import settings from . import orca_state from . import punctuation_settings from .acss import ACSS try: import speechd except: _speechd_available = False else: _speechd_available = True try: getattr(speechd, "CallbackType") except AttributeError: _speechd_version_ok = False else: _speechd_version_ok = True PUNCTUATION = re.compile('[^\w\s]', re.UNICODE) ELLIPSIS = re.compile('(\342\200\246|(?<!\.)\.{3,4}(?=(\s|\Z)))') class SpeechServer(speechserver.SpeechServer): # See the parent class for documentation. _active_servers = {} DEFAULT_SERVER_ID = 'default' _SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER} def getFactoryName(): return guilabels.SPEECH_DISPATCHER getFactoryName = staticmethod(getFactoryName) def getSpeechServers(): servers = [] default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID) if default is not None: servers.append(default) for module in default.list_output_modules(): servers.append(SpeechServer._getSpeechServer(module)) return servers getSpeechServers = staticmethod(getSpeechServers) def _getSpeechServer(cls, serverId): """Return an active server for given id. Attempt to create the server if it doesn't exist yet. Returns None when it is not possible to create the server. """ if serverId not in cls._active_servers: cls(serverId) # Don't return the instance, unless it is succesfully added # to `_active_Servers'. 
return cls._active_servers.get(serverId) _getSpeechServer = classmethod(_getSpeechServer) def getSpeechServer(info=None): if info is not None: thisId = info[1] else: thisId = SpeechServer.DEFAULT_SERVER_ID return SpeechServer._getSpeechServer(thisId) getSpeechServer = staticmethod(getSpeechServer) def shutdownActiveServers(): for server in list(SpeechServer._active_servers.values()): server.shutdown() shutdownActiveServers = staticmethod(shutdownActiveServers) # *** Instance methods *** def __init__(self, serverId): super(SpeechServer, self).__init__() self._id = serverId self._client = None self._current_voice_properties = {} self._acss_manipulators = ( (ACSS.RATE, self._set_rate), (ACSS.AVERAGE_PITCH, self._set_pitch), (ACSS.GAIN, self._set_volume), (ACSS.FAMILY, self._set_family), ) if not _speechd_available: msg = 'ERROR: Speech Dispatcher is not available' debug.println(debug.LEVEL_WARNING, msg, True) return if not _speechd_version_ok: msg = 'ERROR: Speech Dispatcher version 0.6.2 or later is required.' debug.println(debug.LEVEL_WARNING, msg, True) return # The following constants must be initialized in runtime since they # depend on the speechd module being available. self._PUNCTUATION_MODE_MAP = { settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL, settings.PUNCTUATION_STYLE_MOST: speechd.PunctuationMode.SOME, settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME, settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE, } self._CALLBACK_TYPE_MAP = { speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS, speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED, speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED, #speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS, } self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId try: self._init() except: debug.printException(debug.LEVEL_WARNING) msg = 'ERROR: Speech Dispatcher service failed to connect' debug.println(debug.LEVEL_WARNING, msg, True) else: SpeechServer._active_servers[serverId] = self self._lastKeyEchoTime = None def _init(self): self._client = client = speechd.SSIPClient('Orca', component=self._id) client.set_priority(speechd.Priority.MESSAGE) if self._id != self.DEFAULT_SERVER_ID: client.set_output_module(self._id) self._current_voice_properties = {} mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle] client.set_punctuation(mode) def updateCapitalizationStyle(self): """Updates the capitalization style used by the speech server.""" if settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_ICON: style = 'icon' elif settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_SPELL: style = 'spell' else: style = 'none' self._client.set_cap_let_recogn(style) def updatePunctuationLevel(self): """ Punctuation level changed, inform this speechServer. """ mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle] self._client.set_punctuation(mode) def _send_command(self, command, *args, **kwargs): if hasattr(speechd, 'SSIPCommunicationError'): try: return command(*args, **kwargs) except speechd.SSIPCommunicationError: msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect." debug.println(debug.LEVEL_INFO, msg, True) self.reset() return command(*args, **kwargs) except: pass else: # It is not possible tho catch the error with older SD versions. 
            return command(*args, **kwargs)

    def _set_rate(self, acss_rate):
        rate = int(2 * max(0, min(99, acss_rate)) - 98)
        self._send_command(self._client.set_rate, rate)

    def _set_pitch(self, acss_pitch):
        pitch = int(20 * max(0, min(9, acss_pitch)) - 90)
        self._send_command(self._client.set_pitch, pitch)

    def _set_volume(self, acss_volume):
        volume = int(15 * max(0, min(9, acss_volume)) - 35)
        self._send_command(self._client.set_volume, volume)

    def _set_family(self, acss_family):
        familyLocale = acss_family.get(speechserver.VoiceFamily.LOCALE)
        if not familyLocale:
            import locale
            familyLocale, encoding = locale.getdefaultlocale()
        if familyLocale:
            lang = familyLocale.split('_')[0]
            if lang and len(lang) == 2:
                self._send_command(self._client.set_language, str(lang))
        try:
            # This command is not available with older SD versions.
            set_synthesis_voice = self._client.set_synthesis_voice
        except AttributeError:
            pass
        else:
            name = acss_family.get(speechserver.VoiceFamily.NAME)
            if name != self._default_voice_name:
                self._send_command(set_synthesis_voice, name)

    def _debug_sd_values(self, prefix=""):
        if debug.debugLevel > debug.LEVEL_INFO:
            return
        try:
            sd_rate = self._send_command(self._client.get_rate)
            sd_pitch = self._send_command(self._client.get_pitch)
        except:
            sd_rate = "(exception occurred)"
            sd_pitch = "(exception occurred)"

        current = self._current_voice_properties
        msg = "SPEECH DISPATCHER: %sOrca rate %s, pitch %s; " \
              "SD rate %s, pitch %s" % \
              (prefix,
               current.get(ACSS.RATE),
               current.get(ACSS.AVERAGE_PITCH),
               sd_rate,
               sd_pitch)
        debug.println(debug.LEVEL_INFO, msg, True)

    def _apply_acss(self, acss):
        if acss is None:
            acss = settings.voices[settings.DEFAULT_VOICE]
        current = self._current_voice_properties
        for acss_property, method in self._acss_manipulators:
            value = acss.get(acss_property)
            if value is not None:
                if current.get(acss_property) != value:
                    method(value)
                    current[acss_property] = value
            elif acss_property == ACSS.AVERAGE_PITCH:
                method(5.0)
                current[acss_property] = 5.0
            elif acss_property == ACSS.FAMILY \
                    and acss == settings.voices[settings.DEFAULT_VOICE]:
                # We need to explicitly reset (at least) the family.
                # See bgo#626072.
                #
                method({})
                current[acss_property] = {}

    def __addVerbalizedPunctuation(self, oldText):
        """Depending upon the user's verbalized punctuation setting,
        adjust punctuation symbols in the given text to their pronounced
        equivalents. The pronounced text will either replace the
        punctuation symbol or be inserted before it. In the latter case,
        this is to retain spoken prosody.

        Arguments:
        - oldText: text to be parsed for punctuation.

        Returns a text string with the punctuation symbols adjusted
        accordingly.
        """

        spokenEllipsis = messages.SPOKEN_ELLIPSIS + " "
        newText = re.sub(ELLIPSIS, spokenEllipsis, oldText)
        symbols = set(re.findall(PUNCTUATION, newText))
        for symbol in symbols:
            try:
                level, action = punctuation_settings.getPunctuationInfo(symbol)
            except:
                continue

            if level != punctuation_settings.LEVEL_NONE:
                # Speech Dispatcher should handle it.
# continue charName = " %s " % chnames.getCharacterName(symbol) if action == punctuation_settings.PUNCTUATION_INSERT: charName += symbol newText = re.sub(symbol, charName, newText) if orca_state.activeScript: newText = orca_state.activeScript.utilities.adjustForDigits(newText) return newText def _speak(self, text, acss, **kwargs): if isinstance(text, ACSS): text = '' text = self.__addVerbalizedPunctuation(text) if orca_state.activeScript: text = orca_state.activeScript.\ utilities.adjustForPronunciation(text) # Replace no break space characters with plain spaces since some # synthesizers cannot handle them. See bug #591734. # text = text.replace('\u00a0', ' ') # Replace newline followed by full stop, since # this seems to crash sd, see bgo#618334. # text = text.replace('\n.', '\n') self._apply_acss(acss) self._debug_sd_values("Speaking '%s' " % text) self._send_command(self._client.speak, text, **kwargs) def _say_all(self, iterator, orca_callback): """Process another sayAll chunk. Called by the gidle thread. """ try: context, acss = next(iterator) except StopIteration: pass else: def callback(callbackType, index_mark=None): # This callback is called in Speech Dispatcher listener thread. # No subsequent Speech Dispatcher interaction is allowed here, # so we pass the calls to the gidle thread. t = self._CALLBACK_TYPE_MAP[callbackType] if t == speechserver.SayAllContext.PROGRESS: if index_mark: context.currentOffset = int(index_mark) else: context.currentOffset = context.startOffset elif t == speechserver.SayAllContext.COMPLETED: context.currentOffset = context.endOffset GLib.idle_add(orca_callback, context, t) if t == speechserver.SayAllContext.COMPLETED: GLib.idle_add(self._say_all, iterator, orca_callback) self._speak(context.utterance, acss, callback=callback, event_types=list(self._CALLBACK_TYPE_MAP.keys())) return False # to indicate, that we don't want to be called again. def _cancel(self): self._send_command(self._client.cancel) def _change_default_speech_rate(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: rate = acss[ACSS.RATE] except KeyError: rate = 50 acss[ACSS.RATE] = max(0, min(99, rate + delta)) msg = 'SPEECH DISPATCHER: Rate set to %d' % rate debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_SLOWER \ or messages.SPEECH_FASTER, acss=acss) def _change_default_speech_pitch(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: pitch = acss[ACSS.AVERAGE_PITCH] except KeyError: pitch = 5 acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta)) msg = 'SPEECH DISPATCHER: Pitch set to %d' % pitch debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_LOWER \ or messages.SPEECH_HIGHER, acss=acss) def _change_default_speech_volume(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: volume = acss[ACSS.GAIN] except KeyError: volume = 5 acss[ACSS.GAIN] = max(0, min(9, volume + delta)) msg = 'SPEECH DISPATCHER: Volume set to %d' % volume debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_SOFTER \ or messages.SPEECH_LOUDER, acss=acss) def getInfo(self): return [self._SERVER_NAMES.get(self._id, self._id), self._id] def getVoiceFamilies(self): # Always offer the configured default voice with a language # set according to the current locale. 
from locale import getlocale, LC_MESSAGES locale = getlocale(LC_MESSAGES)[0] if locale is None or locale == 'C': lang = None dialect = None else: lang, dialect = locale.split('_') voices = ((self._default_voice_name, lang, None),) try: # This command is not available with older SD versions. list_synthesis_voices = self._client.list_synthesis_voices except AttributeError: pass else: try: voices += self._send_command(list_synthesis_voices) except: pass families = [speechserver.VoiceFamily({ \ speechserver.VoiceFamily.NAME: name, #speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE, speechserver.VoiceFamily.DIALECT: dialect, speechserver.VoiceFamily.LOCALE: lang}) for name, lang, dialect in voices] return families def speak(self, text=None, acss=None, interrupt=True): #if interrupt: # self._cancel() # "We will not interrupt a key echo in progress." (Said the comment in # speech.py where these next two lines used to live. But the code here # suggests we haven't been doing anything with the lastKeyEchoTime in # years. TODO - JD: Dig into this and if it's truly useless, kill it.) if self._lastKeyEchoTime: interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5 if text: self._speak(text, acss) def speakUtterances(self, utteranceList, acss=None, interrupt=True): #if interrupt: # self._cancel() for utterance in utteranceList: if utterance: self._speak(utterance, acss) def sayAll(self, utteranceIterator, progressCallback): GLib.idle_add(self._say_all, utteranceIterator, progressCallback) def speakCharacter(self, character, acss=None): self._apply_acss(acss) if character == '\n': self._send_command(self._client.sound_icon, 'end-of-line') return name = chnames.getCharacterName(character) if not name: self._send_command(self._client.char, character) return if orca_state.activeScript: name = orca_state.activeScript.\ utilities.adjustForPronunciation(name) self.speak(name, acss) def speakKeyEvent(self, event): if event.isPrintableKey() and event.event_string.isupper(): acss = settings.voices[settings.UPPERCASE_VOICE] else: acss = ACSS(settings.voices[settings.DEFAULT_VOICE]) event_string = event.getKeyName() if orca_state.activeScript: event_string = orca_state.activeScript.\ utilities.adjustForPronunciation(event_string) lockingStateString = event.getLockingStateString() event_string = "%s %s" % (event_string, lockingStateString) self.speak(event_string, acss=acss) self._lastKeyEchoTime = time.time() def increaseSpeechRate(self, step=5): self._change_default_speech_rate(step) def decreaseSpeechRate(self, step=5): self._change_default_speech_rate(step, decrease=True) def increaseSpeechPitch(self, step=0.5): self._change_default_speech_pitch(step) def decreaseSpeechPitch(self, step=0.5): self._change_default_speech_pitch(step, decrease=True) def increaseSpeechVolume(self, step=0.5): self._change_default_speech_volume(step) def decreaseSpeechVolume(self, step=0.5): self._change_default_speech_volume(step, decrease=True) def stop(self): self._cancel() def shutdown(self): self._client.close() del SpeechServer._active_servers[self._id] def reset(self, text=None, acss=None): self._client.close() self._init() def list_output_modules(self): """Return names of available output modules as a tuple of strings. This method is not a part of Orca speech API, but is used internally by the Speech Dispatcher backend. The returned tuple can be empty if the information can not be obtained (e.g. with an older Speech Dispatcher version). 
""" try: return self._send_command(self._client.list_output_modules) except AttributeError: return () except speechd.SSIPCommandError: return ()
lgpl-2.1
5,530,888,183,352,487,000
36.950276
81
0.609841
false
3.897674
false
false
false
teto/ns-3-dev-git
doc/tutorial/source/conf.py
1
7057
# -*- coding: utf-8 -*- # # ns-3 documentation build configuration file, created by # sphinx-quickstart on Tue Dec 14 09:00:39 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.imgmath'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'ns-3' copyright = u'2010, ns-3 project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'ns-3-dev' # The full version, including alpha/beta/rc tags. release = 'ns-3-dev' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'ns3_html_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['../..'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = 'Tutorial' # A shorter title for the navigation bar. 
Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'ns-3-tutorial.tex', u'ns-3 Tutorial',
   u'ns-3 project', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.  (A raw string, so that the
# backslash in \usepackage is not interpreted as an escape sequence.)
latex_preamble = r'\usepackage{amssymb}'

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ns-3-tutorial', u'ns-3 Tutorial',
     [u'ns-3 project'], 1)
]
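
# Typical build invocations for this configuration (assuming Sphinx is
# installed and this file's directory is the documentation root):
#
#     sphinx-build -b html  . _build/html
#     sphinx-build -b latex . _build/latex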
gpl-2.0
-883,867,585,173,012,000
31.671296
80
0.705399
false
3.685117
true
false
false
mpetyx/pychatbot
SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/miniclient/request.py
1
6234
import StringIO, pycurl, urllib, cjson, locale from threading import Lock class Pool: def __init__(self, create): self.create = create self.lock = Lock() self.pool = [] def get(self): self.lock.acquire() try: if len(self.pool): return self.pool.pop() else: return self.create() finally: self.lock.release() def put(self, value): self.lock.acquire() try: self.pool.append(value) finally: self.lock.release() curlPool = Pool(pycurl.Curl) class RequestError(Exception): def __init__(self, status, message): print status, message self.status = status self.message = message def __str__(self): return "Server returned %s: %s" % (self.status, self.message) def urlenc(**args): buf = [] def enc(name, val): buf.append(urllib.quote(name) + "=" + urllib.quote(val)) def encval(name, val): if val is None: pass elif isinstance(val, bool): enc(name, (val and "true") or "false") elif isinstance(val, int): enc(name, "%d" % val) elif isinstance(val, float): enc(name, "%g" % val) elif isinstance(val, list) or isinstance(val, tuple): for elt in val: encval(name, elt) elif isinstance(val, basestring): enc(name, val.encode("utf-8")) else: enc(name, unicode(val).encode("utf-8")) for name, val in args.iteritems(): encval(name, val) return "&".join(buf) def makeRequest(obj, method, url, body=None, accept="*/*", contentType=None, callback=None, errCallback=None): curl = curlPool.get() if obj: if obj.user and obj.password: curl.setopt(pycurl.USERPWD, "%s:%s" % (obj.user, obj.password)) curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) if url.startswith("/"): url = obj.url + url postbody = method == "POST" or method == "PUT" curl.setopt(pycurl.POSTFIELDS, "") if body: if postbody: curl.setopt(pycurl.POSTFIELDS, body) else: url = url + "?" + body curl.setopt(pycurl.POST, (postbody and 1) or 0) curl.setopt(pycurl.CUSTOMREQUEST, method) curl.setopt(pycurl.URL, url) # The "Expect:" is there to suppress "Expect: 100-continue" # behaviour that is the default in libcurl when posting large # bodies. 
headers = ["Connection: keep-alive", "Accept: " + accept, "Expect:"] if contentType and postbody: headers.append("Content-Type: " + contentType) if callback: headers.append("Connection: close") curl.setopt(pycurl.HTTPHEADER, headers) curl.setopt(pycurl.ENCODING, "") # which means 'any encoding that curl supports' if callback: status = [None] error = [] def headerfunc(string): if status[0] is None: status[0] = locale.atoi(string.split(" ")[1]) return len(string) def writefunc(string): if status[0] == 200: callback(string) else: error.append(string.decode("utf-8")) curl.setopt(pycurl.WRITEFUNCTION, writefunc) curl.setopt(pycurl.HEADERFUNCTION, headerfunc) curl.perform() if status[0] != 200: errCallback(curl.getinfo(pycurl.RESPONSE_CODE), "".join(error)) else: buf = StringIO.StringIO() curl.setopt(pycurl.WRITEFUNCTION, buf.write) curl.perform() response = buf.getvalue().decode("utf-8") buf.close() result = (curl.getinfo(pycurl.RESPONSE_CODE), response) curlPool.put(curl) return result def jsonRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded", rowreader=None, accept="application/json"): if rowreader is None: status, body = makeRequest(obj, method, url, body, accept, contentType) if (status == 200): if accept in ('application/json', 'text/integer', "application/x-quints+json"): body = cjson.decode(body) return body else: raise RequestError(status, body) else: def raiseErr(status, message): raise RequestError(status, message) makeRequest(obj, method, url, body, accept, contentType, callback=rowreader.process, errCallback=raiseErr) def nullRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded"): status, body = makeRequest(obj, method, url, body, "application/json", contentType) if (status < 200 or status > 204): raise RequestError(status, body) class RowReader: def __init__(self, callback): self.hasNames = None self.names = None self.skipNextBracket = False self.callback = callback self.backlog = None def process(self, string): if self.hasNames is None: self.hasNames = string[0] == "{" if not self.hasNames: self.skipNextBracket = True ln = len(string) if self.backlog: string = self.backlog + string pos = [0] def useArray(arr): if self.hasNames: if self.names: self.callback(arr, self.names) else: self.names = arr self.skipNextBracket = True else: self.callback(arr, None) def takeArrayAt(start): scanned = start + 1 while True: end = string.find("]", scanned) if end == -1: return False try: useArray(cjson.decode(string[start : end + 1].decode("utf-8"))) pos[0] = end + 1 return True except cjson.DecodeError: scanned = end + 1 while True: start = string.find("[", pos[0]) if self.skipNextBracket: self.skipNextBracket = False pos[0] = start + 1 elif start == -1 or not takeArrayAt(start): break if pos[0] == 0: self.backlog = string return ln else: self.backlog = None return pos[0]
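
# Usage sketch (hypothetical service URL; any object exposing .url, .user and
# .password works, since makeRequest() only reads those attributes):
#
#     class Service(object):
#         url = "http://localhost:10035"
#         user = password = None
#
#     repos = jsonRequest(Service(), "GET", "/repositories")  # decoded via cjson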
apache-2.0
4,239,466,818,039,334,400
34.622857
137
0.569458
false
3.906015
false
false
false
simonkrogmann/planets
gui/object_3D.py
1
4105
# -*- coding: cp1252 -*-
import vector
import time

class Planet3D:
    """A 3D object for the graphics module that is linked to a planet."""
    def __init__(self, Parent, Planet):
        self.Parent = Parent
        self.Planet = Planet
        self.Planet.Register(self)
        self.Positions = [Planet["position"].Tuple()]
        self.Trace = []
        self.Color = Planet["color"]
        self.TraceState = -1
        self.Drawing = self.Parent.Canvas.create_oval(-5, -5, -6, -6,
                            fill = Planet["color"], outline = "")
        self.Redraw()
    def ResetTrace(self):
        """Clears the planet's trace drawn so far."""
        for Line in self.Trace:
            self.Parent.Canvas.delete(Line.Drawing)
            self.Parent.Drawings.remove(Line)
        self.Trace = []
        self.TraceState = -1
        self.Positions = [self.Positions[-1]]
    def Redraw(self):
        """Redraws the planet."""
        C = self.Parent.DisplayPosition(self.Positions[-1])
        if C:
            Diameter = self.Parent.DisplayDiameter(self.Positions[-1],
                                                   self.Planet["diameter"])
            Coordinates = (C[0] - Diameter, C[1] - Diameter,
                           C[0] + Diameter, C[1] + Diameter)
            self.Parent.Canvas.coords(self.Drawing, Coordinates)
        else:
            self.Parent.Canvas.coords(self.Drawing, -5, -5, -6, -6)
    def Update(self, Tag, Value):
        """Updates the planet's drawing according to the given data.
        Possible data are the planet attributes."""
        if Tag == "position":
            if type(Value) == tuple:
                Tuple = Value
            else:
                Tuple = Value.Tuple()
            if self.Planet["trace"] and self.Planet.Parent.Trace:
                # groups the trace into segments of 5 lines each
                self.TraceState = (self.TraceState + 1) % 5
                if not self.TraceState:
                    self.Trace.append(Line3D(self.Parent, self.Positions[-1],
                                             Tuple, self.Color))
                    self.Parent.Drawings.append(self.Trace[-1])
                    self.Positions.append(Tuple)
                else:
                    self.Positions[-1] = Tuple
                    self.Trace[-1].End = Tuple
                    self.Trace[-1].Redraw()
            else:
                self.Positions = [Tuple]
            self.Redraw()
        elif Tag == "diameter":
            self.Redraw()
        elif Tag == "color":
            self.SetColor(Value)
        elif Tag == "trace" and not Value:
            self.ResetTrace()
    def SetColor(self, Color):
        """Changes the planet's color."""
        self.Color = Color
        self.Parent.Canvas.itemconfig(self.Drawing, fill = Color)
    def Delete(self):
        """Removes the planet from the drawing."""
        for Line in self.Trace:
            self.Parent.Canvas.delete(Line.Drawing)
            self.Parent.Drawings.remove(Line)
        self.Parent.Canvas.delete(self.Drawing)
        self.Planet.Deregister(self)
    def MidPoint(self):
        """Returns the planet's center point."""
        return self.Positions[-1]

class Line3D:
    """A 3D line for the graphics module."""
    def __init__(self, Parent, Begin, End, Color):
        self.Parent = Parent
        self.Begin = Begin
        self.End = End
        self.OnScreen = False
        self.Drawing = self.Parent.Canvas.create_line(-5, -5, -5, -5, fill = Color)
        self.Redraw()
    def Redraw(self):
        """Redraws the line."""
        Coordinates = self.Parent.LineDisplayCoordinates(self.Begin, self.End)
        if Coordinates != (-5,-5,-5,-5):
            self.Parent.Canvas.coords(self.Drawing, Coordinates)
            self.OnScreen = True
        elif self.OnScreen:
            self.OnScreen = False
            self.Parent.Canvas.coords(self.Drawing, Coordinates)
    def MidPoint(self):
        """Returns the line's midpoint."""
        return ((self.Begin[0] + self.End[0])/ 2,
                (self.Begin[1] + self.End[1])/ 2,
                (self.Begin[2] + self.End[2])/ 2)
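
# Usage sketch (hypothetical; Parent must provide the Canvas plus the
# DisplayPosition/DisplayDiameter/LineDisplayCoordinates methods used above,
# and Planet the dict-style access plus Register/Deregister):
#
#     drawing = Planet3D(scene, planet)
#     planet["color"] = "red"    # the planet calls Update() on registered views
#     drawing.Delete()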
mit
-6,019,683,214,109,623,000
37.009259
97
0.557613
false
3.526632
false
false
false
Aloomaio/googleads-python-lib
examples/ad_manager/v201805/product_package_item_service/get_product_package_items_for_product_package.py
1
2302
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all product package items belonging to a product package.
"""

# Import appropriate modules from the client library.
from googleads import ad_manager

PRODUCT_PACKAGE_ID = 'INSERT_PRODUCT_PACKAGE_ID_HERE'


def main(client, product_package_id):
  # Initialize appropriate service.
  product_package_item_service = client.GetService(
      'ProductPackageItemService', version='v201805')

  # Create a statement to select product package items.
  statement = (ad_manager.StatementBuilder(version='v201805')
               .Where('productPackageId = :productPackageId')
               .WithBindVariable('productPackageId', product_package_id))

  # Retrieve a small amount of product package items at a time, paging
  # through until all product package items have been retrieved.
  while True:
    response = product_package_item_service.getProductPackageItemsByStatement(
        statement.ToStatement())
    if 'results' in response and len(response['results']):
      for product_package_item in response['results']:
        # Print out some information for each product package item.
        print('Product package item with ID "%d", product ID "%d", and product '
              'package ID "%d" was found.\n' %
              (product_package_item['id'], product_package_item['productId'],
               product_package_item['productPackageId']))
      statement.offset += statement.limit
    else:
      break

  print('\nNumber of results found: %s' % response['totalResultSetSize'])


if __name__ == '__main__':
  # Initialize client object.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, PRODUCT_PACKAGE_ID)
apache-2.0
1,369,016,464,488,928,300
40.107143
80
0.715899
false
4.193078
false
false
false
baidu/palo
build-support/run_clang_format.py
2
5703
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Modified from Apache Arrow project. from __future__ import print_function import lintutils from subprocess import PIPE import argparse import difflib import multiprocessing as mp import sys from functools import partial # examine the output of clang-format and if changes are # present assemble a (unified)patch of the difference def _check_one_file(filename, formatted): with open(filename, "rb") as reader: original = reader.read() if formatted != original: # Run the equivalent of diff -u diff = list(difflib.unified_diff( original.decode('utf8').splitlines(True), formatted.decode('utf8').splitlines(True), fromfile=filename, tofile="{} (after clang format)".format( filename))) else: diff = None return filename, diff def _check_dir(arguments, source_dir, exclude_globs): formatted_filenames = [] for path in lintutils.get_sources(source_dir, exclude_globs): formatted_filenames.append(str(path)) if arguments.fix: if not arguments.quiet: print("\n".join(map(lambda x: "Formatting {}".format(x), formatted_filenames))) # Break clang-format invocations into chunks: each invocation formats # 16 files. Wait for all processes to complete results = lintutils.run_parallel([ [arguments.clang_format_binary, "-style=file", "-i"] + some for some in lintutils.chunk(formatted_filenames, 16) ]) for returncode, stdout, stderr in results: # if any clang-format reported a parse error, bubble it if returncode != 0: sys.exit(returncode) else: # run an instance of clang-format for each source file in parallel, # then wait for all processes to complete results = lintutils.run_parallel([ [arguments.clang_format_binary, "-style=file", filename] for filename in formatted_filenames ], stdout=PIPE, stderr=PIPE) checker_args = [] for filename, res in zip(formatted_filenames, results): # if any clang-format reported a parse error, bubble it returncode, stdout, stderr = res if returncode != 0: print(stderr) sys.exit(returncode) checker_args.append((filename, stdout)) error = False pool = mp.Pool() try: # check the output from each invocation of clang-format in parallel for filename, diff in pool.starmap(_check_one_file, checker_args): if not arguments.quiet: print("Checking {}".format(filename)) if diff: print("{} had clang-format style issues".format(filename)) # Print out the diff to stderr error = True # pad with a newline print(file=sys.stderr) sys.stderr.writelines(diff) except Exception: error = True raise finally: pool.terminate() pool.join() sys.exit(1 if error else 0) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Runs clang-format on all of the source " "files. 
If --fix is specified enforce format by " "modifying in place, otherwise compare the output " "with the existing file and output any necessary " "changes as a patch in unified diff format") parser.add_argument("--clang_format_binary", required=True, help="Path to the clang-format binary") parser.add_argument("--exclude_globs", help="Filename containing globs for files " "that should be excluded from the checks") parser.add_argument("--source_dirs", required=True, help="Comma-separated root directories of the source code") parser.add_argument("--fix", default=False, action="store_true", help="If specified, will re-format the source " "code instead of comparing the re-formatted " "output, defaults to %(default)s") parser.add_argument("--quiet", default=False, action="store_true", help="If specified, only print errors") arguments = parser.parse_args() exclude_globs = [] if arguments.exclude_globs: with open(arguments.exclude_globs) as f: exclude_globs.extend(line.strip() for line in f) for source_dir in arguments.source_dirs.split(','): if len(source_dir) > 0: _check_dir(arguments, source_dir, exclude_globs)
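
# Example invocation (binary path and source directories are hypothetical):
#
#     python run_clang_format.py \
#         --clang_format_binary=/usr/bin/clang-format \
#         --source_dirs=be/src,be/test \
#         --fix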
apache-2.0
-5,497,260,695,327,298,000
38.604167
83
0.60512
false
4.678425
false
false
false
tisnik/fabric8-analytics-common
dashboard/src/jacoco_to_codecov.py
1
4579
"""Module to convert JaCoCo coverage report into the report compatible with Pycov utility.""" import csv def format_coverage_line(text, statements, missed, coverage, missed_lines=False): """Format one line with code coverage report of one class or for a summary.""" format_string = "{:80} {:3d} {:3d} {:3d}%" if missed_lines: format_string += " N/A" return format_string.format(text, statements, missed, coverage) def compute_coverage(statements, covered): """Compute code coverage based on number of all statemts and number of covered statements.""" return 100.0 * covered / statements class JavaClassCoverageReport: """Class representing code coverage report for one Java class.""" def __init__(self, record): """Initialize the object by using record read from the CSV file.""" self.group = record[0] self.package = record[1] self.class_name = record[2] self.missed = int(record[7]) self.covered = int(record[8]) self.statements = self.covered + self.missed self.coverage = compute_coverage(self.statements, self.covered) def __str__(self): """Return readable text representation compatible with Pycov utility output.""" pc = "{package}/{class_name}".format(package=self.package, class_name=self.class_name) return format_coverage_line(pc, self.statements, self.missed, int(self.coverage)) class ProjectCoverageReport: """Class to perform conversion from JaCoCo output to report compatible with Pycov utility.""" def __init__(self, csv_input_file_name): """Initialize the object, store the name of input (CSV) file.""" self.csv_input_file_name = csv_input_file_name @staticmethod def read_csv(csv_input_file_name, skip_first_line=False): """Read the given CSV file, parse it, and return as list of records.""" output = [] with open(csv_input_file_name, 'r') as fin: csv_content = csv.reader(fin, delimiter=',') if skip_first_line: next(csv_content, None) for row in csv_content: output.append(row) return output @staticmethod def write_horizontal_rule(fout): """Write horizontal rule into the output file.""" fout.write("-" * 108) fout.write("\n") @staticmethod def write_coverage_report_header(fout): """Write header compatible with Pycov to the output file.""" fout.write("{:80} {:5} {:4} {:5} {}\n".format( "Name", "Stmts", "Miss", "Cover", "Missing")) ProjectCoverageReport.write_horizontal_rule(fout) @staticmethod def write_coverage_report_summary(fout, statements, missed, coverage): """Write summary compatible with Pycov to the output file.""" ProjectCoverageReport.write_horizontal_rule(fout) fout.write(format_coverage_line("TOTAL", statements, missed, int(coverage))) fout.write("\n") def read_java_classes(self): """Read and parse into about Java classes from JaCoCo results.""" data = ProjectCoverageReport.read_csv(self.csv_input_file_name, True) return [JavaClassCoverageReport(record) for record in data] def convert_code_coverage_report(self, output_file_name): """Convert code coverage report that would be compatible with PyCov output.""" java_classes = self.read_java_classes() statements, missed, coverage = ProjectCoverageReport.compute_total(java_classes) with open(output_file_name, "w") as fout: ProjectCoverageReport.write_coverage_report_header(fout) for java_class in java_classes: fout.write(str(java_class) + "\n") ProjectCoverageReport.write_coverage_report_summary(fout, statements, missed, coverage) @staticmethod def compute_total(records): """Compute total/summary from all Java class coverage reports.""" statements = 0 covered = 0 missed = 0 for record in records: statements += record.statements covered += 
record.covered missed += record.missed coverage = compute_coverage(statements, covered) return statements, missed, coverage def main(): """Just a test ATM.""" p = ProjectCoverageReport("fabric8-analytics-jenkins-plugin.coverage.csv") p.convert_code_coverage_report("fabric8-analytics-jenkins-plugin.coverage.txt") if __name__ == "__main__": # execute only if run as a script main()
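
# Usage sketch (file names are hypothetical; the input is the CSV report
# produced by a JaCoCo build):
#
#     report = ProjectCoverageReport("target/site/jacoco/jacoco.csv")
#     report.convert_code_coverage_report("coverage.txt")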
apache-2.0
-754,818,450,584,529,800
39.166667
99
0.647085
false
4.084746
false
false
false
huiyiqun/check_mk
cmk_base/core.py
1
9266
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # tails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. """All core related things like direct communication with the running core""" import fcntl import os import socket import subprocess import sys import cmk.paths import cmk.debug import cmk.tty as tty import livestatus from cmk.exceptions import MKGeneralException import cmk_base.console as console import cmk_base.config as config import cmk_base.core_config as core_config import cmk_base.core_nagios as core_nagios from cmk_base.exceptions import MKTimeout from cmk_base import config_cache try: import cmk_base.cee.core_cmc as core_cmc except ImportError: core_cmc = None _restart_lock_fd = None #. # .--Control-------------------------------------------------------------. # | ____ _ _ | # | / ___|___ _ __ | |_ _ __ ___ | | | # | | | / _ \| '_ \| __| '__/ _ \| | | # | | |__| (_) | | | | |_| | | (_) | | | # | \____\___/|_| |_|\__|_| \___/|_| | # | | # +----------------------------------------------------------------------+ # | Invoke actions affecting the core like reload/restart | # '----------------------------------------------------------------------' def do_reload(): do_restart(True) # TODO: Cleanup duplicate code with automation_restart() def do_restart(only_reload = False): try: backup_path = None if try_get_activation_lock(): # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Other restart currently in progress. Aborting.\n") sys.exit(1) # Save current configuration if os.path.exists(cmk.paths.nagios_objects_file): backup_path = cmk.paths.nagios_objects_file + ".save" console.verbose("Renaming %s to %s\n", cmk.paths.nagios_objects_file, backup_path, stream=sys.stderr) os.rename(cmk.paths.nagios_objects_file, backup_path) else: backup_path = None try: core_config.do_create_config(with_agents=True) except Exception, e: # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Error creating configuration: %s\n" % e) if backup_path: os.rename(backup_path, cmk.paths.nagios_objects_file) if cmk.debug.enabled(): raise sys.exit(1) if config.monitoring_core == "cmc" or core_nagios.do_check_nagiosconfig(): if backup_path: os.remove(backup_path) core_config.precompile() do_core_action(only_reload and "reload" or "restart") else: # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Configuration for monitoring core is invalid. 
Rolling back.\n")
            broken_config_path = "%s/check_mk_objects.cfg.broken" % cmk.paths.tmp_dir
            file(broken_config_path, "w").write(file(cmk.paths.nagios_objects_file).read())
            console.error("The broken file has been copied to \"%s\" for analysis.\n" % broken_config_path)

            if backup_path:
                os.rename(backup_path, cmk.paths.nagios_objects_file)
            else:
                os.remove(cmk.paths.nagios_objects_file)
            sys.exit(1)

    except Exception, e:
        try:
            if backup_path and os.path.exists(backup_path):
                os.remove(backup_path)
        except:
            pass
        if cmk.debug.enabled():
            raise
        # TODO: Replace by MKBailOut()/MKTerminate()?
        console.error("An error occurred: %s\n" % e)
        sys.exit(1)


def try_get_activation_lock():
    global _restart_lock_fd
    # In some bizarre cases (such as cmk -RR) we need to avoid duplicate locking!
    if config.restart_locking and _restart_lock_fd == None:
        lock_file = cmk.paths.default_config_dir + "/main.mk"
        _restart_lock_fd = os.open(lock_file, os.O_RDONLY)
        # Make sure that open file is not inherited to monitoring core!
        fcntl.fcntl(_restart_lock_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
        try:
            console.verbose("Waiting for exclusive lock on %s.\n" % lock_file, stream=sys.stderr)
            fcntl.flock(_restart_lock_fd, fcntl.LOCK_EX | (
                config.restart_locking == "abort" and fcntl.LOCK_NB or 0))
        except:
            return True
    return False


# Action can be restart, reload, start or stop
def do_core_action(action, quiet=False):
    if not quiet:
        console.output("%sing monitoring core..." % action.title())

    if config.monitoring_core == "nagios":
        os.putenv("CORE_NOVERIFY", "yes")
        command = [ "%s/etc/init.d/core" % cmk.paths.omd_root, action ]
    else:
        command = [ "omd", action, "cmc" ]

    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
    result = p.wait()
    if result != 0:
        output = p.stdout.read()
        if not quiet:
            console.output("ERROR: %s\n" % output)
        raise MKGeneralException("Cannot %s the monitoring core: %s" % (action, output))
    else:
        if not quiet:
            console.output(tty.ok + "\n")

#.
#   .--Timeperiods---------------------------------------------------------.
#   |      _____ _                                _           _            |
#   |     |_   _(_)_ __ ___   ___ _ __   ___ _ __(_) ___   __| |___        |
#   |       | | | | '_ ` _ \ / _ \ '_ \ / _ \ '__| |/ _ \ / _` / __|       |
#   |       | | | | | | | | |  __/ |_) |  __/ |  | | (_) | (_| \__ \       |
#   |       |_| |_|_| |_| |_|\___| .__/ \___|_|  |_|\___/ \__,_|___/       |
#   |                            |_|                                       |
#   +----------------------------------------------------------------------+
#   |  Fetching timeperiods from the core                                  |
#   '----------------------------------------------------------------------'

# Check if a timeperiod is currently active. We have no other way than
# doing a Livestatus query. This is not really nice, but if you have a better
# idea, please tell me...
def check_timeperiod(timeperiod):
    # Let exceptions happen, they will be handled upstream.
    try:
        update_timeperiods_cache()
    except MKTimeout:
        raise
    except:
        if cmk.debug.enabled():
            raise
        # If the query is not successful better skip this check than fail
        return True

    # Note: This also returns True when the timeperiod is unknown
    #       The following function timeperiod_active handles this differently
    return config_cache.get_dict("timeperiods_cache").get(timeperiod, True) == True


# Returns
# True : active
# False: inactive
# None : unknown timeperiod
#
# Raises an exception if e.g. a timeout or connection error appears.
# This way errors can be handled upstream.
def timeperiod_active(timeperiod):
    update_timeperiods_cache()
    return config_cache.get_dict("timeperiods_cache").get(timeperiod)


def update_timeperiods_cache():
    # { "last_update": 1498820128, "timeperiods": [{"24x7": True}] }
    # The value is stored within the config cache since we need a fresh start on reload
    tp_cache = config_cache.get_dict("timeperiods_cache")

    if not tp_cache:
        response = livestatus.LocalConnection().query("GET timeperiods\nColumns: name in")
        for tp_name, tp_active in response:
            tp_cache[tp_name] = bool(tp_active)


def cleanup_timeperiod_caches():
    config_cache.get_dict("timeperiods_cache").clear()
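
# Usage sketch (assumes a reachable local Livestatus socket; note that
# check_timeperiod() treats unknown timeperiods as active, while
# timeperiod_active() returns None for them):
#
#     if check_timeperiod("workhours"):
#         execute_check()   # hypothetical caller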
gpl-2.0
3,326,192,585,655,252,000
38.598291
113
0.506583
false
3.780498
true
false
false
pfalcon/picotui
picotui/editorext.py
1
5118
#
# Extended VT100 terminal text editor, etc. widgets
# Copyright (c) 2015 Paul Sokolovsky
# Distributed under MIT License
#
import sys
import os

from .editor import *


# Edit single line, quit on Enter/Esc
class LineEditor(Editor):

    def handle_cursor_keys(self, key):
        if super().handle_cursor_keys(key):
            self.just_started = False
            return True
        return False

    def handle_key(self, key):
        if key in (KEY_ENTER, KEY_ESC):
            return key
        if self.just_started:
            # Overwrite initial string with new content
            self.set_lines([""])
            self.col = 0
            self.just_started = False
        return super().handle_key(key)

    def edit(self, line):
        self.set_lines([line])
        self.col = len(line)
        self.adjust_cursor_eol()
        self.just_started = True
        key = self.loop()
        if key == KEY_ENTER:
            return self.content[0]
        return None


class Viewer(Editor):

    def handle_key(self, key):
        if key in (KEY_ENTER, KEY_ESC):
            return key
        if super().handle_cursor_keys(key):
            return True


# Viewer with colored lines, (whole line same color)
class LineColorViewer(Viewer):

    def show_line(self, l, i):
        if self.is_dict_color:
            c = self.lines_c.get(i, self.def_c)
        else:
            try:
                c = self.lines_c[i]
            except IndexError:
                c = self.def_c
        self.attr_color(c)
        super().show_line(l, i)
        self.attr_reset()

    def set_line_colors(self, default_color, color_list={}):
        self.def_c = default_color
        self.lines_c = color_list
        self.is_dict_color = isinstance(color_list, dict)


# Viewer with color support, (each line may consist of spans
# of different colors)
class CharColorViewer(Viewer):

    def show_line(self, l, i):
        # TODO: handle self.margin, self.width
        length = 0
        for span in l:
            if isinstance(span, tuple):
                span, c = span
            else:
                c = self.def_c
            self.attr_color(c)
            self.wr(span)
            length += len(span)
        self.attr_color(self.def_c)
        self.clear_num_pos(self.width - length)
        self.attr_reset()

    def set_def_color(self, default_color):
        self.def_c = default_color


class EditorExt(Editor):

    screen_width = 80

    def __init__(self, left=0, top=0, width=80, height=24):
        super().__init__(left, top, width, height)
        # +1 assumes there's a border around editor pane
        self.status_y = top + height + 1

    def get_cur_line(self):
        return self.content[self.cur_line]

    def line_visible(self, no):
        return self.top_line <= no < self.top_line + self.height

    # If line "no" is already on screen, just reposition cursor to it and
    # return False. Otherwise, show needed line either at the center of
    # screen or at the top, and return True.
def goto_line(self, no, col=None, center=True): self.cur_line = no if self.line_visible(no): self.row = no - self.top_line if col is not None: self.col = col if self.adjust_cursor_eol(): self.redraw() self.set_cursor() return False if center: c = self.height // 2 if no > c: self.top_line = no - c self.row = c else: self.top_line = 0 self.row = no else: self.top_line = no self.row = 0 if col is not None: self.col = col self.adjust_cursor_eol() self.redraw() return True def show_status(self, msg): self.cursor(False) self.goto(0, self.status_y) self.wr(msg) self.clear_to_eol() self.set_cursor() self.cursor(True) def show_cursor_status(self): self.cursor(False) self.goto(0, 31) self.wr("% 3d:% 3d" % (self.cur_line, self.col + self.margin)) self.set_cursor() self.cursor(True) def dialog_edit_line(self, left=None, top=8, width=40, height=3, line="", title=""): if left is None: left = (self.screen_width - width) / 2 self.dialog_box(left, top, width, height, title) e = LineEditor(left + 1, top + 1, width - 2, height - 2) return e.edit(line) if __name__ == "__main__": with open(sys.argv[1]) as f: content = f.read().splitlines() #content = f.readlines() #os.write(1, b"\x1b[18t") #key = os.read(0, 32) #print(repr(key)) #key = os.read(0, 32) #print(repr(key)) #1/0 e = EditorExt(left=1, top=1, width=60, height=25) e.init_tty() e.enable_mouse() s = e.dialog_edit_line(10, 5, 40, 3, title="Enter name:", line="test") e.cls() e.deinit_tty() print() print(s) 1/0 # e.cls() e.draw_box(0, 0, 62, 27) e.set_lines(content) e.loop() e.deinit_tty()
mit
-3,940,507,001,391,494,700
24.979695
88
0.540445
false
3.484003
false
false
false
anthropo-lab/XP
EPHEMER/insider_trading_project/insider_trading/consumers.py
1
7993
from channels import Group as channelsGroup from channels.sessions import channel_session import random from .models import Group as OtreeGroup, Subsession as OtreeSubsession, Constants import json import channels import logging from otree import constants_internal import django.test from otree.common_internal import (get_admin_secret_code) from threading import Event import time client = django.test.Client() ADMIN_SECRET_CODE = get_admin_secret_code() ############################################# ############################################# # Connected to websocket.connect def ws_admin_connect(message): print("*********CONNECT************") channelsGroup("adminreport").add(message.reply_channel) # Connected to websocket.receive def ws_admin_message(message): print("*********RECEIVE************") # Decrypt the url: No info in the url in this app # Decrypt the received message jsonmessage = json.loads(message.content['text']) subsession_pk = jsonmessage['subsession_pk'] mysubsession = OtreeSubsession.objects.get(pk=subsession_pk) if 'order' in jsonmessage: order = jsonmessage['order'] if order == "push_all_players_on_page": page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mysubsession.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "push_active_players_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb) & (p.participant.vars['active_flag'] != 'Inactive')): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "push_inactive_players_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb) & (p.participant.vars['active_flag'] == 'Inactive')): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( 
p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "deactivate_all_group_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): p.participant.vars['active_flag'] = 'Inactive' p.participant.save() elif order == "reactivate_all_group_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): p.participant.vars['active_flag'] = 'Playing_No_Change_Game' p.participant.save() elif order == "make_grouping_phase1": mysubsession.make_grouping_phase1() elif order == "make_grouping_phase2": mysubsession.make_grouping_phase2() ############################################# # Give feedback channelsGroup("adminreport").send({'text': json.dumps( {"order": "refresh"})} ) # Connected to websocket.disconnect def ws_admin_disconnect(message): print("*********DISCONNECT************") channelsGroup("adminreport").discard(message.reply_channel)
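
# Message sketch (an illustrative payload for ws_admin_message(); the field
# values are hypothetical):
#
#     {"subsession_pk": 1,
#      "order": "push_active_players_on_page",
#      "group_pk": 3,
#      "page_name": "Trading",
#      "round_nb": 2}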
gpl-3.0
-7,776,490,304,224,844,000
44.936782
91
0.477793
false
4.829607
false
false
false
icarito/sugar
src/jarabe/journal/listmodel.py
1
10564
# Copyright (C) 2009, Tomeu Vizoso # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import time import json from gi.repository import GObject from gi.repository import Gtk from gettext import gettext as _ from sugar3.graphics.xocolor import XoColor from sugar3.graphics import style from sugar3 import util from jarabe.journal import model from jarabe.journal import misc DS_DBUS_SERVICE = 'org.laptop.sugar.DataStore' DS_DBUS_INTERFACE = 'org.laptop.sugar.DataStore' DS_DBUS_PATH = '/org/laptop/sugar/DataStore' class ListModel(GObject.GObject, Gtk.TreeModel, Gtk.TreeDragSource): __gtype_name__ = 'JournalListModel' __gsignals__ = { 'ready': (GObject.SignalFlags.RUN_FIRST, None, ([])), 'progress': (GObject.SignalFlags.RUN_FIRST, None, ([])), } COLUMN_UID = 0 COLUMN_FAVORITE = 1 COLUMN_ICON = 2 COLUMN_ICON_COLOR = 3 COLUMN_TITLE = 4 COLUMN_TIMESTAMP = 5 COLUMN_CREATION_TIME = 6 COLUMN_FILESIZE = 7 COLUMN_PROGRESS = 8 COLUMN_BUDDY_1 = 9 COLUMN_BUDDY_2 = 10 COLUMN_BUDDY_3 = 11 COLUMN_SELECT = 12 _COLUMN_TYPES = { COLUMN_UID: str, COLUMN_FAVORITE: bool, COLUMN_ICON: str, COLUMN_ICON_COLOR: object, COLUMN_TITLE: str, COLUMN_TIMESTAMP: str, COLUMN_CREATION_TIME: str, COLUMN_FILESIZE: str, COLUMN_PROGRESS: int, COLUMN_BUDDY_1: object, COLUMN_BUDDY_3: object, COLUMN_BUDDY_2: object, COLUMN_SELECT: bool, } _PAGE_SIZE = 10 def __init__(self, query): GObject.GObject.__init__(self) self._last_requested_index = None self._temp_drag_file_uid = None self._cached_row = None self._query = query self._all_ids = [] t = time.time() self._result_set = model.find(query, ListModel._PAGE_SIZE) logging.debug('init resultset: %r', time.time() - t) self._temp_drag_file_path = None self._selected = [] # HACK: The view will tell us that it is resizing so the model can # avoid hitting D-Bus and disk. 
self.view_is_resizing = False # Store the changes originated in the treeview so we do not need # to regenerate the model and stuff up the scroll position self._updated_entries = {} self._result_set.ready.connect(self.__result_set_ready_cb) self._result_set.progress.connect(self.__result_set_progress_cb) def get_all_ids(self): return self._all_ids def __result_set_ready_cb(self, **kwargs): t = time.time() self._all_ids = self._result_set.find_ids(self._query) logging.debug('get all ids: %r', time.time() - t) self.emit('ready') def __result_set_progress_cb(self, **kwargs): self.emit('progress') def setup(self, updated_callback=None): self._result_set.setup() self._updated_callback = updated_callback def stop(self): self._result_set.stop() def get_metadata(self, path): return model.get(self[path][ListModel.COLUMN_UID]) def do_get_n_columns(self): return len(ListModel._COLUMN_TYPES) def do_get_column_type(self, index): return ListModel._COLUMN_TYPES[index] def do_iter_n_children(self, iterator): if iterator is None: return self._result_set.length else: return 0 def set_value(self, iterator, column, value): index = iterator.user_data self._result_set.seek(index) metadata = self._result_set.read() if column == ListModel.COLUMN_FAVORITE: metadata['keep'] = value if column == ListModel.COLUMN_TITLE: metadata['title'] = value self._updated_entries[metadata['uid']] = metadata if self._updated_callback is not None: model.updated.disconnect(self._updated_callback) model.write(metadata, update_mtime=False, ready_callback=self.__reconnect_updates_cb) def __reconnect_updates_cb(self, metadata, filepath, uid): logging.error('__reconnect_updates_cb') if self._updated_callback is not None: model.updated.connect(self._updated_callback) def do_get_value(self, iterator, column): if self.view_is_resizing: return None index = iterator.user_data if index == self._last_requested_index: return self._cached_row[column] if index >= self._result_set.length: return None self._result_set.seek(index) metadata = self._result_set.read() metadata.update(self._updated_entries.get(metadata['uid'], {})) self._last_requested_index = index self._cached_row = [] self._cached_row.append(metadata['uid']) self._cached_row.append(metadata.get('keep', '0') == '1') self._cached_row.append(misc.get_icon_name(metadata)) if misc.is_activity_bundle(metadata): xo_color = XoColor('%s,%s' % (style.COLOR_BUTTON_GREY.get_svg(), style.COLOR_TRANSPARENT.get_svg())) else: xo_color = misc.get_icon_color(metadata) self._cached_row.append(xo_color) title = GObject.markup_escape_text(metadata.get('title', _('Untitled'))) self._cached_row.append('<b>%s</b>' % (title, )) try: timestamp = float(metadata.get('timestamp', 0)) except (TypeError, ValueError): timestamp_content = _('Unknown') else: timestamp_content = util.timestamp_to_elapsed_string(timestamp) self._cached_row.append(timestamp_content) try: creation_time = float(metadata.get('creation_time')) except (TypeError, ValueError): self._cached_row.append(_('Unknown')) else: self._cached_row.append( util.timestamp_to_elapsed_string(float(creation_time))) try: size = int(metadata.get('filesize')) except (TypeError, ValueError): size = None self._cached_row.append(util.format_size(size)) try: progress = int(float(metadata.get('progress', 100))) except (TypeError, ValueError): progress = 100 self._cached_row.append(progress) buddies = [] if metadata.get('buddies'): try: buddies = json.loads(metadata['buddies']).values() except json.decoder.JSONDecodeError, exception: logging.warning('Cannot decode 
buddies for %r: %s', metadata['uid'], exception) if not isinstance(buddies, list): logging.warning('Content of buddies for %r is not a list: %r', metadata['uid'], buddies) buddies = [] for n_ in xrange(0, 3): if buddies: try: nick, color = buddies.pop(0) except (AttributeError, ValueError), exception: logging.warning('Malformed buddies for %r: %s', metadata['uid'], exception) else: self._cached_row.append([nick, XoColor(color)]) continue self._cached_row.append(None) return self._cached_row[column] def do_iter_nth_child(self, parent_iter, n): return (False, None) def do_get_path(self, iterator): treepath = Gtk.TreePath((iterator.user_data,)) return treepath def do_get_iter(self, path): idx = path.get_indices()[0] iterator = Gtk.TreeIter() iterator.user_data = idx return (True, iterator) def do_iter_next(self, iterator): idx = iterator.user_data + 1 if idx >= self._result_set.length: iterator.stamp = -1 return (False, iterator) else: iterator.user_data = idx return (True, iterator) def do_get_flags(self): return Gtk.TreeModelFlags.ITERS_PERSIST | Gtk.TreeModelFlags.LIST_ONLY def do_iter_children(self, iterator): return (False, iterator) def do_iter_has_child(self, iterator): return False def do_iter_parent(self, iterator): return (False, Gtk.TreeIter()) def do_drag_data_get(self, path, selection): uid = self[path][ListModel.COLUMN_UID] target_atom = selection.get_target() target_name = target_atom.name() if target_name == 'text/uri-list': # Only get a new temp path if we have a new file, the frame # requests a path many times and if we give it a new path it # ends up with a broken path if uid != self._temp_drag_file_uid: # Get hold of a reference so the temp file doesn't get deleted self._temp_drag_file_path = model.get_file(uid) self._temp_drag_file_uid = uid logging.debug('putting %r in selection', self._temp_drag_file_path) selection.set(target_atom, 8, self._temp_drag_file_path) return True elif target_name == 'journal-object-id': # uid is unicode but Gtk.SelectionData.set() needs str selection.set(target_atom, 8, str(uid)) return True return False def set_selected(self, uid, value): if value: self._selected.append(uid) else: self._selected.remove(uid) def is_selected(self, uid): return uid in self._selected def get_selected_items(self): return self._selected def restore_selection(self, selected): self._selected = selected def select_all(self): self._selected = self._all_ids[:] def select_none(self): self._selected = []
gpl-3.0
-7,650,490,496,236,193,000
32.220126
79
0.591064
false
3.886681
false
false
false
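# The ListModel entry above fills Gtk rows lazily and keeps a one-row cache
# (_last_requested_index / _cached_row) so that repeated column reads of the
# same row hit D-Bus and disk only once. A minimal, GTK-free sketch of that
# caching pattern; CachedRowSource and its fields are illustrative names,
# not part of the Sugar API:

class CachedRowSource(object):
    def __init__(self, result_set):
        self._result_set = result_set        # any indexable sequence of dicts
        self._last_requested_index = None
        self._cached_row = None

    def get_value(self, index, column):
        # tree views ask for every column of a row in succession, so serving
        # repeats of the last index from the cache avoids re-reading the row
        if index != self._last_requested_index:
            metadata = self._result_set[index]   # the expensive seek + read
            self._cached_row = [metadata.get('uid'),
                                metadata.get('title', 'Untitled')]
            self._last_requested_index = index
        return self._cached_row[column]

rows = [{'uid': str(i), 'title': 'entry %d' % i} for i in range(3)]
source = CachedRowSource(rows)
assert source.get_value(1, 1) == 'entry 1'   # fills the cache
assert source.get_value(1, 0) == '1'         # served from the cache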
MuffinMedic/znc-weblog
weblog.py
1
5161
import znc
import os

def is_safe_path(basedir, path):
    return os.path.abspath(path).startswith(basedir)

class weblog(znc.Module):
    module_types = [znc.CModInfo.GlobalModule]
    description = "Allows viewing of log files from the ZNC webadmin"
    wiki_page = "Weblog"

    def OnLoad(self, args, message):
        return True

    def WebRequiresLogin(self):
        return True

    def WebRequiresAdmin(self):
        return False

    def GetWebMenuTitle(self):
        return "Log Viewer"

    def OnWebRequest(self, sock, page, tmpl):
        user = sock.GetUser()
        dir = sock.GetParam('dir', False)
        if page == "index":
            if sock.GetRawParam('scope', True):
                scope = sock.GetRawParam('scope', True)
                self.setscope(scope, sock, tmpl)
            try:
                self.listdir(tmpl, dir, sock)
            except KeyError:
                row = tmpl.AddRow("ErrorLoop")
                row["error"] = "No scope set. Please set one above."
        elif page == "log" or page == "raw":
            self.viewlog(tmpl, dir, sock, page)
        self.getscopes(sock, tmpl)
        return True

    def listdir(self, tmpl, dir, sock):
        base = self.getbase(sock)
        # Apply the same traversal check as viewlog(); without it a crafted
        # "dir" parameter could list directories outside the log root.
        if dir and not is_safe_path(base, base + dir):
            row = tmpl.AddRow("ErrorLoop")
            row["error"] = "Invalid directory provided."
            return
        try:
            dir_list = sorted(os.listdir(base + dir))
            self.breadcrumbs(tmpl, dir, False)
            if len(dir_list) > 0:
                for item in dir_list:
                    row = tmpl.AddRow("ListLoop")
                    rel = dir + '/' + item if dir else item
                    path = base + rel
                    if os.path.isfile(path):
                        url = 'log?dir=' + rel.replace('#', '%23')
                        size = str(os.path.getsize(path) >> 10) + " KB"
                    elif os.path.isdir(path):
                        url = '?dir=' + rel.replace('#', '%23')
                        size = len([name for name in os.listdir(path)])
                    row["scope"] = url
                    row["item"] = item
                    row["size"] = str(size)
            else:
                row = tmpl.AddRow("ErrorLoop")
                row["error"] = "Directory empty."
        except FileNotFoundError:
            row = tmpl.AddRow("ErrorLoop")
            row["error"] = "Directory does not exist. Please make sure you have the log module enabled and that you are attempting to access logs at the appropriate level (global, user, or network)."

    def viewlog(self, tmpl, dir, sock, page):
        base = self.getbase(sock)
        if not is_safe_path(base, base + dir):
            if page == "raw":
                row = tmpl.AddRow("LogLoop")
                row['log'] = "Error: invalid directory provided."
                return
            row = tmpl.AddRow("ErrorLoop")
            row["error"] = "Invalid directory provided."
return path = base + dir row = tmpl.AddRow("LogLoop") with open(path, 'r', encoding='utf8') as log: log = log.read() if page == "raw": log = log.replace('<', '&lt;').replace('>', '&gt;') row['log'] = log if page == "log": self.breadcrumbs(tmpl, dir, True) row['raw'] = 'raw?dir=' + dir.replace('#', '%23') def breadcrumbs(self, tmpl, dir, islog): folders = dir.split('/') crumbs = ['<a href="">logs / </a>'] row = tmpl.AddRow("BreadcrumbLoop") row["crumbtext"] = "logs" row["crumburl"] = "" for i in range(0, len(folders)): if folders[i]: row = tmpl.AddRow("BreadcrumbLoop") row["crumbtext"] = folders[i] url = '/'.join(folders[0:i+1]) url = url.replace('#', '%23') row["crumburl"] = url if i == len(folders) - 1 and islog: row["islog"] = "True" def getbase(self, sock): base = znc.CZNC.Get().GetZNCPath() user = sock.GetUser() scope = self.nv[user] if scope == "Global": base = base + '/moddata/log/' + user + '/' elif scope == "User": base = base + '/users/' + user + '/moddata/log/' else: base = base + '/users/' + user + '/networks/' + self.nv[user] + '/moddata/log/' return base def getscopes(self, sock, tmpl): user_string = sock.GetUser() user = znc.CZNC.Get().FindUser(user_string) networks = user.GetNetworks() net_array = [] for network in networks: net_array.append(network.GetName()) net_array = sorted(net_array) net_array.insert(0, 'User'); net_array.insert(0, 'Global') for net in net_array: row = tmpl.AddRow("ScopeLoop") try: if net == self.nv[user_string]: row["active"] = "True" except KeyError: pass row["network"] = net def setscope(self, scope, sock, tmpl): user = sock.GetUser() self.nv[user] = scope row = tmpl.AddRow("MessageLoop") row["message"] = "Scope successfully set."
gpl-3.0
4,999,262,817,996,553,000
32.732026
199
0.496803
false
3.939695
false
false
false
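# is_safe_path() in the weblog entry above compares string prefixes, so a
# sibling directory such as "/logs-old" would still pass a check against
# "/logs". A stricter variant, sketched here with os.path.commonpath
# (Python 3.4+); this is an illustration, not part of the module, and the
# asserts assume POSIX-style paths:

import os.path

def is_safe_path_strict(basedir, path):
    basedir = os.path.realpath(basedir)
    # realpath collapses "../" components and resolves symlinks before the
    # component-wise comparison done by commonpath
    return os.path.commonpath([basedir, os.path.realpath(path)]) == basedir

assert is_safe_path_strict('/var/logs', '/var/logs/net/chan.log')
assert not is_safe_path_strict('/var/logs', '/var/logs/../secret')
assert not is_safe_path_strict('/var/logs', '/var/logs-old/chan.log')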
CalvinNeo/EasyMLPlatform
py/graphic/tree.py
1
4067
#coding:utf8 import numpy as np import math import pylab as pl import matplotlib.cm as cm import matplotlib.mlab as mlab import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import json class GraphTree: def __init__(self): self.jsonobj = {} self.leafNode = dict(boxstyle = 'round4',fc = '0.8') self.branchNode = dict(boxstyle = 'sawtooth',fc = '0.8') self.arrow = dict(arrowstyle = '<-') self.depth = 0 self.leafcount = 0 def get_depth_leafcount(self,root): current_node = root.keys()[0] #name of choice node(string) branch_dict = root[current_node] maxdepth, thisdepth, thisleafcount = 0,0,0 for current_node in branch_dict.keys(): # print current_node,type(branch_dict[current_node]).__name__ if type(branch_dict[current_node]).__name__ == 'dict': temp = self.get_depth_leafcount(branch_dict[current_node]) thisdepth = 1 + temp[0] thisleafcount += temp[1] else: thisdepth = 1 thisleafcount += 1 if thisdepth > maxdepth: maxdepth = thisdepth return maxdepth,thisleafcount def load(self,strjson): self.jsonobj = dict(strjson) self.depth,self.leafcount = self.get_depth_leafcount(self.jsonobj) def plotMidText(self, cntrPt, parentPt, txtString): xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0] yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1] self.ax1.text(xMid, yMid, txtString) def plotNode(self, nodeTxt, cntrPt, parentPt, nodeType): self.ax1.annotate(nodeTxt, xy = parentPt, xycoords = 'axes fraction', xytext = cntrPt, \ textcoords = 'axes fraction', va = 'center', ha = 'center', bbox = nodeType, arrowprops = self.arrow) def plotTree(self, myTree, parentPt, nodeTxt): depth, leaves = self.get_depth_leafcount(myTree) current_node = myTree.keys()[0] cntrPt = (self.xOff + (1.0 + leaves) / 2.0 / self.leafcount, self.yOff) self.plotMidText(cntrPt, parentPt, nodeTxt) self.plotNode(current_node, cntrPt, parentPt, self.branchNode) branch_dict = myTree[current_node] self.yOff -= 1.0 / self.depth for current_node in branch_dict.keys(): if type(branch_dict[current_node]).__name__ == 'dict': self.plotTree(branch_dict[current_node], cntrPt, str(current_node)) else: self.xOff += 1.0 / self.leafcount self.plotNode(branch_dict[current_node], (self.xOff, self.yOff), cntrPt, self.leafNode) self.plotMidText((self.xOff, self.yOff), cntrPt, str(current_node)) self.yOff += 1.0 / self.depth def createPlot(self, show = True, save = ''): fig = plt.figure(1, facecolor = 'white') fig.clf() axprops = dict(xticks = [], yticks = []) self.ax1 = plt.subplot(111,frameon = False, **axprops) self.xOff, self.yOff = -0.5 / self.leafcount, 1.0 self.plotTree(self.jsonobj, (0.5,1.0), '') import StringIO, urllib, base64 if show: plt.show() else: imgdata = StringIO.StringIO() fig.savefig(imgdata, format='png') imgdata.seek(0) # rewind the data uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf)) imgdata.close() return uri def showPlot(self): plt.show() if __name__ == '__main__': tr = GraphTree() # aa = '{"no surfacing":{"0":"no","1":{"flippers":{"0":"no","1":"yes"}}}}' # tr.load(json.loads(aa)) #JSON can't have non-string key aa = {"aged":{"0":"no","1":{"male":{"0":"no","1":"yes"}}}} # aa = {'water': {0: 1, 1: {'foot': {0: "'no'", 1: "'yes'"}}}} print dict(aa) # aa = {"no surfacing":{0:"no",1:{"flippers":{0:"no",1:"yes"}}}} # print dict(aa) tr.load(aa) print tr.leafcount,tr.depth tr.createPlot(show=True)
apache-2.0
7,361,431,833,645,096,000
38.485437
114
0.572412
false
3.238057
false
false
false
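# get_depth_leafcount() in the tree entry above walks a nested-dict decision
# tree of the form {feature: {value: subtree_or_label}} and returns the
# maximum depth and the number of leaves. The same traversal restated as a
# standalone Python 3 sketch (the original file is Python 2 and indexes
# dict.keys() directly):

def depth_and_leaves(tree):
    feature = next(iter(tree))            # name of the decision node
    depth, leaves = 0, 0
    for subtree in tree[feature].values():
        if isinstance(subtree, dict):     # another decision node
            sub_depth, sub_leaves = depth_and_leaves(subtree)
            depth = max(depth, 1 + sub_depth)
            leaves += sub_leaves
        else:                             # a leaf label
            depth = max(depth, 1)
            leaves += 1
    return depth, leaves

tree = {"aged": {"0": "no", "1": {"male": {"0": "no", "1": "yes"}}}}
assert depth_and_leaves(tree) == (2, 3)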
deplinenoise/rlaunch
vbcc-driver.py
1
1297
#! /usr/bin/python import sys import os import os.path import subprocess import re line_re = re.compile(r'^(warning|error) (\d+) in line (\d+) of "([^"]*)":\s*(.*)$') def fix_fn(root_dir, fn): # If there are path separators in the filename, assume the path is valid if fn.find(os.sep) != -1: return fn if os.path.exists(fn): return fn full_path = os.path.join(root_dir, fn) if os.path.exists(full_path): return full_path return 'bah' def munge(root_dir, line): m = re.match(line_re, line) if not m: return line.strip() fn = fix_fn(root_dir, m.group(4)) return '%s(%s) : %s %s: %s' % (fn, m.group(3), m.group(1), m.group(2), m.group(5)) if __name__ == '__main__': vbcc_root = os.environ.get('VBCC') if not vbcc_root: sys.stderr.write('VBCC environment variable not set') sys.exit(1) vc_bin = os.path.join(vbcc_root, 'bin' + os.sep + 'vc') if os.name == 'nt': vc_bin += '.exe' root_dir = '.' for arg in sys.argv[1:]: if arg.endswith('.c'): root_dir, dummy = os.path.split(arg) vc = subprocess.Popen( args = sys.argv[1:], executable = vc_bin, universal_newlines=True, stdin = None, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) for line in vc.stdout: sys.stdout.write(munge(root_dir, line)) sys.stdout.write('\n')
gpl-3.0
2,088,134,103,908,204,300
19.587302
83
0.621434
false
2.548134
false
false
false
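# The vbcc-driver entry above rewrites vbcc diagnostics into the
# "file(line) : kind code: message" shape that MSVC-style error parsers in
# IDEs understand. The regex rewrite in isolation, with a made-up sample
# diagnostic:

import re

line_re = re.compile(r'^(warning|error) (\d+) in line (\d+) of "([^"]*)":\s*(.*)$')

def rewrite(line):
    m = line_re.match(line)
    if not m:
        return line.strip()
    return '%s(%s) : %s %s: %s' % (m.group(4), m.group(3),
                                   m.group(1), m.group(2), m.group(5))

assert rewrite('error 33 in line 12 of "main.c": missing semicolon') == \
    'main.c(12) : error 33: missing semicolon'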
thefab/tornadis
tornadis/pool.py
1
7269
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of tornadis library released under the MIT license. # See the LICENSE file for more information. import tornado.gen import tornado.ioloop import tornado.locks import logging import functools from collections import deque from tornadis.client import Client from tornadis.utils import ContextManagerFuture from tornadis.exceptions import ClientError LOG = logging.getLogger(__name__) class ClientPool(object): """High level object to deal with a pool of redis clients.""" def __init__(self, max_size=-1, client_timeout=-1, autoclose=False, **client_kwargs): """Constructor. Args: max_size (int): max size of the pool (-1 means "no limit"). client_timeout (int): timeout in seconds of a connection released to the pool (-1 means "no timeout"). autoclose (boolean): automatically disconnect released connections with lifetime > client_timeout (test made every client_timeout/10 seconds). client_kwargs (dict): Client constructor arguments. """ self.max_size = max_size self.client_timeout = client_timeout self.client_kwargs = client_kwargs self.__ioloop = client_kwargs.get('ioloop', tornado.ioloop.IOLoop.instance()) self.autoclose = autoclose self.__pool = deque() if self.max_size != -1: self.__sem = tornado.locks.Semaphore(self.max_size) else: self.__sem = None self.__autoclose_periodic = None if self.autoclose and self.client_timeout > 0: every = int(self.client_timeout) * 100 if int(tornado.version[0]) >= 5: cb = tornado.ioloop.PeriodicCallback(self._autoclose, every) else: cb = tornado.ioloop.PeriodicCallback(self._autoclose, every, self.__ioloop) self.__autoclose_periodic = cb self.__autoclose_periodic.start() def _get_client_from_pool_or_make_it(self): try: while True: client = self.__pool.popleft() if client.is_connected(): if self._is_expired_client(client): client.disconnect() continue break except IndexError: client = self._make_client() return (True, client) return (False, client) @tornado.gen.coroutine def get_connected_client(self): """Gets a connected Client object. If max_size is reached, this method will block until a new client object is available. Returns: A Future object with connected Client instance as a result (or ClientError if there was a connection problem) """ if self.__sem is not None: yield self.__sem.acquire() client = None newly_created, client = self._get_client_from_pool_or_make_it() if newly_created: res = yield client.connect() if not res: LOG.warning("can't connect to %s", client.title) raise tornado.gen.Return( ClientError("can't connect to %s" % client.title)) raise tornado.gen.Return(client) def get_client_nowait(self): """Gets a Client object (not necessary connected). If max_size is reached, this method will return None (and won't block). Returns: A Client instance (not necessary connected) as result (or None). 
""" if self.__sem is not None: if self.__sem._value == 0: return None self.__sem.acquire() _, client = self._get_client_from_pool_or_make_it() return client def _autoclose(self): newpool = deque() try: while True: client = self.__pool.popleft() if client.is_connected(): if self._is_expired_client(client): client.disconnect() else: newpool.append(client) except IndexError: self.__pool = newpool def _is_expired_client(self, client): if self.client_timeout != -1 and client.is_connected(): delta = client.get_last_state_change_timedelta() if delta.total_seconds() >= self.client_timeout: return True return False def connected_client(self): """Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples: >>> with (yield pool.connected_client()) as client: # client is a connected tornadis.Client instance # it will be automatically released to the pool thanks to # the "with" keyword reply = yield client.call("PING") """ future = self.get_connected_client() cb = functools.partial(self._connected_client_release_cb, future) return ContextManagerFuture(future, cb) def _connected_client_release_cb(self, future=None): client = future.result() self.release_client(client) def release_client(self, client): """Releases a client object to the pool. Args: client: Client object. """ if isinstance(client, Client): if not self._is_expired_client(client): LOG.debug('Client is not expired. Adding back to pool') self.__pool.append(client) elif client.is_connected(): LOG.debug('Client is expired and connected. Disconnecting') client.disconnect() if self.__sem is not None: self.__sem.release() def destroy(self): """Disconnects all pooled client objects.""" while True: try: client = self.__pool.popleft() if isinstance(client, Client): client.disconnect() except IndexError: break @tornado.gen.coroutine def preconnect(self, size=-1): """(pre)Connects some or all redis clients inside the pool. Args: size (int): number of redis clients to build and to connect (-1 means all clients if pool max_size > -1) Raises: ClientError: when size == -1 and pool max_size == -1 """ if size == -1 and self.max_size == -1: raise ClientError("size=-1 not allowed with pool max_size=-1") limit = min(size, self.max_size) if size != -1 else self.max_size clients = yield [self.get_connected_client() for _ in range(0, limit)] for client in clients: self.release_client(client) def _make_client(self): """Makes and returns a Client object.""" kwargs = self.client_kwargs client = Client(**kwargs) return client
mit
-2,969,162,958,521,919,500
35.164179
79
0.554959
false
4.61817
false
false
false
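# A minimal usage sketch for the ClientPool entry above, assuming a Redis
# server reachable on localhost:6379 (host and port are illustrative Client
# constructor arguments, passed through via client_kwargs):

import tornado.gen
import tornado.ioloop
from tornadis.pool import ClientPool

pool = ClientPool(max_size=10, client_timeout=60, autoclose=True,
                  host="localhost", port=6379)

@tornado.gen.coroutine
def ping():
    # connected_client() yields a context manager that returns the client
    # to the pool when the "with" block exits
    with (yield pool.connected_client()) as client:
        reply = yield client.call("PING")
    raise tornado.gen.Return(reply)

if __name__ == "__main__":
    print(tornado.ioloop.IOLoop.current().run_sync(ping))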
wkerzendorf/wsynphot
wsynphot/base.py
1
15987
# defining the base filter curve classes

import os

from scipy import interpolate
from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D
import pandas as pd

from wsynphot.io.cache_filters import load_filter_index, load_transmission_data

from astropy import units as u, constants as const
from astropy import utils
import numpy as np

from wsynphot.calibration import get_vega_calibration_spectrum


def calculate_filter_flux_density(spectrum, filter):
    """
    Calculate the filter flux density by evaluating the integral

    .. math::

        F = \\int f_\\lambda(\\lambda) S(\\lambda) \\lambda d\\lambda

    where :math:`S(\\lambda)` is the filter transmission

    Parameters
    ----------

    spectrum: ~specutils.Spectrum1D
        spectrum object
    filter: ~wsynphot.FilterCurve

    :return:
    """
    filtered_spectrum = filter * spectrum
    filter_flux_density = np.trapz(filtered_spectrum.flux *
                                   filtered_spectrum.wavelength,
                                   filtered_spectrum.wavelength)
    return filter_flux_density


def calculate_vega_magnitude(spectrum, filter):
    filter_flux_density = calculate_filter_flux_density(spectrum, filter)
    wavelength_delta = filter.calculate_wavelength_delta()
    filtered_f_lambda = (filter_flux_density / wavelength_delta)
    zp_vega_f_lambda = filter.zp_vega_f_lambda
    return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda)


def calculate_ab_magnitude(spectrum, filter):
    filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) /
                         filter.calculate_wavelength_delta())

    return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda)


def list_filters():
    """
    List available filter sets along with their properties
    """
    return load_filter_index()


class BaseFilterCurve(object):
    """
    Basic filter curve class

    Parameters
    ----------

    wavelength: ~astropy.units.Quantity
        wavelength for filter curve

    transmission_lambda: numpy.ndarray
        transmission_lambda for filter curve

    interpolation_kind: str
        allowed interpolation kinds given in scipy.interpolate.interp1d
    """

    @classmethod
    def load_filter(cls, filter_id=None, interpolation_kind='linear'):
        """
        Parameters
        ----------

        filter_id: str or None
            if None is provided will return a DataFrame of all filters

        interpolation_kind: str
            see scipy.interpolate.interp1d
        """
        if filter_id is None:
            return list_filters()

        else:
            filter = load_transmission_data(filter_id)

            wavelength_unit = 'angstrom'

            wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit)

            return cls(wavelength, filter['Transmission'].values,
                       interpolation_kind=interpolation_kind,
                       filter_id=filter_id)

    def __init__(self, wavelength, transmission_lambda,
                 interpolation_kind='linear', filter_id=None):
        if not hasattr(wavelength, 'unit'):
            raise ValueError('the wavelength needs to be an astropy quantity')
        self.wavelength = wavelength
        self.transmission_lambda = transmission_lambda

        self.interpolation_object = interpolate.interp1d(
            self.wavelength,
            self.transmission_lambda,
            kind=interpolation_kind,
            bounds_error=False,
            fill_value=0.0)
        self.filter_id = filter_id

    def __mul__(self, other):
        if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'):
            raise ValueError('requiring a specutils.Spectrum1D-like object '
                             'that has attributes "flux" and "wavelength"')

        #new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value,
        #                            self.wavelength.value) * self.wavelength.unit
        transmission = self.interpolate(other.wavelength)

        return Spectrum1D.from_array(other.wavelength,
                                     transmission * other.flux)

    def __rmul__(self, other):
        return self.__mul__(other)

    @utils.lazyproperty
    def lambda_pivot(self):
        """
        Calculate the pivotal wavelength as defined in Bessell & Murphy 2012

        ..
math:: \\lambda_\\textrm{pivot} = \\sqrt{ \\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda}}}\\\\ <f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c} """ return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/ (np.trapz(self.transmission_lambda / self.wavelength, self.wavelength)))) @utils.lazyproperty def wavelength_start(self): return self.get_wavelength_start() @utils.lazyproperty def wavelength_end(self): return self.get_wavelength_end() @utils.lazyproperty def zp_ab_f_lambda(self): return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to( 'erg/s/cm^2/Angstrom', u.spectral()) @utils.lazyproperty def zp_ab_f_nu(self): return (3631 * u.Jy).to('erg/s/cm^2/Hz') @utils.lazyproperty def zp_vega_f_lambda(self): return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) / self.calculate_wavelength_delta()) def interpolate(self, wavelength): """ Interpolate the filter onto new wavelength grid Parameters ---------- wavelength: ~astropy.units.Quantity wavelength grid to interpolate on """ converted_wavelength = wavelength.to(self.wavelength.unit) return self.interpolation_object(converted_wavelength) def _calculuate_flux_density(self, wavelength, flux): return _calculcate_filter_flux_density(flux, self) def calculate_flux_density(self, spectrum): return calculate_filter_flux_density(spectrum, self) def calculate_f_lambda(self, spectrum): return (self.calculate_flux_density(spectrum) / self.calculate_wavelength_delta()) def calculate_wavelength_delta(self): """ Calculate the Integral :math:`\integral :return: """ return np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) def calculate_weighted_average_wavelength(self): """ Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}` Returns : ~astropy.units.Quantity """ return (np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) / self.calculate_wavelength_delta()) def calculate_vega_magnitude(self, spectrum): __doc__ = calculate_vega_magnitude.__doc__ return calculate_vega_magnitude(spectrum, self) def calculate_ab_magnitude(self, spectrum): __doc__ = calculate_ab_magnitude.__doc__ return calculate_ab_magnitude(spectrum, self) def convert_ab_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_ab_f_lambda def convert_vega_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_vega_f_lambda def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={}, format_filter_id=None): if scale_max is not None: if hasattr(scale_max, 'unit'): scale_max = scale_max.value transmission = (self.transmission_lambda * scale_max / self.transmission_lambda.max()) else: transmission = self.transmission_lambda ax.plot(self.wavelength, transmission, **plot_kwargs) ax.set_xlabel('Wavelength [{0}]'.format( self.wavelength.unit.to_string(format='latex'))) ax.set_ylabel('Transmission [1]') if make_label==True and self.filter_id is not None: if format_filter_id is not None: filter_id = format_filter_id(self.filter_id) else: filter_id = self.filter_id text_x = (self.lambda_pivot).value text_y = transmission.max()/2 ax.text(text_x, text_y, filter_id, horizontalalignment='center', verticalalignment='center', bbox=dict(facecolor='white', alpha=0.5)) def get_wavelength_start(self, threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(threshold)] def get_wavelength_end(self, 
threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)] class FilterCurve(BaseFilterCurve): def __repr__(self): if self.filter_id is None: filter_id = "{0:x}".format(self.__hash__()) else: filter_id = self.filter_id return "FilterCurve <{0}>".format(filter_id) class FilterSet(object): """ A set of filters Parameters ---------- filter_set: ~list a list of strings or a list of filters interpolation_kind: ~str scipy interpolaton kinds """ def __init__(self, filter_set, interpolation_kind='linear'): if hasattr(filter_set[0], 'wavelength'): self.filter_set = filter_set else: self.filter_set = [FilterCurve.load_filter(filter_id, interpolation_kind= interpolation_kind) for filter_id in filter_set] def __iter__(self): self.current_filter_idx = 0 return self def __next__(self): try: item = self.filter_set[self.current_filter_idx] except IndexError: raise StopIteration self.current_filter_idx += 1 return item next = __next__ def __getitem__(self, item): return self.filter_set.__getitem__(item) def __repr__(self): return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join( [item.filter_id for item in self.filter_set])) @property def lambda_pivot(self): return u.Quantity([item.lambda_pivot for item in self]) def calculate_f_lambda(self, spectrum): return u.Quantity( [item.calculate_f_lambda(spectrum) for item in self.filter_set]) def calculate_ab_magnitudes(self, spectrum): mags = [item.calculate_ab_magnitude(spectrum) for item in self.filter_set] return mags def calculate_vega_magnitudes(self, spectrum): mags = [item.calculate_vega_magnitude(spectrum) for item in self.filter_set] return mags def convert_ab_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_ab_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return 
np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_vega_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def plot_spectrum(self, spectrum, ax, make_labels=True, spectrum_plot_kwargs={}, filter_plot_kwargs={}, filter_color_list=None, format_filter_id=None): """ plot a spectrum with the given filters spectrum: ax: make_labels: :return: """ ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs) for i, filter in enumerate(self.filter_set): filter_scale = filter.calculate_f_lambda(spectrum) if filter_color_list is not None: filter_plot_kwargs['color'] = filter_color_list[i] filter.plot(ax, scale_max=filter_scale, make_label=make_labels, plot_kwargs=filter_plot_kwargs, format_filter_id=format_filter_id) class MagnitudeSet(FilterSet): def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None, interpolation_kind='linear'): super(MagnitudeSet, self).__init__(filter_set, interpolation_kind= interpolation_kind) self.magnitudes = np.array(magnitudes) self.magnitude_uncertainties = np.array(magnitude_uncertainties) def __repr__(self): mag_str = '{0} {1:.4f} +/- {2:.4f}' mag_data = [] for i, filter in enumerate(self.filter_set): unc = (np.nan if self.magnitude_uncertainties is None else self.magnitude_uncertainties[i]) mag_data.append(mag_str.format(filter.filter_id, self.magnitudes[i], unc)) return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join(mag_data))
bsd-3-clause
6,882,483,729,775,752,000
33.454741
108
0.585851
false
4.021887
false
false
false
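# lambda_pivot in the wsynphot entry above evaluates
#   sqrt( trapz(S * wl, wl) / trapz(S / wl, wl) )
# For a box filter this can be checked against the closed form
# sqrt((b**2 - a**2) / (2 * ln(b / a))); a quick standalone verification
# with plain NumPy (no wsynphot import needed):

import numpy as np

wavelength = np.linspace(4000.0, 6000.0, 2001)   # Angstrom, 1 A steps
transmission = np.where((wavelength >= 4500.0) & (wavelength <= 5500.0),
                        1.0, 0.0)

lambda_pivot = np.sqrt(np.trapz(transmission * wavelength, wavelength) /
                       np.trapz(transmission / wavelength, wavelength))

analytic = np.sqrt((5500.0 ** 2 - 4500.0 ** 2) /
                   (2.0 * np.log(5500.0 / 4500.0)))
assert abs(lambda_pivot - analytic) < 1.0        # both ~4991.6 Angstrom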
ChrisTimperley/PythonCGum
cgum/program.py
1
5861
from cgum.basic import * from cgum.utility import FNULL from pprint import pprint import cgum.statement as statement import cgum.expression as expression import cgum.preprocessor as preprocessor import cgum.typs as typs from subprocess import Popen, CalledProcessError import os.path import json import tempfile import codecs # TODO: Probe class Asm(Node): CODE = "260800" LABEL = "Asm" def __init__(self, pos, length, label, children): assert label is None super().__init__(pos, length, label, children) class Label(Node): CODE = "270100" LABEL = "Label" def __init__(self, pos, length, label, children): assert label is None assert len(children) in [1, 2] assert isinstance(children[0], GenericString) super().__init__(pos, length, label, children) def name(self): return self.__children[0].to_s() def statement(self): children = self.children() if len(children) == 2: return children[1] return None class FunctionParameter(Node): CODE = "220100" LABEL = "ParameterType" def __init__(self, pos, length, label, children): assert label is None assert len(children) <= 2 # Find the optional type and name of this parameter tmp = children.copy() self.__typ = \ tmp.pop(0) if (tmp and isinstance(tmp[0], typs.FullType)) else None self.__name = tmp.pop(0) if tmp else None assert self.__typ is None or isinstance(self.__typ, typs.FullType) assert self.__name is None or isinstance(self.__name, GenericString) super().__init__(pos, length, label, children) def is_incomplete(self): return self.name() is None def typ(self): return self.__typ.to_s() if self.__typ else None def name(self): return self.__name.to_s() if self.__name else None class FunctionParameters(Node): CODE = "200000" LABEL = "ParamList" def __init__(self, pos, length, label, children): assert label is None assert all([isinstance(c, FunctionParameter) for c in children]) super().__init__(pos, length, label, children) def parameters(self): return self.__children class FunctionDefinition(Node): CODE = "380000" LABEL = "Definition" @staticmethod def from_json(jsn): return FunctionDefinition(jsn['pos'], name, params, block, storage, dots) def __init__(self, pos, length, label, children): assert len(children) >= 3 and len(children) <= 5 tmp = children.copy() self.__storage = \ tmp.pop(0) if isinstance(tmp[0], typs.Storage) else None self.__parameters = tmp.pop(0) self.__dots = \ tmp.pop(0) if isinstance(tmp[0], typs.DotsParameter) else None self.__name = tmp.pop(0) self.__block = tmp.pop(0) assert isinstance(self.__parameters, FunctionParameters) assert self.__dots is None or \ isinstance(self.__dots, typs.DotsParameter) assert self.__storage is None or \ isinstance(self.__storage, typs.Storage) assert isinstance(self.__name, GenericString) assert isinstance(self.__block, statement.Block) super().__init__(pos, length, label, children) def name(self): return self.__name def parameters(self): return self.__parameters def block(self): return self.__block def storage(self): return self.__storage def dots(self): return self.__dots def is_variadic(self): return not (self.dots() is None) # Used to mark the end of the program! class FinalDef(Token): CODE = "450800" LABEL = "FinalDef" # Represents the root AST node for a program # For now we just get all the "components" of a program and worry about what # kind of components they might be later. 
#
# Throw away the FinalDef
class Program(Node):
    CODE = "460000"
    LABEL = "Program"

    # Generates an AST for a given source code file, using GumTree and CGum
    @staticmethod
    def from_source_file(fn):
        tmp_f = tempfile.NamedTemporaryFile()
        Program.parse_to_json_file(fn, tmp_f)
        return Program.from_json_file(tmp_f.name)

    # Parses a JSON CGum AST, stored in a file at a specified location, into an
    # equivalent, Python representation
    @staticmethod
    def from_json_file(fn):
        #print("Attempting to read CGum AST from a JSON file: %s" % fn)
        assert os.path.isfile(fn), "file not found"
        with codecs.open(fn, 'r', 'utf-8') as f:
            program = Node.from_json(json.load(f)['root'])
        #print("Finished converting CGum AST from JSON into Python")
        program.renumber()
        return program

    def __init__(self, pos, length, label, children):
        assert label is None
        assert len(children) >= 1
        assert isinstance(children[-1], FinalDef)
        children.pop()
        super().__init__(pos, length, label, children)

    @staticmethod
    def parse_to_json_file(src_fn, jsn_f):
        with tempfile.TemporaryFile() as f_err:
            cmd = "gumtree parse \"%s\"" % src_fn
            p = Popen(cmd, shell=True, stdin=FNULL, stdout=jsn_f,
                      stderr=f_err)
            code = p.wait()

            # read the contents of the standard error
            f_err.seek(0)
            err = str(f_err.read())[2:-1]

            # ensure the exit status was zero
            if code != 0:
                raise Exception("ERROR [PyCGum/parse_to_json_file]: unexpected exit code - %s" % err)

            # run-time exceptions can occur whilst still returning an exit status
            # of zero
            elif err.startswith("java.lang.RuntimeException:"):
                raise Exception("ERROR [PyCGum/parse_to_json_file]: %s" % err)
mit
3,467,776,033,047,293,000
31.743017
103
0.613888
false
3.866095
false
false
false
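# A usage sketch for the Program entry above. Parsing shells out to the
# GumTree command line tool, so this assumes "gumtree" is on the PATH and
# that an "example.c" file exists (both names are illustrative):

from cgum.program import Program

program = Program.from_source_file("example.c")
# Program is the root node of the CGum AST; the trailing FinalDef marker is
# stripped by the constructor, leaving the top-level definitions as children
print(len(program.children()))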
greenonion/pytvd
tvdip.py
1
8146
""" tvdip.py ~~~~~~~~ This module is a direct port of the original [1] tvdip Matlab script into NumPy. [1] M.A. Little, Nick S. Jones (2010) "Sparse Bayesian Step-Filtering for High- Throughput Analysis of Molecular Machine Dynamics", in 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, 2010, ICASSP 2010 Proceedings. """ import numpy as np import scipy as Sci from scipy import sparse from scipy.sparse import linalg import sys def tvdiplmax(y): """Calculate the value of lambda so that if lambda >= lambdamax, the TVD functional solved by TVDIP is minimized by the trivial constant solution x = mean(y). This can then be used to determine a useful range of values of lambda, for example. Args: y: Original signal to denoise, size N x 1. Returns: lambdamax: Value of lambda at which x = mean(y) is the output of the TVDIP function. """ N = y.size M = N - 1 # Construct sparse operator matrices I1 = sparse.eye(M) O1 = sparse.dia_matrix((M, 1)) D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1]) DDT = D.dot(D.conj().T) Dy = D.dot(y) lambdamax = np.absolute(linalg.spsolve(DDT, Dy)).max(0) return lambdamax def tvdip(y, lambdas, display=1, stoptol=1e-3, maxiter=60): """Performs discrete total variation denoising (TVD) using a primal-dual interior-point solver. It minimizes the following discrete functional: E=(1/2)||y-x||_2^2+lambda*||Dx||_1 over the variable x, given the input signal y, according to each value of the regularization parametero lambda > 0. D is the first difference matrix. Uses hot-restarts from each value of lambda to speed up convergence for subsequent values: best use of the feature is made by ensuring that the chosen lambda values are close to each other. Args: y: Original signal to denoise, size N x 1. lambdas: A vector of positive regularization parameters, size L x 1. TVD will be applied to each value in the vector. display: (Optional) Set to 0 to turn off progress display, 1 to turn on. Defaults to 1. stoptol: (Optional) Precision as determined by duality gap tolerance, if not specified defaults to 1e-3. maxiter: (Optional) Maximum interior-point iterations, if not specified defaults to 60. Returns: x: Denoised output signal for each value of lambda, size N x L. E: Objective functional at minimum for each lamvda, size L x 1. s: Optimization result, 1 = solved, 0 = maximum iterations exceeded before reaching duality gap tolerance, size L x 1. lambdamax: Maximum value of lambda for the given y. If lambda >= lambdamax, the output is the trivial constant solution x = mean(y). Example: >>> import numpy as np >>> import tvdip as tv >>> # Find the value of lambda greater than which the TVD solution is >>> # just the mean. >>> lmax = tv.tvdiplmax(y) >>> # Perform TV denoising for lambda across a range of values up to a >>> # small fraction of the maximum found above. 
>>> lratio = np.array([1e-4, 1e-3, 1e-2, 1e-1]) >>> x, E, status, l_max = tv.tvdip(y, lmax*lratio, True, 1e-3) >>> plot(x[:,0]) """ # Search tuning parameters ALPHA = 0.01 # Backtracking linesearch parameter (0,0.5] BETA = 0.5 # Backtracking linesearch parameter (0,1) MAXLSITER = 20 # Max iterations of backtracking linesearch MU = 2 # t update N = y.size # Length of input signal y M = N - 1 # Size of Dx # Construct sparse operator matrices I1 = sparse.eye(M) O1 = sparse.dia_matrix((M, 1)) D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1]) DDT = D.dot(D.conj().T) Dy = D.dot(y) # Find max value of lambda lambdamax = (np.absolute(linalg.spsolve(DDT, Dy))).max(0) if display: print "lambda_max=%5.2e" % lambdamax L = lambdas.size x = np.zeros((N, L)) s = np.zeros((L, 1)) E = np.zeros((L, 1)) # Optimization variables set up once at the start z = np.zeros((M, 1)) mu1 = np.ones((M, 1)) mu2 = np.ones((M, 1)) # Work through each value of lambda, with hot-restart on optimization # variables for idx, l in enumerate(lambdas): t = 1e-10 step = np.inf f1 = z - l f2 = -z - l # Main optimization loop s[idx] = 1 if display: print "Solving for lambda={0:5.2e}, lambda/lambda_max={1:5.2e}".format(l, l/lambdamax) print "Iter# primal Dual Gap" for iters in xrange(maxiter): DTz = (z.conj().T * D).conj().T DDTz = D.dot(DTz) w = Dy - (mu1 - mu2) # Calculate objectives and primal-dual gap pobj1 = 0.5*w.conj().T.dot(linalg.spsolve(DDT,w))+l*(np.sum(mu1+mu2)) pobj2 = 0.5*DTz.conj().T.dot(DTz)+l*np.sum(np.absolute(Dy-DDTz)) pobj = np.minimum(pobj1, pobj2) dobj = -0.5*DTz.conj().T.dot(DTz) + Dy.conj().T.dot(z) gap = pobj - dobj if display: print "{:5d} {:7.2e} {:7.2e} {:7.2e}".format(iters, pobj[0, 0], dobj[0, 0], gap[0, 0]) # Test duality gap stopping criterion if gap <= stoptol: s[idx] = 1 break if step >= 0.2: t = np.maximum(2*M*MU/gap, 1.2*t) # Do Newton step rz = DDTz - w Sdata = (mu1/f1 + mu2/f2) S = DDT-sparse.csc_matrix((Sdata.reshape(Sdata.size), (np.arange(M), np.arange(M)))) r = -DDTz + Dy + (1/t)/f1 - (1/t)/f2 dz = linalg.spsolve(S, r).reshape(r.size, 1) dmu1 = -(mu1+((1/t)+dz*mu1)/f1) dmu2 = -(mu2+((1/t)-dz*mu2)/f2) resDual = rz.copy() resCent = np.vstack((-mu1*f1-1/t, -mu2*f2-1/t)) residual = np.vstack((resDual, resCent)) # Perform backtracking linesearch negIdx1 = dmu1 < 0 negIdx2 = dmu2 < 0 step = 1 if np.any(negIdx1): step = np.minimum(step, 0.99*(-mu1[negIdx1]/dmu1[negIdx1]).min(0)) if np.any(negIdx2): step = np.minimum(step, 0.99*(-mu2[negIdx2]/dmu2[negIdx2]).min(0)) for _ in xrange(MAXLSITER): newz = z + step*dz newmu1 = mu1 + step*dmu1 newmu2 = mu2 + step*dmu2 newf1 = newz - l newf2 = -newz - l # Update residuals newResDual = DDT.dot(newz) - Dy + newmu1 - newmu2 newResCent = np.vstack((-newmu1*newf1-1/t, -newmu2*newf2-1/t)) newResidual = np.vstack((newResDual, newResCent)) if (np.maximum(newf1.max(0), newf2.max(0)) < 0 and (Sci.linalg.norm(newResidual) <= (1-ALPHA*step)*Sci.linalg.norm(residual))): break step = BETA * step # Update primal and dual optimization parameters z = newz mu1 = newmu1 mu2 = newmu2 f1 = newf1 f2 = newf2 x[:, idx] = (y-D.conj().T.dot(z)).reshape(x.shape[0]) xval = x[:, idx].reshape(x.shape[0], 1) E[idx] = 0.5*np.sum((y-xval)**2)+l*np.sum(np.absolute(D.dot(xval))) # We may have a close solution that does not satisfy the duality gap if iters >= maxiter: s[idx] = 0 if display: if s[idx]: print("Solved to precision of duality gap %5.2e") % gap else: print("Max iterations exceeded - solution may be inaccurate") return x, E, s, lambdamax
gpl-2.0
-8,849,415,337,858,050,000
33.811966
98
0.545912
false
3.385702
false
false
false
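# An end-to-end sketch for the tvdip entry above: denoise a noisy two-level
# step signal and check that the total variation drops. The module uses
# Python 2 syntax (print statements, xrange), so this assumes a matching
# interpreter; input signals are column vectors, as in the docstring example:

import numpy as np
import tvdip as tv

rng = np.random.RandomState(0)
y = np.concatenate([np.zeros(100), np.ones(100)]).reshape(-1, 1)
y_noisy = y + 0.1 * rng.randn(200, 1)

lmax = tv.tvdiplmax(y_noisy)
x, E, status, _ = tv.tvdip(y_noisy, lmax * np.array([0.01]), display=0)

# the denoised signal should be much flatter than the noisy input
assert np.abs(np.diff(x[:, 0])).sum() < np.abs(np.diff(y_noisy[:, 0])).sum()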
tobykurien/MakerDroid
assetsrc/public.mp3/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py
1
4561
""" This page is in the table of contents. The gts.py script is an import translator plugin to get a carving from an gts file. An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name. The getCarving function takes the file name of an gts file and returns the carving. The GNU Triangulated Surface (.gts) format is described at: http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE "All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertices, nv, the second is the number of edges, ne and the third is the number of faces, nf. Follows nv lines containing the x, y and z coordinates of the vertices. Follows ne lines containing the two indices (starting from one) of the vertices of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face. The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertices, edges or faces). When read with different object classes, these extra attributes are just ignored." This example gets a carving for the gts file Screw Holder Bottom.gts. This example is run in a terminal in the folder which contains Screw Holder Bottom.gts and gts.py. > python Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31) [GCC 4.2.1 (SUSE Linux)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import gts >>> gts.getCarving() [11.6000003815, 10.6837882996, 7.80209827423 .. many more lines of the carving .. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import gcodec from fabmetheus_utilities.solids import triangle_mesh __author__ = "Enrique Perez (perez_enrique@yahoo.com)" __credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>' __date__ = "$Date: 2008/21/04 $" __license__ = "GPL 3.0" def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ): "Initialize from a GNU Triangulated Surface Text." 
if gnuTriangulatedSurfaceText == '': return None lines = gcodec.getTextLines( gnuTriangulatedSurfaceText ) linesWithoutComments = [] for line in lines: if len( line ) > 0: firstCharacter = line[ 0 ] if firstCharacter != '#' and firstCharacter != '!': linesWithoutComments.append( line ) splitLine = linesWithoutComments[ 0 ].split() numberOfVertices = int( splitLine[ 0 ] ) numberOfEdges = int( splitLine[ 1 ] ) numberOfFaces = int( splitLine[ 2 ] ) faceTriples = [] for vertexIndex in xrange( numberOfVertices ): line = linesWithoutComments[ vertexIndex + 1 ] splitLine = line.split() vertex = Vector3( float( splitLine[ 0 ] ), float( splitLine[ 1 ] ), float( splitLine[ 2 ] ) ) triangleMesh.vertices.append( vertex ) edgeStart = numberOfVertices + 1 for edgeIndex in xrange( numberOfEdges ): line = linesWithoutComments[ edgeIndex + edgeStart ] splitLine = line.split() vertexIndexes = [] for word in splitLine[ : 2 ]: vertexIndexes.append( int( word ) - 1 ) edge = triangle_mesh.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes ) triangleMesh.edges.append( edge ) faceStart = edgeStart + numberOfEdges for faceIndex in xrange( numberOfFaces ): line = linesWithoutComments[ faceIndex + faceStart ] splitLine = line.split() edgeIndexes = [] for word in splitLine[ : 3 ]: edgeIndexes.append( int( word ) - 1 ) face = triangle_mesh.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex ) triangleMesh.faces.append( face ) return triangleMesh def getCarving( fileName ): "Get the carving for the gts file." return getFromGNUTriangulatedSurfaceText( gcodec.getFileText( fileName ), triangle_mesh.TriangleMesh() )
gpl-3.0
1,615,288,114,955,321,900
48.043011
441
0.754221
false
3.497699
false
false
false
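# The reader in the gts entry above expects a header line "nv ne nf", then
# nv vertex lines, ne edge lines (1-based vertex indexes) and nf face lines
# (1-based edge indexes). A single-triangle surface fed straight to the
# parser; this sketch assumes the fabmetheus packages used by gts.py are
# importable and runs under Python 2, like the module itself:

from fabmetheus_utilities.solids import triangle_mesh
import gts

gts_text = """3 3 1
0.0 0.0 0.0
1.0 0.0 0.0
0.0 1.0 0.0
1 2
2 3
3 1
1 2 3"""

mesh = gts.getFromGNUTriangulatedSurfaceText(gts_text,
                                             triangle_mesh.TriangleMesh())
print(len(mesh.vertices), len(mesh.edges), len(mesh.faces))  # 3, 3, 1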
DarkFenX/Pyfa
eos/saveddata/targetProfile.py
1
20115
# =============================================================================== # Copyright (C) 2014 Ryan Holmes # # This file is part of eos. # # eos is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # eos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with eos. If not, see <http://www.gnu.org/licenses/>. # =============================================================================== import math import re from collections import OrderedDict from logbook import Logger from sqlalchemy.orm import reconstructor import eos.db pyfalog = Logger(__name__) BUILTINS = OrderedDict([ # 0 is taken by ideal target profile, composed manually in one of TargetProfile methods (-1, ('Uniform (25%)', 0.25, 0.25, 0.25, 0.25)), (-2, ('Uniform (50%)', 0.50, 0.50, 0.50, 0.50)), (-3, ('Uniform (75%)', 0.75, 0.75, 0.75, 0.75)), (-4, ('Uniform (90%)', 0.90, 0.90, 0.90, 0.90)), (-5, ('[T1 Resist]Shield', 0.0, 0.20, 0.40, 0.50)), (-6, ('[T1 Resist]Armor', 0.50, 0.45, 0.25, 0.10)), (-7, ('[T1 Resist]Hull', 0.33, 0.33, 0.33, 0.33)), (-8, ('[T1 Resist]Shield (+T2 DCU)', 0.125, 0.30, 0.475, 0.562)), (-9, ('[T1 Resist]Armor (+T2 DCU)', 0.575, 0.532, 0.363, 0.235)), (-10, ('[T1 Resist]Hull (+T2 DCU)', 0.598, 0.598, 0.598, 0.598)), (-11, ('[T2 Resist]Amarr (Shield)', 0.0, 0.20, 0.70, 0.875)), (-12, ('[T2 Resist]Amarr (Armor)', 0.50, 0.35, 0.625, 0.80)), (-13, ('[T2 Resist]Caldari (Shield)', 0.20, 0.84, 0.76, 0.60)), (-14, ('[T2 Resist]Caldari (Armor)', 0.50, 0.8625, 0.625, 0.10)), (-15, ('[T2 Resist]Gallente (Shield)', 0.0, 0.60, 0.85, 0.50)), (-16, ('[T2 Resist]Gallente (Armor)', 0.50, 0.675, 0.8375, 0.10)), (-17, ('[T2 Resist]Minmatar (Shield)', 0.75, 0.60, 0.40, 0.50)), (-18, ('[T2 Resist]Minmatar (Armor)', 0.90, 0.675, 0.25, 0.10)), (-19, ('[NPC][Asteroid]Angel Cartel', 0.54, 0.42, 0.37, 0.32)), (-20, ('[NPC][Asteroid]Blood Raiders', 0.34, 0.39, 0.45, 0.52)), (-21, ('[NPC][Asteroid]Guristas', 0.55, 0.35, 0.3, 0.48)), (-22, ('[NPC][Asteroid]Rogue Drones', 0.35, 0.38, 0.44, 0.49)), (-23, ('[NPC][Asteroid]Sanshas Nation', 0.35, 0.4, 0.47, 0.53)), (-24, ('[NPC][Asteroid]Serpentis', 0.49, 0.38, 0.29, 0.51)), (-25, ('[NPC][Deadspace]Angel Cartel', 0.59, 0.48, 0.4, 0.32)), (-26, ('[NPC][Deadspace]Blood Raiders', 0.31, 0.39, 0.47, 0.56)), (-27, ('[NPC][Deadspace]Guristas', 0.57, 0.39, 0.31, 0.5)), (-28, ('[NPC][Deadspace]Rogue Drones', 0.42, 0.42, 0.47, 0.49)), (-29, ('[NPC][Deadspace]Sanshas Nation', 0.31, 0.39, 0.47, 0.56)), (-30, ('[NPC][Deadspace]Serpentis', 0.49, 0.38, 0.29, 0.56)), (-31, ('[NPC][Mission]Amarr Empire', 0.34, 0.38, 0.42, 0.46)), (-32, ('[NPC][Mission]Caldari State', 0.51, 0.38, 0.3, 0.51)), (-33, ('[NPC][Mission]CONCORD', 0.47, 0.46, 0.47, 0.47)), (-34, ('[NPC][Mission]Gallente Federation', 0.51, 0.38, 0.31, 0.52)), (-35, ('[NPC][Mission]Khanid', 0.51, 0.42, 0.36, 0.4)), (-36, ('[NPC][Mission]Minmatar Republic', 0.51, 0.46, 0.41, 0.35)), (-37, ('[NPC][Mission]Mordus Legion', 0.32, 0.48, 0.4, 0.62)), (-38, ('[NPC][Other]Sleeper', 0.61, 0.61, 0.61, 0.61)), (-39, ('[NPC][Other]Sansha Incursion', 0.65, 0.63, 0.64, 0.65)), (-40, ('[NPC][Burner]Cruor 
(Blood Raiders)', 0.8, 0.73, 0.69, 0.67)), (-41, ('[NPC][Burner]Dramiel (Angel)', 0.35, 0.48, 0.61, 0.68)), (-42, ('[NPC][Burner]Daredevil (Serpentis)', 0.69, 0.59, 0.59, 0.43)), (-43, ('[NPC][Burner]Succubus (Sanshas Nation)', 0.35, 0.48, 0.61, 0.68)), (-44, ('[NPC][Burner]Worm (Guristas)', 0.48, 0.58, 0.69, 0.74)), (-45, ('[NPC][Burner]Enyo', 0.58, 0.72, 0.86, 0.24)), (-46, ('[NPC][Burner]Hawk', 0.3, 0.86, 0.79, 0.65)), (-47, ('[NPC][Burner]Jaguar', 0.78, 0.65, 0.48, 0.56)), (-48, ('[NPC][Burner]Vengeance', 0.66, 0.56, 0.75, 0.86)), (-49, ('[NPC][Burner]Ashimmu (Blood Raiders)', 0.8, 0.76, 0.68, 0.7)), (-50, ('[NPC][Burner]Talos', 0.68, 0.59, 0.59, 0.43)), (-51, ('[NPC][Burner]Sentinel', 0.58, 0.45, 0.52, 0.66)), # Source: ticket #2067 (-52, ('[NPC][Invasion]Invading Precursor Entities', 0.422, 0.367, 0.453, 0.411)), (-53, ('[NPC][Invasion]Retaliating Amarr Entities', 0.360, 0.310, 0.441, 0.602)), (-54, ('[NPC][Invasion]Retaliating Caldari Entities', 0.287, 0.610, 0.487, 0.401)), (-55, ('[NPC][Invasion]Retaliating Gallente Entities', 0.383, 0.414, 0.578, 0.513)), (-56, ('[NPC][Invasion]Retaliating Minmatar Entities', 0.620, 0.422, 0.355, 0.399)), (-57, ('[NPC][Abyssal][Dark Matter All Tiers]Drones', 0.439, 0.522, 0.529, 0.435)), (-58, ('[NPC][Abyssal][Dark Matter All Tiers]Overmind', 0.626, 0.576, 0.612, 0.624)), (-59, ('[NPC][Abyssal][Dark Matter All Tiers]Seeker', 0.082, 0.082, 0.082, 0.082)), (-60, ('[NPC][Abyssal][Dark Matter All Tiers]Triglavian', 0.477, 0.401, 0.449, 0.37)), (-61, ('[NPC][Abyssal][Dark Matter All Tiers]Drifter', 0.403, 0.403, 0.403, 0.403)), (-62, ('[NPC][Abyssal][Dark Matter All Tiers]Sleeper', 0.435, 0.435, 0.435, 0.435)), (-63, ('[NPC][Abyssal][Dark Matter All Tiers]All', 0.507, 0.477, 0.502, 0.493)), (-64, ('[NPC][Abyssal][Electrical T1/T2]Drones', 0.323, 0.522, 0.529, 0.435)), (-65, ('[NPC][Abyssal][Electrical T1/T2]Overmind', 0.521, 0.576, 0.612, 0.624)), (-66, ('[NPC][Abyssal][Electrical T1/T2]Seeker', 0, 0.082, 0.082, 0.082)), (-67, ('[NPC][Abyssal][Electrical T1/T2]Triglavian', 0.333, 0.401, 0.449, 0.37)), (-68, ('[NPC][Abyssal][Electrical T1/T2]Drifter', 0.267, 0.403, 0.403, 0.403)), (-69, ('[NPC][Abyssal][Electrical T1/T2]Sleeper', 0.329, 0.435, 0.435, 0.435)), (-70, ('[NPC][Abyssal][Electrical T1/T2]All', 0.385, 0.477, 0.502, 0.493)), (-71, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drones', 0.255, 0.522, 0.529, 0.435)), (-72, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Overmind', 0.457, 0.576, 0.612, 0.624)), (-73, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Seeker', 0, 0.082, 0.082, 0.082)), (-74, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Triglavian', 0.241, 0.401, 0.449, 0.37)), (-75, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drifter', 0.184, 0.403, 0.403, 0.403)), (-76, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Sleeper', 0.268, 0.435, 0.435, 0.435)), (-77, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]All', 0.313, 0.477, 0.502, 0.493)), (-78, ('[NPC][Abyssal][Electrical T4/T5]Drones', 0.193, 0.522, 0.529, 0.435)), (-79, ('[NPC][Abyssal][Electrical T4/T5]Overmind', 0.398, 0.576, 0.612, 0.624)), (-80, ('[NPC][Abyssal][Electrical T4/T5]Seeker', 0, 0.082, 0.082, 0.082)), (-81, ('[NPC][Abyssal][Electrical T4/T5]Triglavian', 0.183, 0.401, 0.449, 0.37)), (-82, ('[NPC][Abyssal][Electrical T4/T5]Drifter', 0.107, 0.403, 0.403, 0.403)), (-83, ('[NPC][Abyssal][Electrical T4/T5]Sleeper', 0.215, 0.435, 0.435, 0.435)), (-84, ('[NPC][Abyssal][Electrical T4/T5]All', 0.25, 0.477, 0.502, 0.493)), (-85, ('[NPC][Abyssal][Firestorm 
T1/T2]Drones', 0.461, 0.425, 0.541, 0.443)), (-86, ('[NPC][Abyssal][Firestorm T1/T2]Overmind', 0.65, 0.469, 0.625, 0.633)), (-87, ('[NPC][Abyssal][Firestorm T1/T2]Seeker', 0.084, 0, 0.084, 0.084)), (-88, ('[NPC][Abyssal][Firestorm T1/T2]Triglavian', 0.534, 0.266, 0.484, 0.366)), (-89, ('[NPC][Abyssal][Firestorm T1/T2]Drifter', 0.422, 0.282, 0.422, 0.422)), (-90, ('[NPC][Abyssal][Firestorm T1/T2]Sleeper', 0.512, 0.402, 0.512, 0.512)), (-91, ('[NPC][Abyssal][Firestorm T1/T2]All', 0.541, 0.365, 0.524, 0.504)), (-92, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drones', 0.461, 0.36, 0.541, 0.443)), (-93, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Overmind', 0.65, 0.391, 0.625, 0.633)), (-94, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Seeker', 0.084, 0, 0.084, 0.084)), (-95, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Triglavian', 0.534, 0.161, 0.484, 0.366)), (-96, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drifter', 0.422, 0.196, 0.422, 0.422)), (-97, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Sleeper', 0.512, 0.337, 0.512, 0.512)), (-98, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]All', 0.541, 0.284, 0.524, 0.504)), (-99, ('[NPC][Abyssal][Firestorm T4/T5]Drones', 0.461, 0.305, 0.541, 0.443)), (-100, ('[NPC][Abyssal][Firestorm T4/T5]Overmind', 0.65, 0.323, 0.625, 0.633)), (-101, ('[NPC][Abyssal][Firestorm T4/T5]Seeker', 0.084, 0, 0.084, 0.084)), (-102, ('[NPC][Abyssal][Firestorm T4/T5]Triglavian', 0.534, 0.082, 0.484, 0.366)), (-103, ('[NPC][Abyssal][Firestorm T4/T5]Drifter', 0.422, 0.114, 0.422, 0.422)), (-104, ('[NPC][Abyssal][Firestorm T4/T5]Sleeper', 0.512, 0.276, 0.512, 0.512)), (-105, ('[NPC][Abyssal][Firestorm T4/T5]All', 0.541, 0.214, 0.524, 0.504)), (-106, ('[NPC][Abyssal][Exotic T1/T2]Drones', 0.439, 0.522, 0.417, 0.435)), (-107, ('[NPC][Abyssal][Exotic T1/T2]Overmind', 0.626, 0.576, 0.496, 0.624)), (-108, ('[NPC][Abyssal][Exotic T1/T2]Seeker', 0.082, 0.082, 0, 0.082)), (-109, ('[NPC][Abyssal][Exotic T1/T2]Triglavian', 0.477, 0.401, 0.284, 0.37)), (-110, ('[NPC][Abyssal][Exotic T1/T2]Drifter', 0.403, 0.403, 0.267, 0.403)), (-111, ('[NPC][Abyssal][Exotic T1/T2]Sleeper', 0.435, 0.435, 0.329, 0.435)), (-112, ('[NPC][Abyssal][Exotic T1/T2]All', 0.507, 0.477, 0.373, 0.493)), (-113, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drones', 0.439, 0.522, 0.351, 0.435)), (-114, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Overmind', 0.626, 0.576, 0.419, 0.624)), (-115, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Seeker', 0.082, 0.082, 0, 0.082)), (-116, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Triglavian', 0.477, 0.401, 0.176, 0.37)), (-117, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drifter', 0.403, 0.403, 0.184, 0.403)), (-118, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.268, 0.435)), (-119, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]All', 0.507, 0.477, 0.293, 0.493)), (-120, ('[NPC][Abyssal][Exotic T4/T5]Drones', 0.439, 0.522, 0.293, 0.435)), (-121, ('[NPC][Abyssal][Exotic T4/T5]Overmind', 0.626, 0.576, 0.344, 0.624)), (-122, ('[NPC][Abyssal][Exotic T4/T5]Seeker', 0.082, 0.082, 0, 0.082)), (-123, ('[NPC][Abyssal][Exotic T4/T5]Triglavian', 0.477, 0.401, 0.107, 0.37)), (-124, ('[NPC][Abyssal][Exotic T4/T5]Drifter', 0.403, 0.403, 0.107, 0.403)), (-125, ('[NPC][Abyssal][Exotic T4/T5]Sleeper', 0.435, 0.435, 0.215, 0.435)), (-126, ('[NPC][Abyssal][Exotic T4/T5]All', 0.507, 0.477, 0.223, 0.493)), (-127, ('[NPC][Abyssal][Gamma T1/T2]Drones', 0.449, 0.54, 0.549, 0.336)), (-128, ('[NPC][Abyssal][Gamma T1/T2]Overmind', 0.6, 0.557, 0.601, 0.504)), (-129, 
('[NPC][Abyssal][Gamma T1/T2]Seeker', 0.085, 0.085, 0.085, 0)), (-130, ('[NPC][Abyssal][Gamma T1/T2]Triglavian', 0.463, 0.392, 0.447, 0.193)), (-131, ('[NPC][Abyssal][Gamma T1/T2]Drifter', 0.428, 0.428, 0.428, 0.287)), (-132, ('[NPC][Abyssal][Gamma T1/T2]Sleeper', 0.435, 0.435, 0.435, 0.329)), (-133, ('[NPC][Abyssal][Gamma T1/T2]All', 0.493, 0.472, 0.5, 0.362)), (-134, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drones', 0.449, 0.54, 0.549, 0.264)), (-135, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Overmind', 0.6, 0.557, 0.601, 0.428)), (-136, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Seeker', 0.085, 0.085, 0.085, 0)), (-137, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Triglavian', 0.463, 0.392, 0.447, 0.071)), (-138, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drifter', 0.428, 0.428, 0.428, 0.2)), (-139, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.435, 0.268)), (-140, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]All', 0.493, 0.472, 0.5, 0.28)), (-141, ('[NPC][Abyssal][Gamma T4/T5]Drones', 0.449, 0.54, 0.549, 0.197)), (-142, ('[NPC][Abyssal][Gamma T4/T5]Overmind', 0.6, 0.557, 0.601, 0.356)), (-143, ('[NPC][Abyssal][Gamma T4/T5]Seeker', 0.085, 0.085, 0.085, 0)), (-144, ('[NPC][Abyssal][Gamma T4/T5]Triglavian', 0.463, 0.392, 0.447, 0.029)), (-145, ('[NPC][Abyssal][Gamma T4/T5]Drifter', 0.428, 0.428, 0.428, 0.117)), (-146, ('[NPC][Abyssal][Gamma T4/T5]Sleeper', 0.435, 0.435, 0.435, 0.215)), (-147, ('[NPC][Abyssal][Gamma T4/T5]All', 0.493, 0.472, 0.5, 0.21))]) class TargetProfile: # also determined import/export order - VERY IMPORTANT DAMAGE_TYPES = ('em', 'thermal', 'kinetic', 'explosive') _idealTarget = None _builtins = None def __init__(self, *args, **kwargs): self.builtin = False self.update(*args, **kwargs) @reconstructor def init(self): self.builtin = False def update(self, emAmount=0, thermalAmount=0, kineticAmount=0, explosiveAmount=0, maxVelocity=None, signatureRadius=None, radius=None): self.emAmount = emAmount self.thermalAmount = thermalAmount self.kineticAmount = kineticAmount self.explosiveAmount = explosiveAmount self._maxVelocity = maxVelocity self._signatureRadius = signatureRadius self._radius = radius @classmethod def getBuiltinList(cls): if cls._builtins is None: cls.__generateBuiltins() return list(cls._builtins.values()) @classmethod def getBuiltinById(cls, id): if cls._builtins is None: cls.__generateBuiltins() return cls._builtins.get(id) @classmethod def __generateBuiltins(cls): cls._builtins = OrderedDict() for id, data in BUILTINS.items(): rawName = data[0] data = data[1:] profile = TargetProfile(*data) profile.ID = id profile.rawName = rawName profile.builtin = True cls._builtins[id] = profile @classmethod def getIdeal(cls): if cls._idealTarget is None: cls._idealTarget = cls( emAmount=0, thermalAmount=0, kineticAmount=0, explosiveAmount=0, maxVelocity=0, signatureRadius=None, radius=0) cls._idealTarget.rawName = 'Ideal Target' cls._idealTarget.ID = 0 cls._idealTarget.builtin = True return cls._idealTarget @property def maxVelocity(self): return self._maxVelocity or 0 @maxVelocity.setter def maxVelocity(self, val): self._maxVelocity = val @property def signatureRadius(self): if self._signatureRadius is None or self._signatureRadius == -1: return math.inf return self._signatureRadius @signatureRadius.setter def signatureRadius(self, val): if val is not None and math.isinf(val): val = None self._signatureRadius = val @property def radius(self): return self._radius or 0 @radius.setter def radius(self, val): self._radius = val @classmethod def 
importPatterns(cls, text): lines = re.split('[\n\r]+', text) patterns = [] numPatterns = 0 # When we import damage profiles, we create new ones and update old ones. To do this, get a list of current # patterns to allow lookup lookup = {} current = eos.db.getTargetProfileList() for pattern in current: lookup[pattern.rawName] = pattern for line in lines: try: if line.strip()[0] == "#": # comments continue line = line.split('#', 1)[0] # allows for comments type, data = line.rsplit('=', 1) type, data = type.strip(), [d.strip() for d in data.split(',')] except (KeyboardInterrupt, SystemExit): raise except: pyfalog.warning("Data isn't in correct format, continue to next line.") continue if type not in ("TargetProfile", "TargetResists"): continue numPatterns += 1 name, dataRes, dataMisc = data[0], data[1:5], data[5:8] fields = {} for index, val in enumerate(dataRes): val = float(val) if val else 0 if math.isinf(val): val = 0 try: assert 0 <= val <= 100 fields["%sAmount" % cls.DAMAGE_TYPES[index]] = val / 100 except (KeyboardInterrupt, SystemExit): raise except: pyfalog.warning("Caught unhandled exception in import patterns.") continue if len(dataMisc) == 3: for index, val in enumerate(dataMisc): try: fieldName = ("maxVelocity", "signatureRadius", "radius")[index] except IndexError: break val = float(val) if val else 0 if fieldName != "signatureRadius" and math.isinf(val): val = 0 fields[fieldName] = val if len(fields) in (4, 7): # Avoid possible blank lines if name.strip() in lookup: pattern = lookup[name.strip()] pattern.update(**fields) eos.db.save(pattern) else: pattern = TargetProfile(**fields) pattern.rawName = name.strip() eos.db.save(pattern) patterns.append(pattern) eos.db.commit() return patterns, numPatterns EXPORT_FORMAT = "TargetProfile = %s,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f\n" @classmethod def exportPatterns(cls, *patterns): out = "# Exported from pyfa\n#\n" out += "# Values are in following format:\n" out += "# TargetProfile = [name],[EM %],[Thermal %],[Kinetic %],[Explosive %],[Max velocity m/s],[Signature radius m],[Radius m]\n\n" for dp in patterns: out += cls.EXPORT_FORMAT % ( dp.rawName, dp.emAmount * 100, dp.thermalAmount * 100, dp.kineticAmount * 100, dp.explosiveAmount * 100, dp.maxVelocity, dp.signatureRadius, dp.radius ) return out.strip() @property def name(self): return self.rawName @property def fullName(self): categories, tail = self.__parseRawName() return '{}{}'.format(''.join('[{}]'.format(c) for c in categories), tail) @property def shortName(self): return self.__parseRawName()[1] @property def hierarchy(self): return self.__parseRawName()[0] def __parseRawName(self): hierarchy = [] remainingName = self.rawName.strip() if self.rawName else '' while True: start, end = remainingName.find('['), remainingName.find(']') if start == -1 or end == -1: return hierarchy, remainingName splitter = remainingName.find('|') if splitter != -1 and splitter == start - 1: return hierarchy, remainingName[1:] hierarchy.append(remainingName[start + 1:end]) remainingName = remainingName[end + 1:].strip() def __deepcopy__(self, memo): p = TargetProfile( self.emAmount, self.thermalAmount, self.kineticAmount, self.explosiveAmount, self._maxVelocity, self._signatureRadius, self._radius) p.rawName = "%s copy" % self.rawName return p
gpl-3.0
6,335,731,424,209,932,000
48.666667
141
0.554412
false
2.653344
false
false
false
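
For context, a hedged round-trip sketch of the one-line text format that the exportPatterns classmethod above emits; the import path is an assumption (this record's path field is not shown here), and the snippet only uses the constructor arguments, rawName attribute, and exportPatterns classmethod defined in the record:

# Hypothetical demo of the TargetProfile export format; assumed import path.
from eos.saveddata.targetProfile import TargetProfile  # assumption

profile = TargetProfile(emAmount=0.5, thermalAmount=0.5,
                        kineticAmount=0.25, explosiveAmount=0.25,
                        maxVelocity=1200.0, signatureRadius=35.0, radius=25.0)
profile.rawName = '[NPC][Demo]Frigate'

# exportPatterns scales the 0..1 resonances back to percentages via
# EXPORT_FORMAT, so after its comment header it prints a line of the form:
# TargetProfile = [NPC][Demo]Frigate,50.0,50.0,25.0,25.0,1200.0,35.0,25.0
print(TargetProfile.exportPatterns(profile))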
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/bandwidth_constraints/__init__.py
1
12844
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state class bandwidth_constraints(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines bandwidth-constraints. For DS-TE, the existing Maximum Reservable link bandwidth parameter is retained, but its semantics is generalized and interpreted as the aggregate bandwidth constraint across all Class-Types """ __slots__ = ("_path_helper", "_extmethods", "__state") _yang_name = "bandwidth-constraints" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "levels", "level", "link-state-database", "lsp", "tlvs", "tlv", "mt-isn", "neighbors", "neighbor", "subTLVs", "subTLVs", "bandwidth-constraints", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) YANG Description: State parameters of IS Extended Reachability sub-TLV 22. 
""" return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of IS Extended Reachability sub-TLV 22. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) _pyangbind_elements = OrderedDict([("state", state)]) from . import state class bandwidth_constraints(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines bandwidth-constraints. 
For DS-TE, the existing Maximum Reservable link bandwidth parameter is retained, but its semantics is generalized and interpreted as the aggregate bandwidth constraint across all Class-Types """ __slots__ = ("_path_helper", "_extmethods", "__state") _yang_name = "bandwidth-constraints" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "levels", "level", "link-state-database", "lsp", "tlvs", "tlv", "mt-isn", "neighbors", "neighbor", "subTLVs", "subTLVs", "bandwidth-constraints", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) YANG Description: State parameters of IS Extended Reachability sub-TLV 22. """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of IS Extended Reachability sub-TLV 22. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) _pyangbind_elements = OrderedDict([("state", state)])
apache-2.0
1,062,767,270,325,623,600
37.570571
375
0.583775
false
4.37317
true
false
false
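
A minimal sketch of touching the generated container above; the only assumption is that napalm-yang is installed so that the deep module path recorded in this row imports cleanly. _path() and the read-only state property are both defined in the record:

# Instantiate the pyangbind-generated container and inspect it.
from napalm_yang.models.openconfig.network_instances.network_instance.protocols.protocol.isis.levels.level.link_state_database.lsp.tlvs.tlv.mt_isn.neighbors.neighbor.subTLVs.subTLVs_.bandwidth_constraints import bandwidth_constraints

bc = bandwidth_constraints()
print(bc._path())  # ['network-instances', ..., 'bandwidth-constraints']
print(bc.state)    # auto-instantiated 'state' container (config: false)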
sserrot/champion_relationships
venv/Lib/site-packages/ipykernel/inprocess/blocking.py
1
3068
""" Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- import sys try: from queue import Queue, Empty # Py 3 except ImportError: from Queue import Queue, Empty # Py 2 # IPython imports from traitlets import Type # Local imports from .channels import ( InProcessChannel, ) from .client import InProcessKernelClient class BlockingInProcessChannel(InProcessChannel): def __init__(self, *args, **kwds): super(BlockingInProcessChannel, self).__init__(*args, **kwds) self._in_queue = Queue() def call_handlers(self, msg): self._in_queue.put(msg) def get_msg(self, block=True, timeout=None): """ Gets a message if there is one that is ready. """ if timeout is None: # Queue.get(timeout=None) has stupid uninteruptible # behavior, so wait for a week instead timeout = 604800 return self._in_queue.get(block, timeout) def get_msgs(self): """ Get all messages that are currently ready. """ msgs = [] while True: try: msgs.append(self.get_msg(block=False)) except Empty: break return msgs def msg_ready(self): """ Is there a message that has been received? """ return not self._in_queue.empty() class BlockingInProcessStdInChannel(BlockingInProcessChannel): def call_handlers(self, msg): """ Overridden for the in-process channel. This methods simply calls raw_input directly. """ msg_type = msg['header']['msg_type'] if msg_type == 'input_request': _raw_input = self.client.kernel._sys_raw_input prompt = msg['content']['prompt'] print(prompt, end='', file=sys.__stdout__) sys.__stdout__.flush() self.client.input(_raw_input()) class BlockingInProcessKernelClient(InProcessKernelClient): # The classes to use for the various channels. shell_channel_class = Type(BlockingInProcessChannel) iopub_channel_class = Type(BlockingInProcessChannel) stdin_channel_class = Type(BlockingInProcessStdInChannel) def wait_for_ready(self): # Wait for kernel info reply on shell channel while True: msg = self.shell_channel.get_msg(block=True) if msg['msg_type'] == 'kernel_info_reply': self._handle_kernel_info_reply(msg) break # Flush IOPub channel while True: try: msg = self.iopub_channel.get_msg(block=True, timeout=0.2) print(msg['msg_type']) except Empty: break
mit
1,264,620,225,133,396,200
31.989247
78
0.58605
false
4.27894
false
false
false
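
For orientation, a hedged usage sketch of the blocking client above; InProcessKernelManager and the start_channels/execute calls come from ipykernel/jupyter_client rather than this file, so treat them as assumptions:

# Drive an in-process kernel with the blocking client defined above.
from ipykernel.inprocess.manager import InProcessKernelManager  # assumed import

km = InProcessKernelManager()
km.start_kernel()
kc = km.client()          # blocking in-process client in this setup
kc.start_channels()
kc.wait_for_ready()       # defined above: waits for kernel_info_reply, flushes IOPub
kc.execute("print('hello')")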
alfa-addon/addon
plugin.video.alfa/channels/vi2.py
1
11788
# -*- coding: utf-8 -*- # -*- Channel Vi2.co -*- # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int import re import base64 from channelselector import get_thumb from core import httptools from core import jsontools from core import scrapertools from core import servertools from core import tmdb from lib import jsunpack from core.item import Item from channels import filtertools from channels import autoplay from platformcode import config, logger IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'} list_language = list(IDIOMAS.values()) list_quality = ['Full HD 1080p', 'HDRip', 'DVDScreener', '720p', 'Ts Screener hq', 'HD Real 720p', 'DVDRip', 'BluRay-1080p', 'BDremux-1080p'] list_servers = [ 'directo', 'openload', 'rapidvideo', 'jawcloud', 'cloudvideo', 'upvid', 'vevio', 'gamovideo' ] host = 'http://vi2.co' def mainlist(item): logger.info() autoplay.init(item.channel, list_servers, list_quality) itemlist = [] itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas', thumbnail= get_thumb('movies', auto=True))) # itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series', # thumbnail= get_thumb('tvshows', auto=True))) autoplay.show_option(item.channel, itemlist) return itemlist def select_menu(item): logger.info() itemlist=[] url = host + '/%s/es/' % item.type itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu', thumbnail=get_thumb('all', auto=True), type=item.type)) itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu', thumbnail=get_thumb('all', auto=True), type=item.type)) itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url, thumbnail=get_thumb('genres', auto=True), type='all')) itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url, thumbnail=get_thumb('year', auto=True), type='all')) return itemlist def sub_menu(item): logger.info() itemlist = [] url = host + '/%s/es/ajax/1/' % item.type link_type = item.title.lower() if link_type == 'streaming': link_type = 'flash' movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE'] tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE'] if item.type == 'peliculas': title = movies_options thumb_1 = 'all' else: thumb_1 = 'last' title = tv_options itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type, action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type, link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[1], url=url + '?q=%s+espanol' % link_type, action='list_all', thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español', link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[2], url=url + '?q=%s+latino' % link_type, action='list_all', thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino', link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[3], url=url + '?q=%s+subtitulado' % link_type, action='list_all', thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE', link_type=link_type)) itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=', thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type)) return itemlist def 
get_source(url, referer=None): logger.info() if referer is None: data = httptools.downloadpage(url).data else: data = httptools.downloadpage(url, headers={'Referer':referer}).data data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data) return data def section(item): logger.info() itemlist=[] excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent'] full_data = get_source(item.url) data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">') patron = 'href="([^"]+)">([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapedtitle url = host+scrapedurl.replace('/?','/ajax/1/?') if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()): itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type)) return itemlist def list_all(item): from core import jsontools logger.info() itemlist = [] listed =[] quality='' infoLabels = {} json_data= jsontools.load(get_source(item.url)) data = json_data['render'] data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data) #if item.type == 'peliculas': patron = '<img\s*class="cover"[^>]+src="([^"]+)"\s*data-id="\d+"\s*' patron +='alt="Ver\s*([^\(]+)(.*?)">\s*' patron += '<div\s*class="mdl-card__menu">\s*<a\s*class="clean-link"\s*href="([^"]+)">' patron += '.*?<span\s*class="link-size">([^<]*)<' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl , size in matches: if item.send_lang != '': lang = item.send_lang else: lang = '' year='-' extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','') extra_info = extra_info.split('|') for info in extra_info: info = info.strip() if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info: quality = info if 'ingl' in info.lower(): info = 'VO' if info in IDIOMAS: lang = info elif info.isdigit(): year = info if lang in IDIOMAS: lang = IDIOMAS[lang] title = '%s' % scrapedtitle.strip() if not config.get_setting('unify'): if year.isdigit(): title = '%s [%s]' % (title, year) if quality != '': title = '%s [%s]' % (title, quality) if lang != '': title = '%s [%s]' % (title, lang) thumbnail = host+scrapedthumbnail url = host+scrapedurl if item.type == 'series': season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)') infoLabels['season'] = season infoLabels['episode'] = episode else: infoLabels['year'] = year if title not in listed: new_item = Item(channel=item.channel, title=title, url=url, action='findvideos', thumbnail=thumbnail, type=item.type, language = lang, quality=quality, link_type=item.link_type, torrent_data= size, infoLabels = infoLabels ) if item.type == 'peliculas' or item.type == 'all': new_item.contentTitle = scrapedtitle else: scrapedtitle = scrapedtitle.split(' - ') new_item.contentSerieName = scrapedtitle[0] itemlist.append(new_item) listed.append(title) tmdb.set_infoLabels(itemlist, seekTmdb=True) itemlist.sort(key=lambda it: it.title) # Paginación if json_data['next']: actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/') next_page =int(actual_page) + 1 url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page) itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type, action='list_all', send_lang=item.send_lang)) return itemlist def findvideos(item): logger.info() import base64 
itemlist = [] server = '' data = get_source(item.url) pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"') data = get_source(host+pre_url) patron = 'data-video="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) lang = item.language quality = item.quality for url in matches: title = '' link_type = '' server = '' url = base64.b64decode(url.encode('utf8')).decode('utf8') if 'torrent' in url: if item.link_type == 'torrent' or item.type == 'all': server = 'torrent' link_type = 'torrent' title = ' [%s]' % item.torrent_data elif 'torrent' not in url: link_type = 'flash' if link_type == item.link_type.lower() or item.type == 'all': itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server, language=lang, quality=quality, infoLabels=item.infoLabels)) itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) # Requerido para FilterTools itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay autoplay.start(itemlist, item) itemlist = sorted(itemlist, key=lambda it: it.language) if item.contentType != 'episode': if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': itemlist.append( Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") url = '%spelicula+%s+%s&o=2' % (item.url, texto, item.link_type) #Parche busqueda global (solo vale para peliculas en streaming) if not item.url: item.type = 'peliculas' item.link_type = 'flash' ajax = '%s/%s/es/ajax/1/' % (host, item.type) url = '%s?q=%s+%s+%s&o=2' % (ajax, item.type, texto, item.link_type) item.url = url try: return list_all(item) except: import sys for line in sys.exc_info(): logger.error("%s" % line) return []
gpl-3.0
-5,339,963,962,902,820,000
34.138037
140
0.54155
false
3.665526
false
false
false
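
A hypothetical invocation of the channel above outside Kodi; the core and channels packages ship with the Alfa addon and expect the Kodi runtime, so this sketch only runs inside that environment:

# Build the root menu of the vi2 channel; Item and mainlist() are taken
# straight from the record above.
from core.item import Item
from channels import vi2

root = Item(channel='vi2', action='mainlist')
for entry in vi2.mainlist(root):
    print(entry.title, '->', entry.action)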
Aravinthu/odoo
addons/mrp_repair/models/mrp_repair.py
4
32545
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from datetime import datetime from odoo import api, fields, models, _ from odoo.addons import decimal_precision as dp from odoo.exceptions import UserError, ValidationError from odoo.tools import float_compare class StockMove(models.Model): _inherit = 'stock.move' repair_id = fields.Many2one('mrp.repair') class Repair(models.Model): _name = 'mrp.repair' _description = 'Repair Order' _inherit = ['mail.thread', 'mail.activity.mixin'] _order = 'create_date desc' @api.model def _default_stock_location(self): warehouse = self.env['stock.warehouse'].search([], limit=1) if warehouse: return warehouse.lot_stock_id.id return False name = fields.Char( 'Repair Reference', default=lambda self: self.env['ir.sequence'].next_by_code('mrp.repair'), copy=False, required=True, states={'confirmed': [('readonly', True)]}) product_id = fields.Many2one( 'product.product', string='Product to Repair', readonly=True, required=True, states={'draft': [('readonly', False)]}) product_qty = fields.Float( 'Product Quantity', default=1.0, digits=dp.get_precision('Product Unit of Measure'), readonly=True, required=True, states={'draft': [('readonly', False)]}) product_uom = fields.Many2one( 'product.uom', 'Product Unit of Measure', readonly=True, required=True, states={'draft': [('readonly', False)]}) partner_id = fields.Many2one( 'res.partner', 'Customer', index=True, states={'confirmed': [('readonly', True)]}, help='Choose partner for whom the order will be invoiced and delivered.') address_id = fields.Many2one( 'res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}) default_address_id = fields.Many2one('res.partner', compute='_compute_default_address_id') state = fields.Selection([ ('draft', 'Quotation'), ('cancel', 'Cancelled'), ('confirmed', 'Confirmed'), ('under_repair', 'Under Repair'), ('ready', 'Ready to Repair'), ('2binvoiced', 'To be Invoiced'), ('invoice_except', 'Invoice Exception'), ('done', 'Repaired')], string='Status', copy=False, default='draft', readonly=True, track_visibility='onchange', help="* The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order.\n" "* The \'Confirmed\' status is used when a user confirms the repair order.\n" "* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed.\n" "* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done.\n" "* The \'Done\' status is set when repairing is completed.\n" "* The \'Cancelled\' status is used when user cancel repair order.") location_id = fields.Many2one( 'stock.location', 'Current Location', default=_default_stock_location, index=True, readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}) location_dest_id = fields.Many2one( 'stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}) lot_id = fields.Many2one( 'stock.production.lot', 'Lot/Serial', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot", oldname="prodlot_id") guarantee_limit = fields.Date('Warranty Expiration', states={'confirmed': [('readonly', True)]}) operations = fields.One2many( 'mrp.repair.line', 'repair_id', 'Parts', copy=True, readonly=True, states={'draft': [('readonly', False)]}) pricelist_id = 
fields.Many2one( 'product.pricelist', 'Pricelist', default=lambda self: self.env['product.pricelist'].search([], limit=1).id, help='Pricelist of the selected partner.') partner_invoice_id = fields.Many2one('res.partner', 'Invoicing Address') invoice_method = fields.Selection([ ("none", "No Invoice"), ("b4repair", "Before Repair"), ("after_repair", "After Repair")], string="Invoice Method", default='none', index=True, readonly=True, required=True, states={'draft': [('readonly', False)]}, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.') invoice_id = fields.Many2one( 'account.invoice', 'Invoice', copy=False, readonly=True, track_visibility="onchange") move_id = fields.Many2one( 'stock.move', 'Move', copy=False, readonly=True, track_visibility="onchange", help="Move created by the repair order") fees_lines = fields.One2many( 'mrp.repair.fee', 'repair_id', 'Operations', copy=True, readonly=True, states={'draft': [('readonly', False)]}) internal_notes = fields.Text('Internal Notes') quotation_notes = fields.Text('Quotation Notes') company_id = fields.Many2one( 'res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('mrp.repair')) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) repaired = fields.Boolean('Repaired', copy=False, readonly=True) amount_untaxed = fields.Float('Untaxed Amount', compute='_amount_untaxed', store=True) amount_tax = fields.Float('Taxes', compute='_amount_tax', store=True) amount_total = fields.Float('Total', compute='_amount_total', store=True) tracking = fields.Selection('Product Tracking', related="product_id.tracking") @api.one @api.depends('partner_id') def _compute_default_address_id(self): if self.partner_id: self.default_address_id = self.partner_id.address_get(['contact'])['contact'] @api.one @api.depends('operations.price_subtotal', 'invoice_method', 'fees_lines.price_subtotal', 'pricelist_id.currency_id') def _amount_untaxed(self): total = sum(operation.price_subtotal for operation in self.operations) total += sum(fee.price_subtotal for fee in self.fees_lines) self.amount_untaxed = self.pricelist_id.currency_id.round(total) @api.one @api.depends('operations.price_unit', 'operations.product_uom_qty', 'operations.product_id', 'fees_lines.price_unit', 'fees_lines.product_uom_qty', 'fees_lines.product_id', 'pricelist_id.currency_id', 'partner_id') def _amount_tax(self): val = 0.0 for operation in self.operations: if operation.tax_id: tax_calculate = operation.tax_id.compute_all(operation.price_unit, self.pricelist_id.currency_id, operation.product_uom_qty, operation.product_id, self.partner_id) for c in tax_calculate['taxes']: val += c['amount'] for fee in self.fees_lines: if fee.tax_id: tax_calculate = fee.tax_id.compute_all(fee.price_unit, self.pricelist_id.currency_id, fee.product_uom_qty, fee.product_id, self.partner_id) for c in tax_calculate['taxes']: val += c['amount'] self.amount_tax = val @api.one @api.depends('amount_untaxed', 'amount_tax') def _amount_total(self): self.amount_total = self.pricelist_id.currency_id.round(self.amount_untaxed + self.amount_tax) _sql_constraints = [ ('name', 'unique (name)', 'The name of the Repair Order must be unique!'), ] @api.onchange('product_id') def onchange_product_id(self): self.guarantee_limit = False self.lot_id = False if self.product_id: self.product_uom = self.product_id.uom_id.id 
@api.onchange('product_uom') def onchange_product_uom(self): res = {} if not self.product_id or not self.product_uom: return res if self.product_uom.category_id != self.product_id.uom_id.category_id: res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')} self.product_uom = self.product_id.uom_id.id return res @api.onchange('location_id') def onchange_location_id(self): self.location_dest_id = self.location_id.id @api.onchange('partner_id') def onchange_partner_id(self): if not self.partner_id: self.address_id = False self.partner_invoice_id = False self.pricelist_id = self.env['product.pricelist'].search([], limit=1).id else: addresses = self.partner_id.address_get(['delivery', 'invoice', 'contact']) self.address_id = addresses['delivery'] or addresses['contact'] self.partner_invoice_id = addresses['invoice'] self.pricelist_id = self.partner_id.property_product_pricelist.id @api.multi def button_dummy(self): # TDE FIXME: this button is very interesting return True @api.multi def action_repair_cancel_draft(self): if self.filtered(lambda repair: repair.state != 'cancel'): raise UserError(_("Repair must be canceled in order to reset it to draft.")) self.mapped('operations').write({'state': 'draft'}) return self.write({'state': 'draft'}) def action_validate(self): self.ensure_one() precision = self.env['decimal.precision'].precision_get('Product Unit of Measure') available_qty = self.env['stock.quant']._get_available_quantity(self.product_id, self.location_id, self.lot_id, strict=True) if float_compare(available_qty, self.product_qty, precision_digits=precision) >= 0: return self.action_repair_confirm() else: return { 'name': _('Insufficient Quantity'), 'view_type': 'form', 'view_mode': 'form', 'res_model': 'stock.warn.insufficient.qty.repair', 'view_id': self.env.ref('mrp_repair.stock_warn_insufficient_qty_repair_form_view').id, 'type': 'ir.actions.act_window', 'context': { 'default_product_id': self.product_id.id, 'default_location_id': self.location_id.id, 'default_repair_id': self.id }, 'target': 'new' } @api.multi def action_repair_confirm(self): """ Repair order state is set to 'To be invoiced' when invoice method is 'Before repair' else state becomes 'Confirmed'. 
@param *arg: Arguments @return: True """ if self.filtered(lambda repair: repair.state != 'draft'): raise UserError(_("Can only confirm draft repairs.")) before_repair = self.filtered(lambda repair: repair.invoice_method == 'b4repair') before_repair.write({'state': '2binvoiced'}) to_confirm = self - before_repair to_confirm_operations = to_confirm.mapped('operations') to_confirm_operations.write({'state': 'confirmed'}) to_confirm.write({'state': 'confirmed'}) return True @api.multi def action_repair_cancel(self): if self.filtered(lambda repair: repair.state == 'done'): raise UserError(_("Cannot cancel completed repairs.")) if any(repair.invoiced for repair in self): raise UserError(_('Repair order is already invoiced.')) self.mapped('operations').write({'state': 'cancel'}) return self.write({'state': 'cancel'}) @api.multi def action_send_mail(self): self.ensure_one() template_id = self.env.ref('mrp_repair.mail_template_mrp_repair_quotation').id ctx = { 'default_model': 'mrp.repair', 'default_res_id': self.id, 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment' } return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'target': 'new', 'context': ctx, } @api.multi def print_repair_order(self): return self.env.ref('mrp_repair.action_report_mrp_repair_order').report_action(self) def action_repair_invoice_create(self): for repair in self: repair.action_invoice_create() if repair.invoice_method == 'b4repair': repair.action_repair_ready() elif repair.invoice_method == 'after_repair': repair.write({'state': 'done'}) return True @api.multi def action_invoice_create(self, group=False): """ Creates invoice(s) for repair order. @param group: It is set to true when group invoice is to be generated. @return: Invoice Ids. 
""" res = dict.fromkeys(self.ids, False) invoices_group = {} InvoiceLine = self.env['account.invoice.line'] Invoice = self.env['account.invoice'] for repair in self.filtered(lambda repair: repair.state not in ('draft', 'cancel') and not repair.invoice_id): if not repair.partner_id.id and not repair.partner_invoice_id.id: raise UserError(_('You have to select a Partner Invoice Address in the repair form!')) comment = repair.quotation_notes if repair.invoice_method != 'none': if group and repair.partner_invoice_id.id in invoices_group: invoice = invoices_group[repair.partner_invoice_id.id] invoice.write({ 'name': invoice.name + ', ' + repair.name, 'origin': invoice.origin + ', ' + repair.name, 'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''), }) else: if not repair.partner_id.property_account_receivable_id: raise UserError(_('No account defined for partner "%s".') % repair.partner_id.name) invoice = Invoice.create({ 'name': repair.name, 'origin': repair.name, 'type': 'out_invoice', 'account_id': repair.partner_id.property_account_receivable_id.id, 'partner_id': repair.partner_invoice_id.id or repair.partner_id.id, 'currency_id': repair.pricelist_id.currency_id.id, 'comment': repair.quotation_notes, 'fiscal_position_id': repair.partner_id.property_account_position_id.id }) invoices_group[repair.partner_invoice_id.id] = invoice repair.write({'invoiced': True, 'invoice_id': invoice.id}) for operation in repair.operations: if operation.type == 'add': if group: name = repair.name + '-' + operation.name else: name = operation.name if operation.product_id.property_account_income_id: account_id = operation.product_id.property_account_income_id.id elif operation.product_id.categ_id.property_account_income_categ_id: account_id = operation.product_id.categ_id.property_account_income_categ_id.id else: raise UserError(_('No account defined for product "%s".') % operation.product_id.name) invoice_line = InvoiceLine.create({ 'invoice_id': invoice.id, 'name': name, 'origin': repair.name, 'account_id': account_id, 'quantity': operation.product_uom_qty, 'invoice_line_tax_ids': [(6, 0, [x.id for x in operation.tax_id])], 'uom_id': operation.product_uom.id, 'price_unit': operation.price_unit, 'price_subtotal': operation.product_uom_qty * operation.price_unit, 'product_id': operation.product_id and operation.product_id.id or False }) operation.write({'invoiced': True, 'invoice_line_id': invoice_line.id}) for fee in repair.fees_lines: if group: name = repair.name + '-' + fee.name else: name = fee.name if not fee.product_id: raise UserError(_('No product defined on Fees!')) if fee.product_id.property_account_income_id: account_id = fee.product_id.property_account_income_id.id elif fee.product_id.categ_id.property_account_income_categ_id: account_id = fee.product_id.categ_id.property_account_income_categ_id.id else: raise UserError(_('No account defined for product "%s".') % fee.product_id.name) invoice_line = InvoiceLine.create({ 'invoice_id': invoice.id, 'name': name, 'origin': repair.name, 'account_id': account_id, 'quantity': fee.product_uom_qty, 'invoice_line_tax_ids': [(6, 0, [x.id for x in fee.tax_id])], 'uom_id': fee.product_uom.id, 'product_id': fee.product_id and fee.product_id.id or False, 'price_unit': fee.price_unit, 'price_subtotal': fee.product_uom_qty * fee.price_unit }) fee.write({'invoiced': True, 'invoice_line_id': invoice_line.id}) invoice.compute_taxes() res[repair.id] = invoice.id return res @api.multi def 
action_created_invoice(self): self.ensure_one() return { 'name': _('Invoice created'), 'type': 'ir.actions.act_window', 'view_mode': 'form', 'res_model': 'account.invoice', 'view_id': self.env.ref('account.invoice_form').id, 'target': 'current', 'res_id': self.invoice_id.id, } def action_repair_ready(self): self.mapped('operations').write({'state': 'confirmed'}) return self.write({'state': 'ready'}) @api.multi def action_repair_start(self): """ Writes repair order state to 'Under Repair' @return: True """ if self.filtered(lambda repair: repair.state not in ['confirmed', 'ready']): raise UserError(_("Repair must be confirmed before starting reparation.")) self.mapped('operations').write({'state': 'confirmed'}) return self.write({'state': 'under_repair'}) @api.multi def action_repair_end(self): """ Writes repair order state to 'To be invoiced' if invoice method is After repair else state is set to 'Ready'. @return: True """ if self.filtered(lambda repair: repair.state != 'under_repair'): raise UserError(_("Repair must be under repair in order to end reparation.")) for repair in self: repair.write({'repaired': True}) vals = {'state': 'done'} vals['move_id'] = repair.action_repair_done().get(repair.id) if not repair.invoiced and repair.invoice_method == 'after_repair': vals['state'] = '2binvoiced' repair.write(vals) return True @api.multi def action_repair_done(self): """ Creates stock move for operation and stock move for final product of repair order. @return: Move ids of final products """ if self.filtered(lambda repair: not repair.repaired): raise UserError(_("Repair must be repaired in order to make the product moves.")) res = {} Move = self.env['stock.move'] for repair in self: moves = self.env['stock.move'] for operation in repair.operations: move = Move.create({ 'name': repair.name, 'product_id': operation.product_id.id, 'product_uom_qty': operation.product_uom_qty, 'product_uom': operation.product_uom.id, 'partner_id': repair.address_id.id, 'location_id': operation.location_id.id, 'location_dest_id': operation.location_dest_id.id, 'move_line_ids': [(0, 0, {'product_id': operation.product_id.id, 'lot_id': operation.lot_id.id, 'product_uom_qty': 0, # bypass reservation here 'product_uom_id': operation.product_uom.id, 'qty_done': operation.product_uom_qty, 'package_id': False, 'result_package_id': False, 'location_id': operation.location_id.id, #TODO: owner stuff 'location_dest_id': operation.location_dest_id.id,})], 'repair_id': repair.id, 'origin': repair.name, }) moves |= move operation.write({'move_id': move.id, 'state': 'done'}) move = Move.create({ 'name': repair.name, 'product_id': repair.product_id.id, 'product_uom': repair.product_uom.id or repair.product_id.uom_id.id, 'product_uom_qty': repair.product_qty, 'partner_id': repair.address_id.id, 'location_id': repair.location_id.id, 'location_dest_id': repair.location_dest_id.id, 'move_line_ids': [(0, 0, {'product_id': repair.product_id.id, 'lot_id': repair.lot_id.id, 'product_uom_qty': 0, # bypass reservation here 'product_uom_id': repair.product_uom.id or repair.product_id.uom_id.id, 'qty_done': repair.product_qty, 'package_id': False, 'result_package_id': False, 'location_id': repair.location_id.id, #TODO: owner stuff 'location_dest_id': repair.location_dest_id.id,})], 'repair_id': repair.id, 'origin': repair.name, }) consumed_lines = moves.mapped('move_line_ids') produced_lines = move.move_line_ids moves |= move moves._action_done() produced_lines.write({'consume_line_ids': [(6, 0, consumed_lines.ids)]}) res[repair.id] = 
move.id return res class RepairLine(models.Model): _name = 'mrp.repair.line' _description = 'Repair Line' name = fields.Char('Description', required=True) repair_id = fields.Many2one( 'mrp.repair', 'Repair Order Reference', index=True, ondelete='cascade') type = fields.Selection([ ('add', 'Add'), ('remove', 'Remove')], 'Type', required=True) product_id = fields.Many2one('product.product', 'Product', required=True) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price')) price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0) tax_id = fields.Many2many( 'account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes') product_uom_qty = fields.Float( 'Quantity', default=1.0, digits=dp.get_precision('Product Unit of Measure'), required=True) product_uom = fields.Many2one( 'product.uom', 'Product Unit of Measure', required=True) invoice_line_id = fields.Many2one( 'account.invoice.line', 'Invoice Line', copy=False, readonly=True) location_id = fields.Many2one( 'stock.location', 'Source Location', index=True, required=True) location_dest_id = fields.Many2one( 'stock.location', 'Dest. Location', index=True, required=True) move_id = fields.Many2one( 'stock.move', 'Inventory Move', copy=False, readonly=True) lot_id = fields.Many2one('stock.production.lot', 'Lot/Serial') state = fields.Selection([ ('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', default='draft', copy=False, readonly=True, required=True, help='The status of a repair line is set automatically to the one of the linked repair order.') @api.constrains('lot_id', 'product_id') def constrain_lot_id(self): for line in self.filtered(lambda x: x.product_id.tracking != 'none' and not x.lot_id): raise ValidationError(_("Serial number is required for operation line with product '%s'") % (line.product_id.name)) @api.one @api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id', 'repair_id.invoice_method') def _compute_price_subtotal(self): taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id) self.price_subtotal = taxes['total_excluded'] @api.onchange('type', 'repair_id') def onchange_operation_type(self): """ On change of operation type it sets source location, destination location and to invoice field. @param product: Changed operation type. @param guarantee_limit: Guarantee limit of current record. @return: Dictionary of values. """ if not self.type: self.location_id = False self.location_dest_id = False elif self.type == 'add': self.onchange_product_id() args = self.repair_id.company_id and [('company_id', '=', self.repair_id.company_id.id)] or [] warehouse = self.env['stock.warehouse'].search(args, limit=1) self.location_id = warehouse.lot_stock_id self.location_dest_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id else: self.price_unit = 0.0 self.tax_id = False self.location_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id self.location_dest_id = self.env['stock.location'].search([('scrap_location', '=', True)], limit=1).id @api.onchange('repair_id', 'product_id', 'product_uom_qty') def onchange_product_id(self): """ On change of product it sets product quantity, tax account, name, uom of product, unit price and price subtotal. 
""" partner = self.repair_id.partner_id pricelist = self.repair_id.pricelist_id if not self.product_id or not self.product_uom_qty: return if self.product_id: if partner: self.name = self.product_id.with_context(lang=partner.lang).display_name else: self.name = self.product_id.display_name self.product_uom = self.product_id.uom_id.id if self.type != 'remove': if partner and self.product_id: self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids warning = False if not pricelist: warning = { 'title': _('No Pricelist!'), 'message': _('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')} else: price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner) if price is False: warning = { 'title': _('No valid pricelist line found !'), 'message': _("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")} else: self.price_unit = price if warning: return {'warning': warning} class RepairFee(models.Model): _name = 'mrp.repair.fee' _description = 'Repair Fees Line' repair_id = fields.Many2one( 'mrp.repair', 'Repair Order Reference', index=True, ondelete='cascade', required=True) name = fields.Char('Description', index=True, required=True) product_id = fields.Many2one('product.product', 'Product') product_uom_qty = fields.Float('Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0) price_unit = fields.Float('Unit Price', required=True) product_uom = fields.Many2one('product.uom', 'Product Unit of Measure', required=True) price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0) tax_id = fields.Many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes') invoice_line_id = fields.Many2one('account.invoice.line', 'Invoice Line', copy=False, readonly=True) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) @api.one @api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id') def _compute_price_subtotal(self): taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id) self.price_subtotal = taxes['total_excluded'] @api.onchange('repair_id', 'product_id', 'product_uom_qty') def onchange_product_id(self): """ On change of product it sets product quantity, tax account, name, uom of product, unit price and price subtotal. """ if not self.product_id: return partner = self.repair_id.partner_id pricelist = self.repair_id.pricelist_id if partner and self.product_id: self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids if self.product_id: self.name = self.product_id.display_name self.product_uom = self.product_id.uom_id.id warning = False if not pricelist: warning = { 'title': _('No Pricelist!'), 'message': _('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')} else: price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner) if price is False: warning = { 'title': _('No valid pricelist line found !'), 'message': _("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")} else: self.price_unit = price if warning: return {'warning': warning}
agpl-3.0
-5,396,696,964,052,102,000
48.161631
227
0.570103
false
4.089595
false
false
false
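
A sketch of driving the repair workflow above from an Odoo shell (where env is predefined); the record lookups and XML ids are assumptions, while the action_* methods and state transitions are exactly those defined in the record:

# Create and run a repair order through its states (draft -> confirmed ->
# under_repair -> done / 2binvoiced), per the methods above.
product = env['product.product'].search([], limit=1)          # assumed demo data
repair = env['mrp.repair'].create({
    'product_id': product.id,
    'product_uom': product.uom_id.id,
    'location_id': env.ref('stock.stock_location_stock').id,   # assumed XML id
    'location_dest_id': env.ref('stock.stock_location_customers').id,
    'invoice_method': 'after_repair',
})
repair.action_repair_confirm()  # -> 'confirmed' (or '2binvoiced' for b4repair)
repair.action_repair_start()    # -> 'under_repair'
repair.action_repair_end()      # sets repaired; '2binvoiced' with this method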
arsfeld/conduit
conduit/modules/ShutterflyModule/ShutterflyModule.py
1
3532
""" Shutterfly Data Sink """ import logging log = logging.getLogger("modules.Shutterfly") import conduit import conduit.utils as Utils from conduit.datatypes import Rid import conduit.dataproviders.Image as Image import conduit.Exceptions as Exceptions import conduit.datatypes.Photo as Photo Utils.dataprovider_add_dir_to_path(__file__, "shutterfly") from shutterfly import Shutterfly from gettext import gettext as _ MODULES = { "ShutterflySink" : {"type" : "dataprovider"}, } class ShutterflySink(Image.ImageSink): _name_ = _("Shutterfly") _description_ = _("Synchronize your Shutterfly photos") _module_type_ = "sink" _icon_ = "shutterfly" _configurable_ = True def __init__(self, *args): Image.ImageSink.__init__(self) self.username = "" self.password = "" self.album = "" self.sapi = None self.salbum = None self.sphotos = None def _get_raw_photo_url(self, photoInfo): return photoInfo.url def _get_photo_info(self, id): if self.sphotos.has_key(id): return self.sphotos[id] else: return None def _get_photo_formats(self): return ("image/jpeg", ) def refresh(self): Image.ImageSink.refresh(self) self.sapi = Shutterfly(self.username, self.password) albums = self.sapi.getAlbums() if not albums.has_key(self.album): self.salbum = self.sapi.createAlbum(self.album) else: self.salbum = albums[self.album] self.sphotos = self.salbum.getPhotos() def get_all(self): return self.sphotos.keys() def get(self, LUID): #Image.ImageSink.get(self, LUID) sphoto = self.sphotos[LUID] f = Photo.Photo(URI=sphoto.url) f.set_open_URI(sphoto.url) f.set_UID(LUID) return f def delete(self, LUID): """ Delete a photo by ID Deleting a photo invalidates album length and photo index values. We must reload the photos (or do something else...) """ if not self.sphotos.has_key(LUID): log.warn("Photo does not exist") return try: self.salbum.deletePhoto(self.sphotos[LUID]) except Exception, e: raise Exceptions.SyncronizeError("Shutterfly Delete Error - Try Again.") self.sphotos = self.salbum.getPhotos() def _upload_photo(self, uploadInfo): """ Upload to album """ try: ret = self.salbum.uploadPhoto(uploadInfo.url, uploadInfo.mimeType, uploadInfo.name) return Rid(ret.id) except Exception, e: raise Exceptions.SyncronizeError("Shutterfly Upload Error.") def configure(self, window): """ Configures the ShutterflySink """ widget = Utils.dataprovider_glade_get_widget( __file__, "shutterfly.glade", "ShutterflySinkConfigDialog") # Get configuration widgets username = widget.get_widget("username") password = widget.get_widget("password") album = widget.get_widget("album") # Load the widgets with presets username.set_text(self.username) password.set_text(self.password) album.set_text(self.album) dlg = widget.get_widget("ShutterflySinkConfigDialog") response = Utils.run_dialog(dlg, window) if response == True: self.username = username.get_text() self.password = password.get_text() self.album = album.get_text() dlg.destroy() def get_configuration(self): return { "username" : self.username, "password" : self.password, "album" : self.album } def is_configured(self, isSource, isTwoWay): if len(self.username) < 1: return False if len(self.password) < 1: return False if len(self.album) < 1: return False return True def get_UID(self): return self.username+":"+self.album
gpl-2.0
-7,614,566,365,720,435,000
22.084967
86
0.693658
false
2.95071
true
false
false
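
A minimal, hedged sketch of configuring the sink above without its GTK dialog; it only sets the attributes created in __init__ and calls methods defined in the record, but it needs a Conduit checkout and real credentials to actually run:

# Configure and refresh the ShutterflySink programmatically.
from conduit.modules.ShutterflyModule.ShutterflyModule import ShutterflySink  # assumed path

sink = ShutterflySink()
sink.username = 'user@example.com'  # attributes from __init__
sink.password = 'secret'
sink.album = 'Holidays'

sink.refresh()                      # logs in, creates/loads the album
print(sink.get_all())               # LUIDs of photos already in the album
print(sink.get_UID())               # 'user@example.com:Holidays'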
Southpaw-TACTIC/Team
src/python/Lib/site-packages/PySide/examples/declarative/scrolling.py
1
2311
#!/usr/bin/env python

# Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# Contact: PySide Team (pyside@openbossa.org)
#
# This file is part of the examples of PySide: Python for Qt.
#
# You may use this file under the terms of the BSD license as follows:
#
# "Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
#     the names of its contributors may be used to endorse or promote
#     products derived from this software without specific prior written
#     permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."

from PySide.QtCore import QUrl
from PySide.QtGui import QPushButton, QApplication
from PySide.QtDeclarative import QDeclarativeView

# This example uses a QML file to show a scrolling list containing
# all the items listed into dataList.

dataList = ["Item 1", "Item 2", "Item 3", "Item 4"]

app = QApplication([])
view = QDeclarativeView()

ctxt = view.rootContext()
ctxt.setContextProperty("myModel", dataList)

url = QUrl('view.qml')
view.setSource(url)
view.show()

app.exec_()
epl-1.0
1,676,431,145,130,090,000
39.54386
72
0.762441
false
4.068662
false
false
false
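view.qml is not part of the record above, so the model contract stays implicit. A hedged variant of the same wiring: PySide converts a list of dicts into a QVariantList of maps, which a QML delegate can read by key (view.qml is still assumed to exist next to the script).

from PySide.QtCore import QUrl
from PySide.QtGui import QApplication
from PySide.QtDeclarative import QDeclarativeView

# same pattern as the demo above, but each row carries named fields;
# a delegate in view.qml would read them as modelData.name / modelData.index
dataList = [{"name": "Item %d" % i, "index": i} for i in range(1, 5)]

app = QApplication([])
view = QDeclarativeView()
view.rootContext().setContextProperty("myModel", dataList)
view.setSource(QUrl('view.qml'))
view.show()
app.exec_()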
j-marjanovic/myhdl
myhdl/_always_comb.py
1
4522
# This file is part of the myhdl library, a Python package for using # Python as a Hardware Description Language. # # Copyright (C) 2003-2009 Jan Decaluwe # # The myhdl library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of the # License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Module with the always_comb function. """ from __future__ import absolute_import import sys import inspect from types import FunctionType import re import ast from myhdl import AlwaysCombError from myhdl._Signal import _Signal, _isListOfSigs from myhdl._util import _isGenFunc, _dedent from myhdl._Waiter import _Waiter, _SignalWaiter, _SignalTupleWaiter from myhdl._instance import _Instantiator from myhdl._always import _Always from myhdl._resolverefs import _AttrRefTransformer from myhdl._visitors import _SigNameVisitor class _error: pass _error.ArgType = "always_comb argument should be a classic function" _error.NrOfArgs = "always_comb argument should be a function without arguments" _error.Scope = "always_comb argument should be a local function" _error.SignalAsInout = "signal (%s) used as inout in always_comb function argument" _error.EmbeddedFunction = "embedded functions in always_comb function argument not supported" _error.EmptySensitivityList= "sensitivity list is empty" def always_comb(func): if not isinstance( func, FunctionType): raise AlwaysCombError(_error.ArgType) if _isGenFunc(func): raise AlwaysCombError(_error.ArgType) if func.__code__.co_argcount > 0: raise AlwaysCombError(_error.NrOfArgs) c = _AlwaysComb(func) return c # class _AlwaysComb(_Instantiator): class _AlwaysComb(_Always): # def __init__(self, func, symdict): # self.func = func # self.symdict = symdict # s = inspect.getsource(func) # # remove decorators # s = re.sub(r"@.*", "", s) # s = s.lstrip() # tree = compiler.parse(s) # v = _SigNameVisitor(symdict) # compiler.walk(tree, v) # self.inputs = v.inputs # self.outputs = v.outputs # senslist = [] # for n in self.inputs: # s = self.symdict[n] # if isinstance(s, Signal): # senslist.append(s) # else: # list of sigs # senslist.extend(s) # self.senslist = tuple(senslist) # self.gen = self.genfunc() # if len(self.senslist) == 0: # raise AlwaysCombError(_error.EmptySensitivityList) # if len(self.senslist) == 1: # W = _SignalWaiter # else: # W = _SignalTupleWaiter # self.waiter = W(self.gen) def __init__(self, func): senslist = [] super(_AlwaysComb, self).__init__(func, senslist) s = inspect.getsource(func) s = _dedent(s) tree = ast.parse(s) # print ast.dump(tree) v = _AttrRefTransformer(self) v.visit(tree) v = _SigNameVisitor(self.symdict) v.visit(tree) self.inputs = v.results['input'] self.outputs = v.results['output'] inouts = v.results['inout'] | self.inputs.intersection(self.outputs) if inouts: raise AlwaysCombError(_error.SignalAsInout % inouts) if v.results['embedded_func']: raise AlwaysCombError(_error.EmbeddedFunction) for n in self.inputs: s = self.symdict[n] if isinstance(s, _Signal): 
senslist.append(s) elif _isListOfSigs(s): senslist.extend(s) self.senslist = tuple(senslist) if len(self.senslist) == 0: raise AlwaysCombError(_error.EmptySensitivityList) def genfunc(self): senslist = self.senslist if len(senslist) == 1: senslist = senslist[0] func = self.func while 1: func() yield senslist
lgpl-2.1
-1,478,581,630,403,665,000
33
93
0.638655
false
3.574704
false
false
false
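A small combinational block exercising the decorator above, written in the classic Simulation-based style this version of myhdl targets; the gate and signal names are illustrative.

from myhdl import Signal, Simulation, delay, always_comb

def and_gate(z, a, b):
    @always_comb
    def logic():
        # a and b are read here, so they form the inferred sensitivity list
        z.next = a and b
    return logic

a, b, z = [Signal(bool(0)) for _ in range(3)]

def stimulus():
    for va, vb in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        a.next, b.next = va, vb
        yield delay(10)
        print(int(a), int(b), int(z))

Simulation(and_gate(z, a, b), stimulus()).run()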
enricobacis/cineca-scopus
src/cineca3.py
1
3928
#!/usr/bin/env python #coding: utf-8 from contextlib import closing from operator import itemgetter from datetime import datetime from argparse import ArgumentParser from unicodecsv import DictWriter from utils import read_cineca_file, csv_to_db import sqlite3 import json import re FIELDS = ['Ateneo', 'Facoltà', 'Fascia', 'Genere', 'S.C.', 'Servizio prestato in altro ateneo', 'Struttura di afferenza', 'author', 'identifier', 'eid', 'title', 'aggregationType', 'citedby-count', 'publicationName', 'isbn', 'issn', 'volume', 'issueIdentifier', 'pageRange', 'pageNum', 'coverDate', 'coverDisplayDate', 'doi', 'numAuthors'] QUERY = 'SELECT entries FROM articles WHERE author = ? AND ateneo = ?' def pagenum(pageRange): try: page = list(map(int, pageRange.split('-'))) return 1 if len(page) == 1 else page[1] - page[0] except: return None def process(entry): for key, value in list(entry.items()): if ':' in key: del entry[key] key = key.partition(':')[2] entry[key] = value match = re.match('Author list of (\d+)', entry.get('message', '')) if match: entry['numAuthors'] = int(match.group(1)) else: entry['numAuthors'] = len(entry.get('author', [])) or None # eid and identifier default to 0 entry['eid'] = entry.get('eid', 0) entry['identifier'] = entry.get('identifier', 0) # validate coverDate (or default to 1900-01-01) date = entry.get('coverDate', '') try: datesplit = list(map(int, date.split('-'))) if len(datesplit) == 3 and datesplit[1] == 0: date = '%d-%d-%s' % (datesplit[0], datesplit[1]+1, datesplit[2]) datetime.strptime(date, '%Y-%m-%d') except: entry['coverDate'] = '1900-01-01' entry['author'] = entry['Cognome e Nome'] entry['pageNum'] = pagenum(entry.get('pageRange', None)) return entry def mergedicts(*dicts): return {k:v for d in dicts for k,v in d.items()} if __name__ == '__main__': from config import FILENAME, DBFILE, OUTFILE, PRODUCTSDB parser = ArgumentParser('convert scopus db to csv') parser.add_argument('--input', default=FILENAME, help='cineca input file') parser.add_argument('--db', default=DBFILE, help='database file') parser.add_argument('--output', default=OUTFILE, help='output csv file') parser.add_argument('--outdb', default=PRODUCTSDB, help='output db file') args = parser.parse_args() with open(args.output, 'wb') as outfile: csvreader = [row.to_dict() for row in read_cineca_file(args.input)] authors = [(row['Cognome e Nome'], row['Ateneo'], row) for row in csvreader] authors.sort(key=itemgetter(0, 1)) csvwriter = DictWriter(outfile, FIELDS, extrasaction='ignore', encoding='utf-8') csvwriter.writeheader() with sqlite3.connect(args.db) as connection: with closing(connection.cursor()) as cursor: for author, ateneo, authordata in authors: entries = cursor.execute(QUERY, (author,ateneo)).fetchall() if not entries: print('Empty entry added for %s' % author) csvwriter.writerow(process(authordata)) else: inserted = set() for entry in json.loads(entries[0][0]): ID = entry.get('dc:identifier', '') print('%s\t%s' % (author, ID)) if ID in inserted: print(' * duplicate ignored *') else: inserted.add(ID) csvwriter.writerow(process(mergedicts(authordata, entry))) print('\n[*] Converting csv to sqlite3db ...') csv_to_db(args.output, args.outdb, 'products')
mit
-8,302,143,502,810,306,000
37.881188
90
0.578813
false
3.729345
false
false
false
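The two pure helpers in the record above are easy to sanity-check in isolation; the expected values below follow directly from their definitions (the import assumes src/ is on sys.path and the module's dependencies are installed).

from cineca3 import pagenum, mergedicts

assert pagenum('100-108') == 8        # end - start
assert pagenum('42') == 1             # a single page counts as 1
assert pagenum(None) is None          # unparsable input falls through

# right-biased merge: later dicts win on key collisions
assert mergedicts({'a': 1, 'b': 2}, {'b': 3}) == {'a': 1, 'b': 3}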
j-rock/cs598ps
src/py/main.py
1
4059
import sys
import time

from cssigps.offlineclassifier import *
from cssigps.dataset import *
from cssigps.feature import *
from cssigps.experiments import *
from get_dropbox_path import *

def print_usage():
    """
    Print the usage for the main script.
    """
    print("USAGE: use run.sh or call main.py directly.")
    print("")
    print(" run.sh <EXPERIMENT_NUMBER>")
    print(" python main.py <EXPERIMENT_NUMBER>")

if __name__ == '__main__':

    # decide which experiment to run based on the command line or user-input
    response = ""
    if len(sys.argv) >= 2:
        response=sys.argv[1]
        if response in ["-h","--help"]:
            print_usage()
            quit()
    else:
        prompt = "Which experiment would you like to run? [0-5]"
        response = raw_input(prompt)

    # run experiment
    if response == "0":
        path=get_dropbox_path()+"old-test/"
        run_experiment_0(path)
    elif response == "1":
        run_experiment_1(include_none=True)
    elif response == "2":
        run_experiment_2()
    elif response == "3":
        run_experiment_3()
    elif response == "4":
        run_experiment_4()
    elif response == "5":
        path=get_dropbox_path()+"vowels-test/"
        run_offline_svm(path)
    elif response == "S":

        # run single class classifier
        c = sys.argv[2]
        f = sys.argv[3]
        classes=["NONE"]
        path=get_dropbox_path()+"yes-no-test/"
        factory = FBankFeature()

        # select the class
        if c == "Y":
            path=get_dropbox_path()+"yes-no-test/"
            classes.append("Y")
        elif c=="N":
            path=get_dropbox_path()+"yes-no-test/"
            classes.append("N")
        elif c=="A":
            path=get_dropbox_path()+"vowels-test/"
            classes=["A","NONE"]
        elif c=="E":
            path=get_dropbox_path()+"vowels-test/"
            classes=["E","NONE"]
        elif c=="I":
            path=get_dropbox_path()+"vowels-test/"
            classes=["I","NONE"]
        elif c=="O":
            path=get_dropbox_path()+"vowels-test/"
            classes=["O","NONE"]
        elif c=="U":
            path=get_dropbox_path()+"vowels-test/"
            classes=["U","NONE"]
        else:
            print("class argument invalid")
            quit()

        # select the feature
        if f == "fbank":
            factory=FBankFeature()
        elif f == "m" or f == "magnitude":
            factory=MagnitudeFeature()
        elif f == "t" or f == "template":
            factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
        else:
            print("feature argument invalid")

        samples = find_testsamples(path)
        sample_set = SampleSet(samples,classes=classes)
        sample_set.stats()
        run_sample_experiment(sample_set,feat_factory=factory)

    elif response == "M":
        # run multi class classifier
        c = sys.argv[2]
        f = sys.argv[3]
        classes=["NONE"]
        path=get_dropbox_path()+"yes-no-test/"
        factory = FBankFeature()

        # select the class
        if c == "Y":
            path=get_dropbox_path()+"yes-no-test/"
            classes=["Y","N","NONE"]
        elif c=="A":
            path=get_dropbox_path()+"vowels-test/"
            classes=["A","E","I","O","U","NONE"]
        else:
            print("class argument invalid")
            quit()

        samples = find_testsamples(path)
        sample_set = SampleSet(samples,classes=classes)
        sample_set.stats()

        # select the feature
        if f == "fbank":
            factory=FBankFeature()
        elif f == "m" or f == "magnitude":
            factory=MagnitudeFeature()
        elif f == "t" or f == "template":
            factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
        else:
            print("feature argument invalid")

        run_sample_experiment(sample_set,feat_factory=factory)
    else:
        print("Invalid option. Aborting...")
mit
-7,437,332,897,349,050,000
29.75
103
0.537078
false
3.854701
true
false
false
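Invocation patterns for the driver above, reconstructed from its argument handling; the data directories are resolved via get_dropbox_path().

python main.py 0             # offline experiment on <dropbox>/old-test/
python main.py 5             # offline SVM on <dropbox>/vowels-test/
python main.py S N fbank     # single-class "N" classifier, filterbank features
python main.py M A t         # multi-class vowels, template feature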
jtaghiyar/kronos
setup.py
1
1513
''' Created on Jul 10, 2014 @author: jtaghiyar ''' import codecs import os import re from setuptools import setup def read(*paths): here = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(here, *paths)) as f: return f.read() def get_version(): version_file = read("kronos", "kronos_version.py") version_match = re.search(r"^kronos_version = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") long_description = read('README.md') setup(name='kronos_pipeliner', version=get_version(), description='A workflow assembler for genome analytics and informatics', long_description=long_description, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Science/Research", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2.7", "Topic :: Scientific/Engineering :: Bio-Informatics", ], keywords='pipeline workflow bioinformatics kronos', author='M. Jafar Taghiyar', author_email='jafar.taghiyar@gmail.com', url='https://github.com/jtaghiyar/kronos', license='MIT', packages=['kronos', 'templates'], entry_points={'console_scripts':['kronos=kronos:main']}, install_requires = ['pyyaml>=3.11', 'ruffus==2.4.1'] )
mit
-666,503,168,781,725,600
30.520833
78
0.621943
false
3.526807
false
false
false
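get_version above parses kronos/kronos_version.py with a regex instead of importing it, so setup can run before any dependencies are installed. A sketch of the one line that file must contain (the version number is illustrative):

# kronos/kronos_version.py -- matched by r"^kronos_version = ['\"]([^'\"]*)['\"]"
kronos_version = '2.0.4'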
wdv4758h/ZipPy
edu.uci.python.benchmark/src/micro/boolean-logic.py
1
1621
# zwei 04/24/2014 # micro benchmark: method call polymorphic inspired by richards import time iteration = 50000 class Task(object): def __init__(self, p, w, h): self.packet_pending = p self.task_waiting = w self.task_holding = h self.link = None def isTaskHoldingOrWaiting(self): return self.task_holding or (not self.packet_pending and self.task_waiting) def isTaskHoldingOrWaiting(task_holding, packet_pending, task_waiting): return task_holding or (not packet_pending and task_waiting) TASK_LIST = [Task(False, False, True), Task(False, True, False), Task(True, True, False), Task(True, False, True)] def setupTaskQueue(): prev = None for t in TASK_LIST: t.link = prev prev = t return t TASK_QUEUE = setupTaskQueue() def dostuff(): total = 0 for i in range(iteration): t = TASK_QUEUE while t is not None: if (t.isTaskHoldingOrWaiting()): total += 1 t = t.link return total def noObjectDoStuff(): p = True w = False h = True total = 0 for i in range(iteration): h = isTaskHoldingOrWaiting(h, p, w) if (isTaskHoldingOrWaiting(h, p, w)): total += 1 return total def measure(num): print("Start timing...") start = time.time() for i in range(num): # 50000 result = dostuff() print(result) duration = "%.3f\n" % (time.time() - start) print("boolean-logic: " + duration) # warm up for i in range(500): dostuff() measure(1000)
bsd-3-clause
-691,052,674,666,857,300
21.527778
83
0.586675
false
3.448936
false
false
false
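The benchmark's printed result can be derived by hand from TASK_LIST: the predicate h or (not p and w) holds for three of the four (p, w, h) triples, so each pass over the queue adds 3 and dostuff() returns 3 * iteration.

# (p=F, w=F, h=T) -> True   (holding)
# (p=F, w=T, h=F) -> True   (not pending and waiting)
# (p=T, w=T, h=F) -> False
# (p=T, w=F, h=T) -> True   (holding)
assert dostuff() == 3 * iteration     # == 150000 with the default 50000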
pelson/conda-build
tests/test_misc.py
4
2003
import json from os.path import join import pytest from conda_build.utils import on_win import conda_build._link as _link from conda_build.conda_interface import PathType, EntityEncoder, CrossPlatformStLink def test_pyc_f_2(): assert _link.pyc_f('sp/utils.py', (2, 7, 9)) == 'sp/utils.pyc' def test_pyc_f_3(): for f, r in [ ('sp/utils.py', 'sp/__pycache__/utils.cpython-34.pyc'), ('sp/foo/utils.py', 'sp/foo/__pycache__/utils.cpython-34.pyc'), ]: assert _link.pyc_f(f, (3, 4, 2)) == r def test_pathtype(): hardlink = PathType("hardlink") assert str(hardlink) == "hardlink" assert hardlink.__json__() == 'hardlink' softlink = PathType("softlink") assert str(softlink) == "softlink" assert softlink.__json__() == "softlink" def test_entity_encoder(tmpdir): test_file = join(str(tmpdir), "test-file") test_json = {"a": PathType("hardlink"), "b": 1} with open(test_file, "w") as f: json.dump(test_json, f, cls=EntityEncoder) with open(test_file, "r") as f: json_file = json.load(f) assert json_file == {"a": "hardlink", "b": 1} @pytest.mark.skipif(on_win, reason="link not available on win/py2.7") def test_crossplatform_st_link(tmpdir): from os import link test_file = join(str(tmpdir), "test-file") test_file_linked = join(str(tmpdir), "test-file-linked") test_file_link = join(str(tmpdir), "test-file-link") open(test_file, "a").close() open(test_file_link, "a").close() link(test_file_link, test_file_linked) assert 1 == CrossPlatformStLink.st_nlink(test_file) assert 2 == CrossPlatformStLink.st_nlink(test_file_link) assert 2 == CrossPlatformStLink.st_nlink(test_file_linked) @pytest.mark.skipif(not on_win, reason="already tested") def test_crossplatform_st_link_on_win(tmpdir): test_file = join(str(tmpdir), "test-file") open(test_file, "a").close() assert 1 == CrossPlatformStLink.st_nlink(test_file)
bsd-3-clause
-808,085,511,508,831,500
30.296875
84
0.636046
false
2.994021
true
false
false
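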
BiznetGIO/horizon
openstack_dashboard/api/base.py
1
12067
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import Sequence import functools from django.conf import settings import semantic_version import six from horizon import exceptions __all__ = ('APIResourceWrapper', 'APIDictWrapper', 'get_service_from_catalog', 'url_for',) @functools.total_ordering class Version(object): def __init__(self, version): self.version = semantic_version.Version(str(version), partial=True) def __eq__(self, other): return self.version == Version(other).version def __lt__(self, other): return self.version < Version(other).version def __repr__(self): return "Version('%s')" % self.version def __str__(self): return str(self.version) def __hash__(self): return hash(str(self.version)) class APIVersionManager(object): """Object to store and manage API versioning data and utility methods.""" SETTINGS_KEY = "OPENSTACK_API_VERSIONS" def __init__(self, service_type, preferred_version=None): self.service_type = service_type self.preferred = preferred_version self._active = None self.supported = {} # As a convenience, we can drop in a placeholder for APIs that we # have not yet needed to version. This is useful, for example, when # panels such as the admin metadata_defs wants to check the active # version even though it's not explicitly defined. Previously # this caused a KeyError. if self.preferred: self.supported[self.preferred] = {"version": self.preferred} @property def active(self): if self._active is None: self.get_active_version() return self._active def load_supported_version(self, version, data): version = Version(version) self.supported[version] = data def get_active_version(self): if self._active is not None: return self.supported[self._active] key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type) if key is None: # TODO(gabriel): support API version discovery here; we'll leave # the setting in as a way of overriding the latest available # version. key = self.preferred version = Version(key) # Provide a helpful error message if the specified version isn't in the # supported list. if version not in self.supported: choices = ", ".join(str(k) for k in six.iterkeys(self.supported)) msg = ('%s is not a supported API version for the %s service, ' ' choices are: %s' % (version, self.service_type, choices)) raise exceptions.ConfigurationError(msg) self._active = version return self.supported[self._active] def clear_active_cache(self): self._active = None class APIResourceWrapper(object): """Simple wrapper for api objects. Define _attrs on the child class and pass in the api object as the only argument to the constructor """ _attrs = [] _apiresource = None # Make sure _apiresource is there even in __init__. 
def __init__(self, apiresource): self._apiresource = apiresource def __getattribute__(self, attr): try: return object.__getattribute__(self, attr) except AttributeError: if attr not in self._attrs: raise # __getattr__ won't find properties return getattr(self._apiresource, attr) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, dict((attr, getattr(self, attr)) for attr in self._attrs if hasattr(self, attr))) def to_dict(self): obj = {} for key in self._attrs: obj[key] = getattr(self._apiresource, key, None) return obj class APIDictWrapper(object): """Simple wrapper for api dictionaries Some api calls return dictionaries. This class provides identical behavior as APIResourceWrapper, except that it will also behave as a dictionary, in addition to attribute accesses. Attribute access is the preferred method of access, to be consistent with api resource objects from novaclient. """ _apidict = {} # Make sure _apidict is there even in __init__. def __init__(self, apidict): self._apidict = apidict def __getattribute__(self, attr): try: return object.__getattribute__(self, attr) except AttributeError: if attr not in self._apidict: raise return self._apidict[attr] def __getitem__(self, item): try: return getattr(self, item) except (AttributeError, TypeError) as e: # caller is expecting a KeyError raise KeyError(e) def __contains__(self, item): try: return hasattr(self, item) except TypeError: return False def get(self, item, default=None): try: return getattr(self, item) except (AttributeError, TypeError): return default def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self._apidict) def to_dict(self): return self._apidict class Quota(object): """Wrapper for individual limits in a quota.""" def __init__(self, name, limit): self.name = name self.limit = limit def __repr__(self): return "<Quota: (%s, %s)>" % (self.name, self.limit) class QuotaSet(Sequence): """Wrapper for client QuotaSet objects. This turns the individual quotas into Quota objects for easier handling/iteration. `QuotaSet` objects support a mix of `list` and `dict` methods; you can use the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and use the `get` method to retrieve a specific quota, but otherwise it behaves much like a list or tuple, particularly in supporting iteration. """ def __init__(self, apiresource=None): self.items = [] if apiresource: if hasattr(apiresource, '_info'): items = apiresource._info.items() else: items = apiresource.items() for k, v in items: if k == 'id': continue self[k] = v def __setitem__(self, k, v): v = int(v) if v is not None else v q = Quota(k, v) self.items.append(q) def __getitem__(self, index): return self.items[index] def __add__(self, other): """Merge another QuotaSet into this one. Existing quotas are not overridden. 
""" if not isinstance(other, QuotaSet): msg = "Can only add QuotaSet to QuotaSet, " \ "but received %s instead" % type(other) raise ValueError(msg) for item in other: if self.get(item.name).limit is None: self.items.append(item) return self def __len__(self): return len(self.items) def __repr__(self): return repr(self.items) def get(self, key, default=None): match = [quota for quota in self.items if quota.name == key] return match.pop() if len(match) else Quota(key, default) def add(self, other): return self.__add__(other) def get_service_from_catalog(catalog, service_type): if catalog: for service in catalog: if 'type' not in service: continue if service['type'] == service_type: return service return None def get_version_from_service(service): if service and service.get('endpoints'): endpoint = service['endpoints'][0] if 'interface' in endpoint: return 3 else: return 2.0 return 2.0 # Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces ENDPOINT_TYPE_TO_INTERFACE = { 'publicURL': 'public', 'internalURL': 'internal', 'adminURL': 'admin', } def get_url_for_service(service, region, endpoint_type): if 'type' not in service: return None identity_version = get_version_from_service(service) service_endpoints = service.get('endpoints', []) available_endpoints = [endpoint for endpoint in service_endpoints if region == _get_endpoint_region(endpoint)] """if we are dealing with the identity service and there is no endpoint in the current region, it is okay to use the first endpoint for any identity service endpoints and we can assume that it is global """ if service['type'] == 'identity' and not available_endpoints: available_endpoints = [endpoint for endpoint in service_endpoints] for endpoint in available_endpoints: try: if identity_version < 3: return endpoint.get(endpoint_type) else: interface = \ ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '') if endpoint.get('interface') == interface: return endpoint.get('url') except (IndexError, KeyError): """it could be that the current endpoint just doesn't match the type, continue trying the next one """ pass return None def url_for(request, service_type, endpoint_type=None, region=None): endpoint_type = endpoint_type or getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL') fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None) catalog = request.user.service_catalog service = get_service_from_catalog(catalog, service_type) if service: if not region: region = request.user.services_region url = get_url_for_service(service, region, endpoint_type) if not url and fallback_endpoint_type: url = get_url_for_service(service, region, fallback_endpoint_type) if url: return url raise exceptions.ServiceCatalogException(service_type) def is_service_enabled(request, service_type): service = get_service_from_catalog(request.user.service_catalog, service_type) if service: region = request.user.services_region for endpoint in service.get('endpoints', []): if 'type' not in service: continue # ignore region for identity if service['type'] == 'identity' or \ _get_endpoint_region(endpoint) == region: return True return False def _get_endpoint_region(endpoint): """Common function for getting the region from endpoint. In Keystone V3, region has been deprecated in favor of region_id. This method provides a way to get region that works for both Keystone V2 and V3. """ return endpoint.get('region_id') or endpoint.get('region')
apache-2.0
6,872,752,707,314,681,000
31.790761
79
0.59642
false
4.405622
false
false
false
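The QuotaSet docstring above promises mixed dict/list behaviour, which is observable without any OpenStack service since the wrappers are plain Python; the import path follows this record.

from openstack_dashboard.api.base import QuotaSet

qs = QuotaSet()
qs['instances'] = '10'            # __setitem__ coerces to int, wraps in Quota
qs['cores'] = 20
print(len(qs))                    # 2
print(qs.get('instances').limit)  # 10
print(qs.get('ram', 512).limit)   # 512 -- a default Quota for missing keys

other = QuotaSet()
other['ram'] = 2048
merged = qs + other               # __add__ fills in only quotas qs lacks
print([q.name for q in merged])   # ['instances', 'cores', 'ram']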
ozmartian/tvlinker
tvlinker/threads.py
1
11150
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import time from datetime import datetime, timedelta from tzlocal import get_localzone import pytz import requests from PyQt5.QtCore import QObject, QSettings, QThread, pyqtSignal, pyqtSlot from PyQt5.QtWidgets import QMessageBox, qApp from bs4 import BeautifulSoup from requests.exceptions import HTTPError import cloudscraper from tvlinker.filesize import alternative, size try: # noinspection PyPackageRequirements import simplejson as json except ImportError: import json class ShadowSocks: config = { 'ssocks': { 'procs': ['ss-qt5', 'sslocal'], 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5://127.0.0.1:1080' }, }, 'v2ray': { 'procs': ['v2ray'], 'proxies': { 'http': 'socks5://127.0.0.1:10808', 'https': 'socks5://127.0.0.1:10808' } } } @staticmethod def detect() -> str: if sys.platform.startswith('linux'): ptypes = ShadowSocks.config.keys() ps = os.popen('ps -Af').read() for ptype in ptypes: procs = ShadowSocks.config[ptype]['procs'] for p in procs: if ps.count(p): return ptype return None @staticmethod def proxies() -> dict: proxy_type = ShadowSocks.detect() return ShadowSocks.config[proxy_type]['proxies'] if proxy_type is not None else {} class ScrapeWorker(QObject): addRow = pyqtSignal(list) workFinished = pyqtSignal() def __init__(self, source_url: str, useragent: str, maxpages: int): super(ScrapeWorker, self).__init__() self.maxpages = maxpages self.source_url = source_url self.user_agent = useragent self.scraper = cloudscraper.create_scraper() self.scraper.proxies = ShadowSocks.proxies() self.tz_format = '%b %d %Y %H:%M' self.tz_local = get_localzone() self.complete = False def scrape(self, pagenum: int) -> None: try: url = self.source_url.format(pagenum + 1) req = self.scraper.get(url) bs = BeautifulSoup(req.text, 'lxml') posts = bs('div', class_='post') for post in posts: dt_utc = datetime.strptime(post.find('div', class_='p-c p-c-time').get_text().strip(), self.tz_format) # TODO: fix hardcoded DST adjustment dt_local = dt_utc.replace(tzinfo=pytz.utc).astimezone(self.tz_local) - timedelta(hours=2) dlsize = post.find('h2').get_text().strip() table_row = [ dt_local.strftime(self.tz_format), post.find('a', class_='p-title').get('href').strip(), post.find('a', class_='p-title').get_text().strip(), dlsize[dlsize.rfind('(') + 1:len(dlsize) - 1] ] self.addRow.emit(table_row) except HTTPError: sys.stderr.write(sys.exc_info()[0]) # noinspection PyTypeChecker QMessageBox.critical(None, 'ERROR NOTIFICATION', sys.exc_info()[0]) # self.exit() @pyqtSlot() def begin(self): for page in range(self.maxpages): if QThread.currentThread().isInterruptionRequested(): return self.scrape(page) self.complete = True self.workFinished.emit() class HostersThread(QThread): setHosters = pyqtSignal(list) noLinks = pyqtSignal() def __init__(self, link_url: str, useragent: str): QThread.__init__(self) self.link_url = link_url self.user_agent = useragent self.scraper = cloudscraper.create_scraper() self.scraper.proxies = ShadowSocks.proxies() def __del__(self) -> None: self.wait() def get_hoster_links(self) -> None: try: req = self.scraper.get(self.link_url) bs = BeautifulSoup(req.text, 'lxml') links = bs.select('div.post h2[style="text-align: center;"]') self.setHosters.emit(links) except HTTPError: print(sys.exc_info()[0]) # noinspection PyTypeChecker QMessageBox.critical(None, 'ERROR NOTIFICATION', sys.exc_info()[0]) QThread.currentThread().quit() except IndexError: self.noLinks.emit() QThread.currentThread().quit() def run(self) 
-> None:
        self.get_hoster_links()


class RealDebridThread(QThread):
    unrestrictedLink = pyqtSignal(str)
    supportedHosts = pyqtSignal(dict)
    hostStatus = pyqtSignal(dict)
    errorMsg = pyqtSignal(list)

    class RealDebridAction:
        UNRESTRICT_LINK = 0,
        SUPPORTED_HOSTS = 1,
        HOST_STATUS = 2

    def __init__(self, settings: QSettings, api_url: str, link_url: str,
                 action: RealDebridAction = RealDebridAction.UNRESTRICT_LINK,
                 check_host: str = None):
        QThread.__init__(self)
        self.api_url = api_url
        self.api_token = settings.value('realdebrid_apitoken')
        self.api_proxy = settings.value('realdebrid_apiproxy', False, bool)
        self.link_url = link_url
        self.action = action
        self.check_host = check_host
        self.proxies = ShadowSocks.proxies() if self.api_proxy else {}

    def __del__(self):
        self.wait()

    def post(self, endpoint: str, payload: object = None) -> dict:
        try:
            res = requests.post('{0}{1}?auth_token={2}'.format(self.api_url, endpoint, self.api_token),
                                data=payload, proxies=self.proxies)
            return res.json()
        except HTTPError as e:
            # bind the exception: requests' HTTPError carries the offending
            # response object; the class itself has no .code/.reason attributes
            print(sys.exc_info())
            self.errorMsg.emit([
                'ERROR NOTIFICATION',
                '<h3>Real-Debrid API Error</h3>'
                'A problem occurred whilst communicating with Real-Debrid. Please check your '
                'Internet connection.<br/><br/>'
                '<b>ERROR LOG:</b><br/>(Error Code %s) %s<br/>%s'
                % (e.response.status_code, e.response.reason, e)
            ])
            # self.exit()

    def unrestrict_link(self) -> None:
        jsonres = self.post(endpoint='/unrestrict/link', payload={'link': self.link_url})
        if 'download' in jsonres.keys():
            self.unrestrictedLink.emit(jsonres['download'])
        else:
            self.errorMsg.emit([
                'REALDEBRID ERROR',
                '<h3>Could not unrestrict link</h3>The hoster is most likely '
                'down, please try again later.<br/><br/>{}'.format(jsonres)
            ])

    def supported_hosts(self) -> None:
        jsonres = self.post(endpoint='/hosts')
        self.supportedHosts.emit(jsonres)

    # def host_status(self, host: str) -> None:
    #     jsonres = self.post(endpoint='/hosts/status')
    #     self.hostStatus.emit(jsonres)

    def run(self) -> None:
        if self.action == RealDebridThread.RealDebridAction.UNRESTRICT_LINK:
            self.unrestrict_link()
        elif self.action == RealDebridThread.RealDebridAction.SUPPORTED_HOSTS:
            self.supported_hosts()
        # elif self.action == RealDebridThread.HOST_STATUS:
        #     self.host_status(self.check_host)


class Aria2Thread(QThread):
    aria2Confirmation = pyqtSignal(bool)

    def __init__(self, settings: QSettings, link_url: str):
        QThread.__init__(self)
        self.rpc_host = settings.value('aria2_rpc_host')
        self.rpc_port = settings.value('aria2_rpc_port')
        self.rpc_secret = settings.value('aria2_rpc_secret')
        self.rpc_username = settings.value('aria2_rpc_username')
        self.rpc_password = settings.value('aria2_rpc_password')
        self.link_url = link_url

    def __del__(self) -> None:
        self.wait()

    def add_uri(self) -> None:
        user, passwd = '', ''
        if len(self.rpc_username) > 0 and len(self.rpc_password) > 0:
            user = self.rpc_username
            passwd = self.rpc_password
        elif len(self.rpc_secret) > 0:
            user = 'token'
            passwd = self.rpc_secret
        aria2_endpoint = '%s:%s/jsonrpc' % (self.rpc_host, self.rpc_port)
        headers = {'Content-Type': 'application/json'}
        payload = json.dumps(
            {
                'jsonrpc': '2.0',
                'id': 1,
                'method': 'aria2.addUri',
                'params': ['%s:%s' % (user, passwd), [self.link_url]]
            },
            sort_keys=False).encode('utf-8')
        try:
            from urllib.parse import urlencode
            from urllib.request import Request, urlopen
            req = Request(aria2_endpoint, headers=headers, data=payload)
            res = urlopen(req).read().decode('utf-8')
            jsonres = json.loads(res)
            # res = requests.post(aria2_endpoint, headers=headers, data=payload)
            # jsonres = res.json()
self.aria2Confirmation.emit('result' in jsonres.keys()) except HTTPError: print(sys.exc_info()) # noinspection PyTypeChecker QMessageBox.critical(None, 'ERROR NOTIFICATION', sys.exc_info(), QMessageBox.Ok) self.aria2Confirmation.emit(False) # self.exit() def run(self) -> None: self.add_uri() class DownloadThread(QThread): dlComplete = pyqtSignal() dlProgress = pyqtSignal(int) dlProgressTxt = pyqtSignal(str) def __init__(self, link_url: str, dl_path: str): QThread.__init__(self) self.download_link = link_url self.download_path = dl_path self.cancel_download = False self.proxies = ShadowSocks.proxies() def __del__(self) -> None: self.wait() def download_file(self) -> None: req = requests.get(self.download_link, stream=True, proxies=self.proxies) filesize = int(req.headers['Content-Length']) filename = os.path.basename(self.download_path) downloadedChunk = 0 blockSize = 8192 start = time.clock() with open(self.download_path, 'wb') as f: for chunk in req.iter_content(chunk_size=blockSize): if self.cancel_download or not chunk: req.close() break f.write(chunk) downloadedChunk += len(chunk) progress = float(downloadedChunk) / filesize self.dlProgress.emit(progress * 100) dlspeed = downloadedChunk // (time.clock() - start) / 1000 progressTxt = '<b>Downloading {0}</b>:<br/>{1} of <b>{3}</b> [{2:.2%}] [{4} kbps]' \ .format(filename, downloadedChunk, progress, size(filesize, system=alternative), dlspeed) self.dlProgressTxt.emit(progressTxt) self.dlComplete.emit() def run(self) -> None: self.download_file()
gpl-3.0
5,962,978,612,205,332,000
34.623003
118
0.562332
false
3.804162
false
false
false
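Typical signal wiring for the download worker above inside a PyQt5 window; progress_bar and status_label are placeholder widget names, and the URL and path are illustrative.

# sketch only: progress_bar (QProgressBar) and status_label (QLabel) are assumed
thread = DownloadThread(link_url='https://example.com/file.bin',   # placeholder
                        dl_path='/tmp/file.bin')
thread.dlProgress.connect(progress_bar.setValue)      # percentage as int
thread.dlProgressTxt.connect(status_label.setText)    # rich-text status line
thread.dlComplete.connect(lambda: status_label.setText('Done'))
thread.start()                                        # run() -> download_file()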
xunilrj/sandbox
courses/course-edx-dat2031x/Simulation.py
1
2680
# -*- coding: utf-8 -*-
def sim_normal(nums, mean = 600, sd = 30):
    import numpy as np
    import numpy.random as nr
    for n in nums:
        dist = nr.normal(loc = mean, scale = sd, size = n)
        titl = 'Normal distribution with ' + str(n) + ' values'
        print('Summary for ' + str(n) + ' samples')
        print(dist_summary(dist, titl))
        print('Empirical 95% CIs')
        print(np.percentile(dist, [2.5, 97.5]))
        print(' ')
    return('Done!')

def sim_poisson(nums, mean = 600):
    import numpy as np
    import numpy.random as nr
    for n in nums:
        dist = nr.poisson(lam = mean, size = n)
        titl = 'Poisson distribution with ' + str(n) + ' values'
        print(dist_summary(dist, titl))
        print('Empirical 95% CIs')
        print(np.percentile(dist, [2.5, 97.5]))
        print(' ')
    return('Done!')

def dist_summary(dist, names = 'dist_name'):
    import pandas as pd
    import matplotlib.pyplot as plt
    ser = pd.Series(dist)
    fig = plt.figure(1, figsize=(9, 6))
    ax = fig.gca()
    ser.hist(ax = ax, bins = 120)
    ax.set_title('Frequency distribution of ' + names)
    ax.set_ylabel('Frequency')
    plt.show()
    return(ser.describe())

def gen_profits(num):
    import numpy.random as nr
    unif = nr.uniform(size = num)
    out = [5 if x < 0.3 else (3.5 if x < 0.6 else 4) for x in unif]
    return(out)

def gen_tips(num):
    import numpy.random as nr
    unif = nr.uniform(size = num)
    out = [0 if x < 0.5 else (0.25 if x < 0.7 else (1.0 if x < 0.9 else 2.0))
           for x in unif]
    return(out)

def sim_lemonade(num, mean = 600, sd = 30, pois = False):
    ## Simulate the profits and tips for
    ## a lemonade stand.
    import numpy.random as nr

    ## number of customer arrivals
    if pois:
        arrivals = nr.poisson(lam = mean, size = num)
    else:
        arrivals = nr.normal(loc = mean, scale = sd, size = num)

    print(dist_summary(arrivals, 'customer arrivals per day'))

    ## Compute distribution of average profit per arrival
    proft = gen_profits(num)
    print(dist_summary(proft, 'profit per arrival'))

    ## Total profits are profit per arrival
    ## times number of arrivals.
    total_profit = arrivals * proft
    print(dist_summary(total_profit, 'total profit per day'))

    ## Compute distribution of average tips per arrival
    tps = gen_tips(num)
    print(dist_summary(tps, 'tips per arrival'))

    ## Compute average tips per day
    total_tips = arrivals * tps
    print(dist_summary(total_tips, 'total tips per day'))

    ## Compute total profits plus total tips.
    total_take = total_profit + total_tips
    return(dist_summary(total_take, 'total net per day'))
apache-2.0
8,129,955,515,365,953,000
29.804598
67
0.614179
false
3.179122
false
false
false
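Driving the helpers above from a session or script; the sample sizes are arbitrary, and each call also pops a histogram via matplotlib.

from Simulation import sim_normal, sim_poisson, sim_lemonade

sim_normal([100, 1000, 10000], mean=600, sd=30)   # CIs tighten as n grows
sim_poisson([100, 1000, 10000], mean=600)

sim_lemonade(10000)             # normal arrivals
sim_lemonade(10000, pois=True)  # Poisson arrivals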
samdroid-apps/sugar-toolkit-gtk3
src/sugar3/bundle/activitybundle.py
1
14091
# Copyright (C) 2007, Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. """Sugar activity bundles UNSTABLE. """ from ConfigParser import ConfigParser from locale import normalize import os import shutil import tempfile import logging from sugar3 import env from sugar3.bundle.bundle import Bundle, \ MalformedBundleException, NotInstalledException from sugar3.bundle.bundleversion import NormalizedVersion from sugar3.bundle.bundleversion import InvalidVersionError def _expand_lang(locale): # Private method from gettext.py locale = normalize(locale) COMPONENT_CODESET = 1 << 0 COMPONENT_TERRITORY = 1 << 1 COMPONENT_MODIFIER = 1 << 2 # split up the locale into its base components mask = 0 pos = locale.find('@') if pos >= 0: modifier = locale[pos:] locale = locale[:pos] mask |= COMPONENT_MODIFIER else: modifier = '' pos = locale.find('.') if pos >= 0: codeset = locale[pos:] locale = locale[:pos] mask |= COMPONENT_CODESET else: codeset = '' pos = locale.find('_') if pos >= 0: territory = locale[pos:] locale = locale[:pos] mask |= COMPONENT_TERRITORY else: territory = '' language = locale ret = [] for i in range(mask + 1): if not (i & ~mask): # if all components for this combo exist ... 
val = language if i & COMPONENT_TERRITORY: val += territory if i & COMPONENT_CODESET: val += codeset if i & COMPONENT_MODIFIER: val += modifier ret.append(val) ret.reverse() return ret class ActivityBundle(Bundle): """A Sugar activity bundle See http://wiki.laptop.org/go/Activity_bundles for details """ MIME_TYPE = 'application/vnd.olpc-sugar' _zipped_extension = '.xo' _unzipped_extension = '.activity' _infodir = 'activity' def __init__(self, path, translated=True): Bundle.__init__(self, path) self.activity_class = None self.bundle_exec = None self._name = None self._icon = None self._bundle_id = None self._mime_types = None self._show_launcher = True self._tags = None self._activity_version = '0' self._summary = None self._single_instance = False info_file = self.get_file('activity/activity.info') if info_file is None: raise MalformedBundleException('No activity.info file') self._parse_info(info_file) if translated: linfo_file = self._get_linfo_file() if linfo_file: self._parse_linfo(linfo_file) def _parse_info(self, info_file): cp = ConfigParser() cp.readfp(info_file) section = 'Activity' if cp.has_option(section, 'bundle_id'): self._bundle_id = cp.get(section, 'bundle_id') else: if cp.has_option(section, 'service_name'): self._bundle_id = cp.get(section, 'service_name') logging.error('ATTENTION: service_name property in the ' 'activity.info file is deprecated, should be ' ' changed to bundle_id') else: raise MalformedBundleException( 'Activity bundle %s does not specify a bundle id' % self._path) if cp.has_option(section, 'name'): self._name = cp.get(section, 'name') else: raise MalformedBundleException( 'Activity bundle %s does not specify a name' % self._path) if cp.has_option(section, 'exec'): self.bundle_exec = cp.get(section, 'exec') else: raise MalformedBundleException( 'Activity bundle %s must specify either class or exec' % self._path) if cp.has_option(section, 'mime_types'): mime_list = cp.get(section, 'mime_types').strip(';') self._mime_types = [mime.strip() for mime in mime_list.split(';')] if cp.has_option(section, 'show_launcher'): if cp.get(section, 'show_launcher') == 'no': self._show_launcher = False if cp.has_option(section, 'tags'): tag_list = cp.get(section, 'tags').strip(';') self._tags = [tag.strip() for tag in tag_list.split(';')] if cp.has_option(section, 'icon'): self._icon = cp.get(section, 'icon') if cp.has_option(section, 'activity_version'): version = cp.get(section, 'activity_version') try: NormalizedVersion(version) except InvalidVersionError: raise MalformedBundleException( 'Activity bundle %s has invalid version number %s' % (self._path, version)) self._activity_version = version if cp.has_option(section, 'summary'): self._summary = cp.get(section, 'summary') if cp.has_option(section, 'single_instance'): if cp.get(section, 'single_instance') == 'yes': self._single_instance = True def _get_linfo_file(self): # Using method from gettext.py, first find languages from environ languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break # Next, normalize and expand the languages nelangs = [] for lang in languages: for nelang in _expand_lang(lang): if nelang not in nelangs: nelangs.append(nelang) # Finally, select a language for lang in nelangs: linfo_path = os.path.join('locale', lang, 'activity.linfo') linfo_file = self.get_file(linfo_path) if linfo_file is not None: return linfo_file return None def _parse_linfo(self, linfo_file): cp = ConfigParser() cp.readfp(linfo_file) 
section = 'Activity' if cp.has_option(section, 'name'): self._name = cp.get(section, 'name') if cp.has_option(section, 'summary'): self._summary = cp.get(section, 'summary') if cp.has_option(section, 'tags'): tag_list = cp.get(section, 'tags').strip(';') self._tags = [tag.strip() for tag in tag_list.split(';')] def get_locale_path(self): """Get the locale path inside the (installed) activity bundle.""" if self._zip_file is not None: raise NotInstalledException return os.path.join(self._path, 'locale') def get_icons_path(self): """Get the icons path inside the (installed) activity bundle.""" if self._zip_file is not None: raise NotInstalledException return os.path.join(self._path, 'icons') def get_name(self): """Get the activity user-visible name.""" return self._name def get_bundle_id(self): """Get the activity bundle id""" return self._bundle_id def get_icon(self): """Get the activity icon name""" # FIXME: this should return the icon data, not a filename, so that # we don't need to create a temp file in the zip case icon_path = os.path.join('activity', self._icon + '.svg') if self._zip_file is None: return os.path.join(self._path, icon_path) else: icon_data = self.get_file(icon_path).read() temp_file, temp_file_path = tempfile.mkstemp(prefix=self._icon, suffix='.svg') os.write(temp_file, icon_data) os.close(temp_file) return temp_file_path def get_activity_version(self): """Get the activity version""" return self._activity_version def get_command(self): """Get the command to execute to launch the activity factory""" if self.bundle_exec: command = os.path.expandvars(self.bundle_exec) else: command = 'sugar-activity ' + self.activity_class return command def get_mime_types(self): """Get the MIME types supported by the activity""" return self._mime_types def get_tags(self): """Get the tags that describe the activity""" return self._tags def get_summary(self): """Get the summary that describe the activity""" return self._summary def get_single_instance(self): """Get whether there should be a single instance for the activity""" return self._single_instance def get_show_launcher(self): """Get whether there should be a visible launcher for the activity""" return self._show_launcher def install(self): install_dir = env.get_user_activities_path() self._unzip(install_dir) install_path = os.path.join(install_dir, self._zip_root_dir) self.install_mime_type(install_path) return install_path def install_mime_type(self, install_path): """ Update the mime type database and install the mime type icon """ xdg_data_home = os.getenv('XDG_DATA_HOME', os.path.expanduser('~/.local/share')) mime_path = os.path.join(install_path, 'activity', 'mimetypes.xml') if os.path.isfile(mime_path): mime_dir = os.path.join(xdg_data_home, 'mime') mime_pkg_dir = os.path.join(mime_dir, 'packages') if not os.path.isdir(mime_pkg_dir): os.makedirs(mime_pkg_dir) installed_mime_path = os.path.join(mime_pkg_dir, '%s.xml' % self._bundle_id) self._symlink(mime_path, installed_mime_path) os.spawnlp(os.P_WAIT, 'update-mime-database', 'update-mime-database', mime_dir) mime_types = self.get_mime_types() if mime_types is not None: installed_icons_dir = \ os.path.join(xdg_data_home, 'icons/sugar/scalable/mimetypes') if not os.path.isdir(installed_icons_dir): os.makedirs(installed_icons_dir) for mime_type in mime_types: mime_icon_base = os.path.join(install_path, 'activity', mime_type.replace('/', '-')) svg_file = mime_icon_base + '.svg' info_file = mime_icon_base + '.icon' self._symlink(svg_file, os.path.join(installed_icons_dir, 
os.path.basename(svg_file))) self._symlink(info_file, os.path.join(installed_icons_dir, os.path.basename(info_file))) def _symlink(self, src, dst): if not os.path.isfile(src): return if not os.path.islink(dst) and os.path.exists(dst): raise RuntimeError('Do not remove %s if it was not ' 'installed by sugar', dst) logging.debug('Link resource %s to %s', src, dst) if os.path.lexists(dst): logging.debug('Relink %s', dst) os.unlink(dst) os.symlink(src, dst) def uninstall(self, force=False, delete_profile=False): install_path = self.get_path() if os.path.islink(install_path): # Don't remove the actual activity dir if it's a symbolic link # because we may be removing user data. os.unlink(install_path) return xdg_data_home = os.getenv('XDG_DATA_HOME', os.path.expanduser('~/.local/share')) mime_dir = os.path.join(xdg_data_home, 'mime') installed_mime_path = os.path.join(mime_dir, 'packages', '%s.xml' % self._bundle_id) if os.path.exists(installed_mime_path): os.remove(installed_mime_path) os.spawnlp(os.P_WAIT, 'update-mime-database', 'update-mime-database', mime_dir) mime_types = self.get_mime_types() if mime_types is not None: installed_icons_dir = \ os.path.join(xdg_data_home, 'icons/sugar/scalable/mimetypes') if os.path.isdir(installed_icons_dir): for f in os.listdir(installed_icons_dir): path = os.path.join(installed_icons_dir, f) if os.path.islink(path) and \ os.readlink(path).startswith(install_path): os.remove(path) if delete_profile: bundle_profile_path = env.get_profile_path(self._bundle_id) if os.path.exists(bundle_profile_path): os.chmod(bundle_profile_path, 0775) shutil.rmtree(bundle_profile_path, ignore_errors=True) self._uninstall(install_path) def is_user_activity(self): return self.get_path().startswith(env.get_user_activities_path())
lgpl-2.1
4,325,474,794,642,606,600
34.583333
78
0.560145
false
4.063149
false
false
false
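Reading bundle metadata with the class above; the .xo path and bundle id are placeholders.

from sugar3.bundle.activitybundle import ActivityBundle

bundle = ActivityBundle('/home/user/MyActivity-1.xo')  # placeholder path
print(bundle.get_bundle_id())                          # e.g. org.example.MyActivity
print('%s %s' % (bundle.get_name(), bundle.get_activity_version()))
print(bundle.get_command())                            # what Sugar executes
if bundle.get_mime_types():
    print('handles: ' + ', '.join(bundle.get_mime_types()))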
uclouvain/osis
learning_unit/ddd/domain/description_fiche.py
1
2627
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## import datetime import attr @attr.s(slots=True) class DescriptionFiche: resume = attr.ib(type=str, default=None) resume_en = attr.ib(type=str, default=None) teaching_methods = attr.ib(type=str, default=None) teaching_methods_en = attr.ib(type=str, default=None) evaluation_methods = attr.ib(type=str, default=None) evaluation_methods_en = attr.ib(type=str, default=None) other_informations = attr.ib(type=str, default=None) other_informations_en = attr.ib(type=str, default=None) online_resources = attr.ib(type=str, default=None) online_resources_en = attr.ib(type=str, default=None) bibliography = attr.ib(type=str, default=None) mobility = attr.ib(type=str, default=None) last_update = attr.ib(type=datetime.datetime, default=None) author = attr.ib(type=str, default=None) @attr.s(slots=True) class DescriptionFicheForceMajeure: teaching_methods = attr.ib(type=str, default=None) teaching_methods_en = attr.ib(type=str, default=None) evaluation_methods = attr.ib(type=str, default=None) evaluation_methods_en = attr.ib(type=str, default=None) other_informations = attr.ib(type=str, default=None) other_informations_en = attr.ib(type=str, default=None) last_update = attr.ib(type=datetime.datetime, default=None) author = attr.ib(type=str, default=None)
agpl-3.0
-6,738,978,772,855,739,000
45.070175
87
0.680883
false
3.558266
false
false
false
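The attrs classes above are plain value objects; construction and access look like this (field values are illustrative).

import datetime
from learning_unit.ddd.domain.description_fiche import DescriptionFiche

fiche = DescriptionFiche(
    resume='Introduction au droit',                    # illustrative values
    teaching_methods='Cours magistral',
    last_update=datetime.datetime(2020, 5, 4, 12, 0),
    author='jdoe',
)
print(fiche.resume, fiche.author)
print(fiche.bibliography)    # fields left unset default to None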
Aplopio/document-converter
converters/utilities.py
1
4138
import sys import re import os import shutil import logging as log sys.path.append('..') from config import OUTPUT_FOLDER, UPLOAD_FOLDER PARENT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) TMP_DIR = os.path.join(PARENT_DIR, UPLOAD_FOLDER) from html_pdf import HtmlPdf from html_txt import HtmlTxt from pdf_html import PdfHtml from txt_html import TxtHtml from doc_pdf import DocPdf from ppt_pdf import PptPdf from rtf_pdf import RtfPdf from utils import get_file_extension from file_manager import FileManager AVAILABLE_CONVERTERS = [(HtmlPdf, 'htmlpdf'), (HtmlTxt, 'htmltxt'), (PdfHtml, 'pdfhtml'), (TxtHtml, 'txthtml'), (DocPdf, 'docpdf'), (PptPdf, 'pptpdf'), (RtfPdf, 'rtfpdf'), ] def class_selector(input_format, output_format, result=None): result = result or [] if input_format == output_format: return result direct_converter = get_direct_converter(input_format, output_format) if direct_converter: result.append(direct_converter) return result input_regex = make_regex(input_format) input_matches = get_input_matches(input_regex) for input_match in input_matches: converter, converter_expression = input_match intermediate_format = get_intermediate_format(converter_expression, input_format) result.append(input_match) converter_list = class_selector(intermediate_format, output_format, result) if converter_list: return converter_list else: result.pop() def get_intermediate_format(converter_expression, input_format): return re.sub(input_format, '', converter_expression) def get_input_matches(input_regex): return [(converter, expression) for converter, expression in AVAILABLE_CONVERTERS if input_regex.match(expression)] def make_regex(format_string): return re.compile('^%s'%format_string) def get_direct_converter(input_format, output_format): converter_expression = '%s%s'%(input_format, output_format) for converter, expression in AVAILABLE_CONVERTERS: if re.match(converter_expression, expression): return (converter, expression) def get_input_format(input_files_objects): sample_input_file = input_files_objects[0].get_input_file_path() input_format = get_file_extension(sample_input_file) return input_format def set_flags_of_file_objects(input_files_objects, output_files_objects): for input_file_object, output_file_object in zip(input_files_objects, output_files_objects): if (not output_file_object) or output_file_object == input_file_object: input_file_object.converted = False else: output_file_name = os.path.basename( output_file_object.get_input_file_path()) os.system('mv %s %s' % ( output_file_object.get_input_file_path(), OUTPUT_FOLDER)) input_file_object.set_output_file_path( os.path.join(OUTPUT_FOLDER, output_file_name)) input_file_object.converted = True return input_files_objects def get_files_objects(files_paths): files_objects = [] for file_path in files_paths: if file_path: file_object = FileManager(None, input_file_path=file_path) files_objects.append(file_object) else: files_objects.append(None) return files_objects def handle_failed_conversion(input_file): if not input_file or not os.path.isfile(input_file): return failed_conversion_dir = os.path.join(TMP_DIR, 'failed_conversions') if not os.path.isdir(failed_conversion_dir): os.makedirs(failed_conversion_dir) filename = os.path.basename(input_file) try: shutil.copyfile(input_file, os.path.join(failed_conversion_dir, filename)) except IOError, ie: log.error(ie)
mit
4,393,052,712,470,979,600
33.483333
79
0.639198
false
3.803309
false
false
false
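class_selector above chains converters through intermediate formats; tracing the code by hand for two requests gives the results below (the import assumes the package's config module is resolvable).

from converters.utilities import class_selector

# a direct pair exists, so one hop:
class_selector('html', 'pdf')   # -> [(HtmlPdf, 'htmlpdf')]

# no 'dochtml' converter is registered, so doc routes through pdf:
class_selector('doc', 'html')   # -> [(DocPdf, 'docpdf'), (PdfHtml, 'pdfhtml')]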
codilime/veles
python/veles/scli/client.py
1
13837
# Copyright 2017 CodiLime # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals import socket import ssl import msgpack from veles.proto import messages, msgpackwrap from veles.proto.messages import PROTO_VERSION from veles.schema import nodeid from veles.util import helpers class Client(object): def __init__(self, sock, key, name='scli', version='1.0', description='', type='scli', quit_on_close=False): self.sock = sock wrapper = msgpackwrap.MsgpackWrapper() self.unpacker = wrapper.unpacker self.packer = wrapper.packer self.client_name = name self.client_version = version self.client_description = description self.client_type = type self.quit_on_close = quit_on_close self._authorize(helpers.prepare_auth_key(key)) def _authorize(self, key): self.sock.sendall(key) self.send_msg(messages.MsgConnect( proto_version=PROTO_VERSION, client_name=self.client_name, client_version=self.client_version, client_description=self.client_description, client_type=self.client_type, quit_on_close=self.quit_on_close, )) pkt = self.getpkt() if isinstance(pkt, messages.MsgConnected): print('Connected to server: {}'.format(pkt.server_name)) elif isinstance(pkt, messages.MsgConnectionError): raise pkt.err else: print(pkt) raise Exception('weird reply when attempting to connect') def getpkt(self): while True: try: return messages.MsgpackMsg.load(self.unpacker.unpack()) except msgpack.OutOfData: pass data = self.sock.recv(1024) if not data: raise Exception("end of file") self.unpacker.feed(data) def send_msg(self, msg): self.sock.sendall(self.packer.pack(msg.dump())) def request(self, msg): self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgRequestAck) and pkt.rid == 0: return msg.id elif isinstance(pkt, messages.MsgRequestError) and pkt.rid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to request') def create(self, parent, tags=set(), attr={}, data={}, bindata={}, pos=(None, None)): msg = messages.MsgCreate( id=nodeid.NodeID(), parent=parent, pos_start=pos[0], pos_end=pos[1], tags=tags, attr=attr, data=data, bindata=bindata, rid=0, ) self.request(msg) return msg.id def delete(self, obj): msg = messages.MsgDelete( id=obj, rid=0 ) self.request(msg) def set_parent(self, obj, parent): msg = messages.MsgSetParent( id=obj, parent=parent, rid=0 ) self.request(msg) def set_pos(self, obj, start, end): msg = messages.MsgSetPos( id=obj, pos_start=start, pos_end=end, rid=0 ) self.request(msg) def add_tag(self, obj, tag): msg = messages.MsgAddTag( id=obj, tag=tag, rid=0 ) self.request(msg) def del_tag(self, obj, tag): msg = messages.MsgDelTag( id=obj, tag=tag, rid=0 ) self.request(msg) def set_attr(self, obj, key, data): msg = messages.MsgSetAttr( id=obj, key=key, data=data, rid=0 ) self.request(msg) def set_data(self, obj, key, data): msg = messages.MsgSetData( id=obj, rid=0, key=key, data=data, ) self.request(msg) def set_bindata(self, obj, key, start, data, truncate=False): msg = messages.MsgSetBinData( id=obj, rid=0, key=key, start=start, data=data, truncate=truncate, 
) self.request(msg) def get(self, obj): msg = messages.MsgGet( id=obj, qid=0, ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0: return pkt.obj elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get') def get_sub(self, obj): msg = messages.MsgGet( id=obj, qid=0, sub=True, ) self.send_msg(msg) while True: pkt = self.getpkt() if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0: yield pkt.obj elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get') def get_data(self, obj, key): msg = messages.MsgGetData( id=obj, qid=0, key=key, ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0: return pkt.data elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get_data') def get_data_sub(self, obj, key): msg = messages.MsgGetData( id=obj, qid=0, key=key, sub=True ) self.send_msg(msg) while True: pkt = self.getpkt() if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0: yield pkt.data elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get_data') def get_bindata(self, obj, key, start=0, end=None): msg = messages.MsgGetBinData( id=obj, qid=0, key=key, start=start, end=end, ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0: return pkt.data elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get_bindata') def get_bindata_sub(self, obj, key, start=0, end=None): msg = messages.MsgGetBinData( id=obj, qid=0, key=key, start=start, end=end, sub=True, ) self.send_msg(msg) while True: pkt = self.getpkt() if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0: yield pkt.data elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: raise Exception('weird reply to get_bindata') def list(self, obj): msg = messages.MsgGetList( qid=0, parent=obj, ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0: return pkt.objs elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to list') def list_sub(self, obj): msg = messages.MsgGetList( qid=0, parent=obj, sub=True ) self.send_msg(msg) while True: pkt = self.getpkt() if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0: yield pkt elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to list') def query(self, obj, sig, params, checks=None): params = sig.params.dump(params) msg = messages.MsgGetQuery( qid=0, node=obj, query=sig.name, params=params, trace=checks is not None ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0: if checks is not None: checks += pkt.checks return sig.result.load(pkt.result) elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: if checks is not None: checks += pkt.checks raise pkt.err else: print(pkt) raise Exception('weird reply to get_query') def query_sub(self, obj, sig, params, checks=None): params = sig.params.dump(params) msg = messages.MsgGetQuery( qid=0, node=obj, query=sig.name, params=params, trace=checks is not None, sub=True ) self.send_msg(msg) while True: pkt = self.getpkt() if 
isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0: if checks is not None: checks += pkt.checks yield sig.result.load(pkt.result) elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: if checks is not None: checks += pkt.checks raise pkt.err else: print(pkt) raise Exception('weird reply to get_query') def run_method(self, obj, sig, params): params = sig.params.dump(params) msg = messages.MsgMethodRun( mid=0, node=obj, method=sig.name, params=params ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgMethodResult) and pkt.mid == 0: return sig.result.load(pkt.result) elif isinstance(pkt, messages.MsgMethodError) and pkt.mid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to run_method') def run_broadcast(self, sig, params): params = sig.params.dump(params) msg = messages.MsgBroadcastRun( bid=0, broadcast=sig.name, params=params ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgBroadcastResult) and pkt.bid == 0: return [sig.result.load(result) for result in pkt.results] else: print(pkt) raise Exception('weird reply to run_broadcast') def list_connections(self): msg = messages.MsgListConnections( qid=0, ) self.send_msg(msg) pkt = self.getpkt() if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0: return pkt.connections elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to list_connections') def list_connections_sub(self): msg = messages.MsgListConnections( qid=0, sub=True ) self.send_msg(msg) while True: pkt = self.getpkt() if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0: yield pkt elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0: raise pkt.err else: print(pkt) raise Exception('weird reply to list_connections') class UnixClient(Client): def __init__(self, path, key, **kwargs): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(path) super(UnixClient, self).__init__(sock, key, **kwargs) class TcpClient(Client): def __init__(self, ip, port, key, **kwargs): sock = socket.create_connection((ip, port)) super(TcpClient, self).__init__(sock, key, **kwargs) class SslClient(Client): def __init__(self, ip, port, key, fingerprint, **kwargs): sock = socket.create_connection((ip, port)) sc = ssl.SSLContext() sock = sc.wrap_socket(sock) cert = sock.getpeercert(True) helpers.validate_cert(cert, fingerprint) super(SslClient, self).__init__(sock, key, **kwargs) def create_client(url): url = helpers.parse_url(url) if url.scheme == helpers.UrlScheme.UNIX_SCHEME: return UnixClient(url.path, url.auth_key) elif url.scheme == helpers.UrlScheme.TCP_SCHEME: return TcpClient(url.host, url.port, url.auth_key) elif url.scheme == helpers.UrlScheme.SSL_SCHEME: return SslClient(url.host, url.port, url.auth_key, url.fingerprint) else: raise ValueError('Wrong scheme provided!')
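
# ---------------------------------------------------------------------------
# Usage sketch: a minimal, hypothetical session against a running server.
# The address, port and key below are placeholders, and NodeID.root_id is
# assumed to name the tree root; verify both against the server you are
# talking to before relying on this.
if __name__ == '__main__':
    client = TcpClient('127.0.0.1', 3135, 'placeholder-key')
    root = nodeid.NodeID.root_id
    # Create a tagged, annotated child of the root node.
    new_id = client.create(root, tags={'example'}, attr={'name': 'scratch'})
    print(client.get(new_id))   # fetch the freshly created node back
    print(client.list(root))    # enumerate the root's children
    client.delete(new_id)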
apache-2.0
-1,839,075,806,865,339,100
30.094382
78
0.539206
false
3.984164
false
false
false
askin/GNazar
GNazar/gnazar.py
1
6314
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed under GPL v2 # Copyright 2010, Aşkın Yollu <askin@askin.ws> # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # Please read the COPYING file. # import pygtk import gtk import gettext import pynotify import time import os import sys import locale import random import platform gtk.gdk.threads_init() #Translation stuff localedir = "/usr/share/gnazar/locale" gettext.bindtextdomain('gnazar', localedir) gettext.textdomain('gnazar') sharedirs = '/usr/share' _ = gettext.gettext class GNazar(): def __init__(self): # create a new Status Icon self.gnazar = gtk.StatusIcon() self.gnazar.set_from_file( '%s/icons/hicolor/22x22/apps/gnazar-deactive.png' % sharedirs) self.gnazar.set_tooltip( _("GNazar - You are completely demilitarized...")) self.gnazar.set_visible(True) self.status = False # create menu self.menu = gtk.Menu() self.gnazar.connect("popup_menu", self.show_menu) # connect _quit = gtk.ImageMenuItem(gtk.STOCK_QUIT) _quit.connect("activate", self.destroy) _about = gtk.ImageMenuItem(gtk.STOCK_ABOUT) _about.connect("activate", self.show_about) _protect = gtk.ImageMenuItem(gtk.STOCK_OK) _protect.connect("activate", self.protect) _protect.set_label(_("Protect")) _release = gtk.ImageMenuItem(gtk.STOCK_CANCEL) _release.set_label(_("Release")) _release.connect("activate", self.release) # add to menu self.menu.add(_protect) self.menu.add(_release) self.menu.add(_about) self.menu.add(_quit) self.menu.show_all() # notification pynotify.init(_("GNazar Application")) # init attack self.total_attack = 0 self.defated_attack = 0 self.running = True import thread thread.start_new_thread(self._notification, ()) def main(self): # gtk main gtk.main() ''' show popup menu ''' def show_menu(self, status_icon, button, activate_time): self.menu.popup(None, None, gtk.status_icon_position_menu, button, activate_time, status_icon) # random notification def _notification(self): while(self.running): time.sleep(random.randrange(3600, 18000)) #time.sleep(4) # testing self.notification() ''' show about ''' def show_about(self, widget): about = gtk.AboutDialog() about.set_program_name("GNazar") about.set_icon_from_file("%s/icons/hicolor/22x22/apps/gnazar.png" % sharedirs) about.set_version("0.1") about.set_copyright("(c) Aşkın Yollu") # FIXME: make it generic (mac, bsd, win etc..) 
dist_name = platform.dist()[0]
        about.set_comments(_("GNazar is a useful part of the %s") % dist_name)
        about.set_website("http://www.askin.ws")
        about.set_logo(gtk.gdk.pixbuf_new_from_file(
            "%s/icons/hicolor/32x32/apps/gnazar.png" % sharedirs))
        about.set_translator_credits(_("TRANSLATORS"))
        about.set_artists([_("THANKSFORICONS")])
        about.run()
        about.destroy()

    # destroy callback
    def destroy(self, widget):
        self.gnazar.set_visible(False)
        self.running = False
        gtk.main_quit()

    # popup callback
    def protect(self, widget):
        if self.status == False:
            dialog = gtk.MessageDialog(
                parent=None,
                flags=gtk.DIALOG_DESTROY_WITH_PARENT,
                type=gtk.MESSAGE_INFO,
                buttons=gtk.BUTTONS_OK,
                message_format=_("GNazar is starting to protect your "
                                 "computer from harmful looks...")
            )
            dialog.set_title(_("GNazar Application"))
            dialog.connect('response', self.dialog_destroyer)
            dialog.show()

            self.status = True
            self.gnazar.set_tooltip(_("GNazar - No harmful look allowed!"))
            self.gnazar.set_from_file("%s/icons/hicolor/22x22/apps/gnazar.png"
                                      % sharedirs)

    def release(self, widget):
        if self.status == True:
            dialog = gtk.MessageDialog(
                parent=None,
                flags=gtk.DIALOG_DESTROY_WITH_PARENT,
                type=gtk.MESSAGE_WARNING,
                buttons=gtk.BUTTONS_OK,
                message_format=_("GNazar is stopping to protect your computer"
                                 " from harmful looks...")
            )
            dialog.set_title(_("GNazar Application"))
            dialog.connect('response', self.dialog_destroyer)
            dialog.show()

            self.status = False
            self.gnazar.set_tooltip(
                _("GNazar - You are completely demilitarized..."))
            self.gnazar.set_from_file(
                "%s/icons/hicolor/22x22/apps/gnazar-deactive.png" % sharedirs)

    def notification(self):
        self.total_attack += 1
        if self.status == True:
            self.defated_attack += 1
            title = _("Nazar eliminated")
            body = _("Nazar received and eliminated successfully")
            icon = "gtk-apply"
        else:
            title = _("Nazar harmed")
            body = _("Nazar received and it HARMED!")
            icon = "dialog-warning"

        self.gnazar.set_tooltip(
            _("GNazar - %s attacks received so far, %s"
              " were defeated and %s got through...") % (self.total_attack,
                                                         self.defated_attack,
                                                         self.total_attack -
                                                         self.defated_attack))

        notify = pynotify.Notification(title, body, icon)
        notify.set_urgency(pynotify.URGENCY_NORMAL)
        notify.set_timeout(pynotify.EXPIRES_NEVER)
        notify.show()

    def dialog_destroyer(self, dialog, widget):
        dialog.destroy()


def main():
    si = GNazar()
    si.main()
gpl-2.0
1,321,648,834,766,344,000
31.525773
79
0.572266
false
3.778443
false
false
false
laurmurclar/mitmproxy
mitmproxy/tools/console/flowview.py
1
23737
import math import os import sys from functools import lru_cache from typing import Optional, Union # noqa import urwid from mitmproxy import contentviews from mitmproxy import exceptions from mitmproxy import export from mitmproxy import http from mitmproxy.net.http import Headers from mitmproxy.net.http import status_codes from mitmproxy.tools.console import common from mitmproxy.tools.console import flowdetailview from mitmproxy.tools.console import grideditor from mitmproxy.tools.console import searchable from mitmproxy.tools.console import signals from mitmproxy.tools.console import tabs class SearchError(Exception): pass def _mkhelp(): text = [] keys = [ ("A", "accept all intercepted flows"), ("a", "accept this intercepted flow"), ("b", "save request/response body"), ("C", "export flow to clipboard"), ("D", "duplicate flow"), ("d", "delete flow"), ("e", "edit request/response"), ("f", "load full body data"), ("m", "change body display mode for this entity\n(default mode can be changed in the options)"), (None, common.highlight_key("automatic", "a") + [("text", ": automatic detection")] ), (None, common.highlight_key("hex", "e") + [("text", ": Hex")] ), (None, common.highlight_key("html", "h") + [("text", ": HTML")] ), (None, common.highlight_key("image", "i") + [("text", ": Image")] ), (None, common.highlight_key("javascript", "j") + [("text", ": JavaScript")] ), (None, common.highlight_key("json", "s") + [("text", ": JSON")] ), (None, common.highlight_key("urlencoded", "u") + [("text", ": URL-encoded data")] ), (None, common.highlight_key("raw", "r") + [("text", ": raw data")] ), (None, common.highlight_key("xml", "x") + [("text", ": XML")] ), ("E", "export flow to file"), ("r", "replay request"), ("V", "revert changes to request"), ("v", "view body in external viewer"), ("w", "save all flows matching current view filter"), ("W", "save this flow"), ("x", "delete body"), ("z", "encode/decode a request/response"), ("tab", "next tab"), ("h, l", "previous tab, next tab"), ("space", "next flow"), ("|", "run script on this flow"), ("/", "search (case sensitive)"), ("n", "repeat search forward"), ("N", "repeat search backwards"), ] text.extend(common.format_keyvals(keys, key="key", val="text", indent=4)) return text help_context = _mkhelp() footer = [ ('heading_key', "?"), ":help ", ('heading_key', "q"), ":back ", ] class FlowViewHeader(urwid.WidgetWrap): def __init__(self, master: "mitmproxy.console.master.ConsoleMaster", f: http.HTTPFlow): self.master = master self.flow = f self._w = common.format_flow( f, False, extended=True, hostheader=self.master.options.showhost ) signals.flow_change.connect(self.sig_flow_change) def sig_flow_change(self, sender, flow): if flow == self.flow: self._w = common.format_flow( flow, False, extended=True, hostheader=self.master.options.showhost ) TAB_REQ = 0 TAB_RESP = 1 class FlowView(tabs.Tabs): highlight_color = "focusfield" def __init__(self, master, view, flow, tab_offset): self.master, self.view, self.flow = master, view, flow super().__init__( [ (self.tab_request, self.view_request), (self.tab_response, self.view_response), (self.tab_details, self.view_details), ], tab_offset ) self.show() self.last_displayed_body = None signals.flow_change.connect(self.sig_flow_change) def tab_request(self): if self.flow.intercepted and not self.flow.response: return "Request intercepted" else: return "Request" def tab_response(self): if self.flow.intercepted and self.flow.response: return "Response intercepted" else: return "Response" def tab_details(self): return 
"Detail" def view_request(self): return self.conn_text(self.flow.request) def view_response(self): return self.conn_text(self.flow.response) def view_details(self): return flowdetailview.flowdetails(self.view, self.flow) def sig_flow_change(self, sender, flow): if flow == self.flow: self.show() def content_view(self, viewmode, message): if message.raw_content is None: msg, body = "", [urwid.Text([("error", "[content missing]")])] return msg, body else: s = self.view.settings[self.flow] full = s.get((self.tab_offset, "fullcontents"), False) if full: limit = sys.maxsize else: limit = contentviews.VIEW_CUTOFF flow_modify_cache_invalidation = hash(( message.raw_content, message.headers.fields, getattr(message, "path", None), )) # we need to pass the message off-band because it's not hashable self._get_content_view_message = message return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation) @lru_cache(maxsize=200) def _get_content_view(self, viewmode, max_lines, _): message = self._get_content_view_message self._get_content_view_message = None description, lines, error = contentviews.get_message_content_view( viewmode, message ) if error: signals.add_log(error, "error") # Give hint that you have to tab for the response. if description == "No content" and isinstance(message, http.HTTPRequest): description = "No request content (press tab to view response)" # If the users has a wide terminal, he gets fewer lines; this should not be an issue. chars_per_line = 80 max_chars = max_lines * chars_per_line total_chars = 0 text_objects = [] for line in lines: txt = [] for (style, text) in line: if total_chars + len(text) > max_chars: text = text[:max_chars - total_chars] txt.append((style, text)) total_chars += len(text) if total_chars == max_chars: break # round up to the next line. total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line) text_objects.append(urwid.Text(txt)) if total_chars == max_chars: text_objects.append(urwid.Text([ ("highlight", "Stopped displaying data after %d lines. Press " % max_lines), ("key", "f"), ("highlight", " to load all data.") ])) break return description, text_objects def viewmode_get(self): override = self.view.settings[self.flow].get( (self.tab_offset, "prettyview"), None ) return self.master.options.default_contentview if override is None else override def conn_text(self, conn): if conn: txt = common.format_keyvals( [(h + ":", v) for (h, v) in conn.headers.items(multi=True)], key = "header", val = "text" ) viewmode = self.viewmode_get() msg, body = self.content_view(viewmode, conn) cols = [ urwid.Text( [ ("heading", msg), ] ), urwid.Text( [ " ", ('heading', "["), ('heading_key', "m"), ('heading', (":%s]" % viewmode)), ], align="right" ) ] title = urwid.AttrWrap(urwid.Columns(cols), "heading") txt.append(title) txt.extend(body) else: txt = [ urwid.Text(""), urwid.Text( [ ("highlight", "No response. 
Press "), ("key", "e"), ("highlight", " and edit any aspect to add one."), ] ) ] return searchable.Searchable(self.view, txt) def set_method_raw(self, m): if m: self.flow.request.method = m signals.flow_change.send(self, flow = self.flow) def edit_method(self, m): if m == "e": signals.status_prompt.send( prompt = "Method", text = self.flow.request.method, callback = self.set_method_raw ) else: for i in common.METHOD_OPTIONS: if i[1] == m: self.flow.request.method = i[0].upper() signals.flow_change.send(self, flow = self.flow) def set_url(self, url): request = self.flow.request try: request.url = str(url) except ValueError: return "Invalid URL." signals.flow_change.send(self, flow = self.flow) def set_resp_status_code(self, status_code): try: status_code = int(status_code) except ValueError: return None self.flow.response.status_code = status_code if status_code in status_codes.RESPONSES: self.flow.response.reason = status_codes.RESPONSES[status_code] signals.flow_change.send(self, flow = self.flow) def set_resp_reason(self, reason): self.flow.response.reason = reason signals.flow_change.send(self, flow = self.flow) def set_headers(self, fields, conn): conn.headers = Headers(fields) signals.flow_change.send(self, flow = self.flow) def set_query(self, lst, conn): conn.query = lst signals.flow_change.send(self, flow = self.flow) def set_path_components(self, lst, conn): conn.path_components = lst signals.flow_change.send(self, flow = self.flow) def set_form(self, lst, conn): conn.urlencoded_form = lst signals.flow_change.send(self, flow = self.flow) def edit_form(self, conn): self.master.view_grideditor( grideditor.URLEncodedFormEditor( self.master, conn.urlencoded_form.items(multi=True), self.set_form, conn ) ) def edit_form_confirm(self, key, conn): if key == "y": self.edit_form(conn) def set_cookies(self, lst, conn): conn.cookies = lst signals.flow_change.send(self, flow = self.flow) def set_setcookies(self, data, conn): conn.cookies = data signals.flow_change.send(self, flow = self.flow) def edit(self, part): if self.tab_offset == TAB_REQ: message = self.flow.request else: if not self.flow.response: self.flow.response = http.HTTPResponse.make(200, b"") message = self.flow.response self.flow.backup() if message == self.flow.request and part == "c": self.master.view_grideditor( grideditor.CookieEditor( self.master, message.cookies.items(multi=True), self.set_cookies, message ) ) if message == self.flow.response and part == "c": self.master.view_grideditor( grideditor.SetCookieEditor( self.master, message.cookies.items(multi=True), self.set_setcookies, message ) ) if part == "r": # Fix an issue caused by some editors when editing a # request/response body. Many editors make it hard to save a # file without a terminating newline on the last line. When # editing message bodies, this can cause problems. For now, I # just strip the newlines off the end of the body when we return # from an editor. c = self.master.spawn_editor(message.get_content(strict=False) or b"") message.content = c.rstrip(b"\n") elif part == "f": if not message.urlencoded_form and message.raw_content: signals.status_prompt_onekey.send( prompt = "Existing body is not a URL-encoded form. 
Clear and edit?", keys = [ ("yes", "y"), ("no", "n"), ], callback = self.edit_form_confirm, args = (message,) ) else: self.edit_form(message) elif part == "h": self.master.view_grideditor( grideditor.HeaderEditor( self.master, message.headers.fields, self.set_headers, message ) ) elif part == "p": p = message.path_components self.master.view_grideditor( grideditor.PathEditor( self.master, p, self.set_path_components, message ) ) elif part == "q": self.master.view_grideditor( grideditor.QueryEditor( self.master, message.query.items(multi=True), self.set_query, message ) ) elif part == "u": signals.status_prompt.send( prompt = "URL", text = message.url, callback = self.set_url ) elif part == "m" and message == self.flow.request: signals.status_prompt_onekey.send( prompt = "Method", keys = common.METHOD_OPTIONS, callback = self.edit_method ) elif part == "o": signals.status_prompt.send( prompt = "Code", text = str(message.status_code), callback = self.set_resp_status_code ) elif part == "m" and message == self.flow.response: signals.status_prompt.send( prompt = "Message", text = message.reason, callback = self.set_resp_reason ) signals.flow_change.send(self, flow = self.flow) def view_flow(self, flow): signals.pop_view_state.send(self) self.master.view_flow(flow, self.tab_offset) def _view_nextprev_flow(self, idx, flow): if not self.view.inbounds(idx): signals.status_message.send(message="No more flows") return self.view_flow(self.view[idx]) def view_next_flow(self, flow): return self._view_nextprev_flow(self.view.index(flow) + 1, flow) def view_prev_flow(self, flow): return self._view_nextprev_flow(self.view.index(flow) - 1, flow) def change_this_display_mode(self, t): view = contentviews.get_by_shortcut(t) if view: self.view.settings[self.flow][(self.tab_offset, "prettyview")] = view.name else: self.view.settings[self.flow][(self.tab_offset, "prettyview")] = None signals.flow_change.send(self, flow=self.flow) def keypress(self, size, key): conn = None # type: Optional[Union[http.HTTPRequest, http.HTTPResponse]] if self.tab_offset == TAB_REQ: conn = self.flow.request elif self.tab_offset == TAB_RESP: conn = self.flow.response key = super().keypress(size, key) # Special case: Space moves over to the next flow. 
# We need to catch that before applying common.shortcuts() if key == " ": self.view_next_flow(self.flow) return key = common.shortcuts(key) if key in ("up", "down", "page up", "page down"): # Pass scroll events to the wrapped widget self._w.keypress(size, key) elif key == "a": self.flow.resume() self.master.view.update(self.flow) elif key == "A": for f in self.view: if f.intercepted: f.resume() self.master.view.update(self.flow) elif key == "d": if self.flow.killable: self.flow.kill() self.view.remove(self.flow) if not self.view.focus.flow: self.master.view_flowlist() else: self.view_flow(self.view.focus.flow) elif key == "D": cp = self.flow.copy() self.master.view.add(cp) self.master.view.focus.flow = cp self.view_flow(cp) signals.status_message.send(message="Duplicated.") elif key == "p": self.view_prev_flow(self.flow) elif key == "r": try: self.master.replay_request(self.flow) except exceptions.ReplayException as e: signals.add_log("Replay error: %s" % e, "warn") signals.flow_change.send(self, flow = self.flow) elif key == "V": if self.flow.modified(): self.flow.revert() signals.flow_change.send(self, flow = self.flow) signals.status_message.send(message="Reverted.") else: signals.status_message.send(message="Flow not modified.") elif key == "W": signals.status_prompt_path.send( prompt = "Save this flow", callback = self.master.save_one_flow, args = (self.flow,) ) elif key == "|": signals.status_prompt_path.send( prompt = "Send flow to script", callback = self.master.run_script_once, args = (self.flow,) ) elif key == "e": if self.tab_offset == TAB_REQ: signals.status_prompt_onekey.send( prompt="Edit request", keys=( ("cookies", "c"), ("query", "q"), ("path", "p"), ("url", "u"), ("header", "h"), ("form", "f"), ("raw body", "r"), ("method", "m"), ), callback=self.edit ) elif self.tab_offset == TAB_RESP: signals.status_prompt_onekey.send( prompt="Edit response", keys=( ("cookies", "c"), ("code", "o"), ("message", "m"), ("header", "h"), ("raw body", "r"), ), callback=self.edit ) else: signals.status_message.send( message="Tab to the request or response", expire=1 ) elif key in set("bfgmxvzEC") and not conn: signals.status_message.send( message = "Tab to the request or response", expire = 1 ) return elif key == "b": if self.tab_offset == TAB_REQ: common.ask_save_body("q", self.flow) else: common.ask_save_body("s", self.flow) elif key == "f": self.view.settings[self.flow][(self.tab_offset, "fullcontents")] = True signals.flow_change.send(self, flow = self.flow) signals.status_message.send(message="Loading all body data...") elif key == "m": p = list(contentviews.view_prompts) p.insert(0, ("Clear", "C")) signals.status_prompt_onekey.send( self, prompt = "Display mode", keys = p, callback = self.change_this_display_mode ) elif key == "E": if self.tab_offset == TAB_REQ: scope = "q" else: scope = "s" signals.status_prompt_onekey.send( self, prompt = "Export to file", keys = [(e[0], e[1]) for e in export.EXPORTERS], callback = common.export_to_clip_or_file, args = (scope, self.flow, common.ask_save_path) ) elif key == "C": if self.tab_offset == TAB_REQ: scope = "q" else: scope = "s" signals.status_prompt_onekey.send( self, prompt = "Export to clipboard", keys = [(e[0], e[1]) for e in export.EXPORTERS], callback = common.export_to_clip_or_file, args = (scope, self.flow, common.copy_to_clipboard_or_prompt) ) elif key == "x": conn.content = None signals.flow_change.send(self, flow=self.flow) elif key == "v": if conn.raw_content: t = conn.headers.get("content-type") if "EDITOR" in os.environ or 
"PAGER" in os.environ: self.master.spawn_external_viewer(conn.get_content(strict=False), t) else: signals.status_message.send( message = "Error! Set $EDITOR or $PAGER." ) elif key == "z": self.flow.backup() e = conn.headers.get("content-encoding", "identity") if e != "identity": try: conn.decode() except ValueError: signals.status_message.send( message = "Could not decode - invalid data?" ) else: signals.status_prompt_onekey.send( prompt = "Select encoding: ", keys = ( ("gzip", "z"), ("deflate", "d"), ("brotli", "b"), ), callback = self.encode_callback, args = (conn,) ) signals.flow_change.send(self, flow = self.flow) else: # Key is not handled here. return key def encode_callback(self, key, conn): encoding_map = { "z": "gzip", "d": "deflate", "b": "br", } conn.encode(encoding_map[key]) signals.flow_change.send(self, flow = self.flow)
mit
642,012,547,737,181,300
33.551674
104
0.487256
false
4.237993
false
false
false
kuzmoyev/Google-Calendar-Simple-API
tests/test_attachment.py
1
3788
from unittest import TestCase from gcsa.attachment import Attachment from gcsa.serializers.attachment_serializer import AttachmentSerializer DOC_URL = 'https://docs.google.com/document/d/1uDvwcxOsXkzl2Bod0YIfrIQ5MqfBhnc1jusYdH1xCZo/edit?usp=sharing' class TestAttachment(TestCase): def test_create(self): attachment = Attachment('My doc', file_url=DOC_URL, mime_type="application/vnd.google-apps.document") self.assertEqual(attachment.title, 'My doc') with self.assertRaises(ValueError): Attachment('My doc', file_url=DOC_URL, mime_type="application/vnd.google-apps.something") class TestAttachmentSerializer(TestCase): def test_to_json(self): attachment = Attachment('My doc', file_url=DOC_URL, mime_type="application/vnd.google-apps.document") attachment_json = { 'title': 'My doc', 'fileUrl': DOC_URL, 'mimeType': "application/vnd.google-apps.document" } self.assertDictEqual(AttachmentSerializer.to_json(attachment), attachment_json) attachment = Attachment('My doc2', file_url=DOC_URL, mime_type="application/vnd.google-apps.drawing", icon_link="https://some_link.com", file_id='abc123') attachment_json = { 'title': 'My doc2', 'fileUrl': DOC_URL, 'mimeType': "application/vnd.google-apps.drawing", 'iconLink': "https://some_link.com", 'fileId': 'abc123' } serializer = AttachmentSerializer(attachment) self.assertDictEqual(serializer.get_json(), attachment_json) def test_to_object(self): attachment_json = { 'title': 'My doc', 'fileUrl': DOC_URL, 'mimeType': "application/vnd.google-apps.document" } attachment = AttachmentSerializer.to_object(attachment_json) self.assertEqual(attachment.title, 'My doc') self.assertEqual(attachment.file_url, DOC_URL) self.assertEqual(attachment.mime_type, "application/vnd.google-apps.document") self.assertIsNone(attachment.icon_link) self.assertIsNone(attachment.file_id) attachment_json = { 'title': 'My doc2', 'fileUrl': DOC_URL, 'mimeType': "application/vnd.google-apps.drawing", 'iconLink': "https://some_link.com", 'fileId': 'abc123' } serializer = AttachmentSerializer(attachment_json) attachment = serializer.get_object() self.assertEqual(attachment.title, 'My doc2') self.assertEqual(attachment.file_url, DOC_URL) self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing") self.assertEqual(attachment.icon_link, "https://some_link.com") self.assertEqual(attachment.file_id, 'abc123') attachment_json_str = """{ "title": "My doc3", "fileUrl": "%s", "mimeType": "application/vnd.google-apps.drawing", "iconLink": "https://some_link.com", "fileId": "abc123" } """ % DOC_URL attachment = AttachmentSerializer.to_object(attachment_json_str) self.assertEqual(attachment.title, 'My doc3') self.assertEqual(attachment.file_url, DOC_URL) self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing") self.assertEqual(attachment.icon_link, "https://some_link.com") self.assertEqual(attachment.file_id, 'abc123')
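
# ----------------------------------------------------------------------------
# Usage sketch beyond the serializer round-trips above: attaching a document
# to a calendar event.  The Event/GoogleCalendar API shown here is an
# assumption based on the package's typical usage and may differ between
# gcsa versions.
#
#     from datetime import date
#     from gcsa.event import Event
#     from gcsa.google_calendar import GoogleCalendar
#
#     attachment = Attachment('My doc', file_url=DOC_URL,
#                             mime_type="application/vnd.google-apps.document")
#     event = Event('Design review', start=date(2020, 1, 1),
#                   attachments=[attachment])
#     GoogleCalendar().add_event(event)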
mit
5,226,027,302,797,445,000
38.873684
108
0.587381
false
4.029787
true
false
false
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_web_application_firewall_policies_operations.py
1
20908
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class WebApplicationFirewallPoliciesOperations(object): """WebApplicationFirewallPoliciesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_04_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"] """Lists all of the protection policies within a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore def list_all( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"] """Gets all the WAF policies in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_all.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore def get( self, resource_group_name, # type: str policy_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.WebApplicationFirewallPolicy" """Retrieve protection policy with specified name within a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param policy_name: The name of the policy. 
:type policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: WebApplicationFirewallPolicy, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore def create_or_update( self, resource_group_name, # type: str policy_name, # type: str parameters, # type: "_models.WebApplicationFirewallPolicy" **kwargs # type: Any ): # type: (...) -> "_models.WebApplicationFirewallPolicy" """Creates or update policy with specified rule set name within a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param policy_name: The name of the policy. :type policy_name: str :param parameters: Policy to be created. 
:type parameters: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy :keyword callable cls: A custom type or function that will be passed the direct response :return: WebApplicationFirewallPolicy, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore def _delete_initial( self, resource_group_name, # type: str policy_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str policy_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes Policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param policy_name: The name of the policy. :type policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the ARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, policy_name=policy_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
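
# ----------------------------------------------------------------------------
# Usage sketch: this operations class is normally reached through
# NetworkManagementClient rather than constructed directly.  The credential
# type, resource group and policy names below are illustrative placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     network_client = NetworkManagementClient(
#         DefaultAzureCredential(), "<subscription-id>")
#     for policy in network_client.web_application_firewall_policies.list(
#             "my-resource-group"):
#         print(policy.name)
#     poller = network_client.web_application_firewall_policies.begin_delete(
#         "my-resource-group", "my-waf-policy")
#     poller.result()  # block until the long-running delete finishes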
mit
-6,169,865,617,561,209,000
48.079812
215
0.643486
false
4.363105
true
false
false
abhinavsingh/proxy.py
examples/websocket_client.py
1
1465
# -*- coding: utf-8 -*- """ proxy.py ~~~~~~~~ ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on Network monitoring, controls & Application development, testing, debugging. :copyright: (c) 2013-present by Abhinav Singh and contributors. :license: BSD, see LICENSE for more details. """ import time from proxy.http.websocket import WebsocketClient, WebsocketFrame, websocketOpcodes # globals client: WebsocketClient last_dispatch_time: float static_frame = memoryview(WebsocketFrame.text(b'hello')) num_echos = 10 def on_message(frame: WebsocketFrame) -> None: """WebsocketClient on_message callback.""" global client, num_echos, last_dispatch_time print('Received %r after %d millisec' % (frame.data, (time.time() - last_dispatch_time) * 1000)) assert(frame.data == b'hello' and frame.opcode == websocketOpcodes.TEXT_FRAME) if num_echos > 0: client.queue(static_frame) last_dispatch_time = time.time() num_echos -= 1 else: client.close() if __name__ == '__main__': # Constructor establishes socket connection client = WebsocketClient( b'echo.websocket.org', 80, b'/', on_message=on_message) # Perform handshake client.handshake() # Queue some data for client client.queue(static_frame) last_dispatch_time = time.time() # Start event loop client.run()
bsd-3-clause
-4,967,191,948,487,152,000
28.18
86
0.654558
false
3.665829
false
false
false
sebalas/fake-useragent
fake_useragent/utils.py
1
2970
import os import re from . import settings try: # Python 2 from urllib import urlopen, quote_plus except ImportError: # Python 3 from urllib.request import urlopen from urllib.parse import quote_plus try: import json except ImportError: import simplejson as json def get(url, annex=None): if annex is not None: url = url % (quote_plus(annex), ) return urlopen(url).read() def get_browsers(): """ very very hardcoded/dirty re/split stuff, but no dependencies """ html = get(settings.BROWSERS_STATS_PAGE) html = html.decode('windows-1252') html = html.split('<table class="reference notranslate">')[1] html = html.split('</table>')[0] browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE) for value, override in settings.OVERRIDES: browsers = [ value if browser == override else browser for browser in browsers ] browsers_statistics = re.findall( r'td\sclass="right">(.+?)\s', html, re.UNICODE ) # TODO: ensure encoding return list(zip(browsers, browsers_statistics)) def get_browser_versions(browser): """ very very hardcoded/dirty re/split stuff, but no dependencies """ html = get(settings.BROWSER_BASE_PAGE, browser) html = html.decode('iso-8859-1') html = html.split('<div id=\'liste\'>')[1] html = html.split('</div>')[0] browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE) count = 0 browsers = [] for browser in browsers_iter: if 'more' in browser.group(1).lower(): continue # TODO: ensure encoding browsers.append(browser.group(1)) count += 1 if count == settings.BROWSERS_COUNT_LIMIT: break return browsers def load(): browsers_dict = {} randomize_dict = {} for item in get_browsers(): browser, percent = item browser_key = browser for replacement in settings.REPLACEMENTS: browser_key = browser_key.replace(replacement, '') browser_key = browser_key.lower() browsers_dict[browser_key] = get_browser_versions(browser) for counter in range(int(float(percent))): randomize_dict[str(len(randomize_dict))] = browser_key db = {} db['browsers'] = browsers_dict db['randomize'] = randomize_dict return db def write(data): data = json.dumps(data, ensure_ascii=False) # no codecs\with for python 2.5 f = open(settings.DB, 'w+') f.write(data) f.close() def read(): # no codecs\with for python 2.5 f = open(settings.DB, 'r') data = f.read() f.close() return json.loads(data) def exist(): return os.path.isfile(settings.DB) def rm(): if exist(): os.remove(settings.DB) def update(): if exist(): rm() write(load()) def load_cached(): if not exist(): update() return read()
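
# ----------------------------------------------------------------------------
# Usage sketch: how the database built above is typically consumed.  A key
# drawn uniformly from 'randomize' weights browsers by usage share, because
# load() gives each browser int(percent) slots.  (The real consuming code
# lives elsewhere in the package; this is an illustration only.)
#
#     import random
#
#     db = load_cached()  # builds and caches the database on first use
#     key = str(random.randint(0, len(db['randomize']) - 1))
#     browser = db['randomize'][key]
#     print(random.choice(db['browsers'][browser]))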
apache-2.0
6,406,799,067,328,781,000
19.915493
70
0.6
false
3.69863
false
false
false
Daeinar/norx-py
norx.py
1
7942
""" Python2 implementation of NORX. ------ :author: Philipp Jovanovic <philipp@jovanovic.io>, 2014-2015. :license: CC0, see LICENSE for more details. """ from struct import pack, unpack class NORX(object): def __init__(self, w=64, r=4, d=1, t=256): assert w in [32, 64] assert r >= 1 assert d >= 0 assert 10 * w >= t >= 0 self.NORX_W = w self.NORX_R = r self.NORX_D = d self.NORX_T = t self.NORX_N = w * 2 self.NORX_K = w * 4 self.NORX_B = w * 16 self.NORX_C = w * 6 self.RATE = self.NORX_B - self.NORX_C self.HEADER_TAG = 1 << 0 self.PAYLOAD_TAG = 1 << 1 self.TRAILER_TAG = 1 << 2 self.FINAL_TAG = 1 << 3 self.BRANCH_TAG = 1 << 4 self.MERGE_TAG = 1 << 5 self.BYTES_WORD = w / 8 self.BYTES_TAG = t / 8 self.WORDS_RATE = self.RATE / w self.BYTES_RATE = self.WORDS_RATE * self.BYTES_WORD if w == 32: self.R = (8, 11, 16, 31) self.U = (0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0x254F537A, 0x38531D48, 0x839C6E83, 0xF97A3AE5, 0x8C91D88C, 0x11EAFB59) self.M = 0xffffffff self.fmt = '<L' elif w == 64: self.R = (8, 19, 40, 63) self.U = (0x243F6A8885A308D3, 0x13198A2E03707344, 0xA4093822299F31D0, 0x082EFA98EC4E6C89, 0xAE8858DC339325A1, 0x670A134EE52D7FA6, 0xC4316D80CD967541, 0xD21DFBF8B630B762, 0x375A18D261E7F892, 0x343D1F187D92285B) self.M = 0xffffffffffffffff self.fmt = '<Q' def load(self, x): return unpack(self.fmt, x)[0] def store(self, x): return pack(self.fmt, x) def ROTR(self, a, r): return ((a >> r) | (a << (self.NORX_W - r))) & self.M def H(self, a, b): return ((a ^ b) ^ ((a & b) << 1)) & self.M def G(self, a, b, c, d): a = self.H(a, b) d = self.ROTR(a ^ d, self.R[0]) c = self.H(c, d) b = self.ROTR(b ^ c, self.R[1]) a = self.H(a, b) d = self.ROTR(a ^ d, self.R[2]) c = self.H(c, d) b = self.ROTR(b ^ c, self.R[3]) return a, b, c, d def F(self, S): # Column step S[0], S[4], S[8], S[12] = self.G(S[0], S[4], S[8], S[12]) S[1], S[5], S[9], S[13] = self.G(S[1], S[5], S[9], S[13]) S[2], S[6], S[10], S[14] = self.G(S[2], S[6], S[10], S[14]) S[3], S[7], S[11], S[15] = self.G(S[3], S[7], S[11], S[15]) # Diagonal step S[0], S[5], S[10], S[15] = self.G(S[0], S[5], S[10], S[15]) S[1], S[6], S[11], S[12] = self.G(S[1], S[6], S[11], S[12]) S[2], S[7], S[8], S[13] = self.G(S[2], S[7], S[8], S[13]) S[3], S[4], S[9], S[14] = self.G(S[3], S[4], S[9], S[14]) def permute(self, S): for i in xrange(self.NORX_R): self.F(S) def pad(self, x): y = bytearray(self.BYTES_RATE) y[:len(x)] = x y[len(x)] = 0x01 y[self.BYTES_RATE-1] |= 0x80 return y def init(self, S, n, k): b = self.BYTES_WORD K = [self.load(k[b*i:b*(i+1)]) for i in xrange(self.NORX_K / self.NORX_W)] N = [self.load(n[b*i:b*(i+1)]) for i in xrange(self.NORX_N / self.NORX_W)] U = self.U S[0], S[1], S[2], S[3] = U[0], N[0], N[1], U[1] S[4], S[5], S[6], S[7] = K[0], K[1], K[2], K[3] S[8], S[9], S[10], S[11] = U[2], U[3], U[4], U[5] S[12], S[13], S[14], S[15] = U[6], U[7], U[8], U[9] S[12] ^= self.NORX_W S[13] ^= self.NORX_R S[14] ^= self.NORX_D S[15] ^= self.NORX_T self.permute(S) def inject_tag(self, S, tag): S[15] ^= tag def process_header(self, S, x): return self.absorb_data(S, x, self.HEADER_TAG) def process_trailer(self, S, x): return self.absorb_data(S, x, self.TRAILER_TAG) def absorb_data(self, S, x, tag): inlen = len(x) if inlen > 0: i, n = 0, self.BYTES_RATE while inlen >= n: self.absorb_block(S, x[n*i:n*(i+1)], tag) inlen -= n i += 1 self.absorb_lastblock(S, x[n*i:n*i+inlen], tag) def absorb_block(self, S, x, tag): b = self.BYTES_WORD self.inject_tag(S, tag) self.permute(S) for i in xrange(self.WORDS_RATE): S[i] ^= self.load(x[b*i:b*(i+1)]) def 
absorb_lastblock(self, S, x, tag): y = self.pad(x) self.absorb_block(S, y, tag) def encrypt_data(self, S, x): c = bytearray() inlen = len(x) if inlen > 0: i, n = 0, self.BYTES_RATE while inlen >= n: c += self.encrypt_block(S, x[n*i:n*(i+1)]) inlen -= n i += 1 c += self.encrypt_lastblock(S, x[n*i:n*i+inlen]) return c def encrypt_block(self, S, x): c = bytearray() b = self.BYTES_WORD self.inject_tag(S, self.PAYLOAD_TAG) self.permute(S) for i in xrange(self.WORDS_RATE): S[i] ^= self.load(x[b*i:b*(i+1)]) c += self.store(S[i]) return c[:self.BYTES_RATE] def encrypt_lastblock(self, S, x): y = self.pad(x) c = self.encrypt_block(S, y) return c[:len(x)] def decrypt_data(self, S, x): m = bytearray() inlen = len(x) if inlen > 0: i, n = 0, self.BYTES_RATE while inlen >= n: m += self.decrypt_block(S, x[n*i:n*(i+1)]) inlen -= n i += 1 m += self.decrypt_lastblock(S, x[n*i:n*i+inlen]) return m def decrypt_block(self, S, x): m = bytearray() b = self.BYTES_WORD self.inject_tag(S, self.PAYLOAD_TAG) self.permute(S) for i in xrange(self.WORDS_RATE): c = self.load(x[b*i:b*(i+1)]) m += self.store(S[i] ^ c) S[i] = c return m[:self.BYTES_RATE] def decrypt_lastblock(self, S, x): m = bytearray() y = bytearray() b = self.BYTES_WORD self.inject_tag(S, self.PAYLOAD_TAG) self.permute(S) for i in xrange(self.WORDS_RATE): y += self.store(S[i]) y[:len(x)] = bytearray(x) y[len(x)] ^= 0x01 y[self.BYTES_RATE-1] ^= 0x80 for i in xrange(self.WORDS_RATE): c = self.load(y[b*i:b*(i+1)]) m += self.store(S[i] ^ c) S[i] = c return m[:len(x)] def generate_tag(self, S): t = bytearray() self.inject_tag(S, self.FINAL_TAG) self.permute(S) self.permute(S) for i in xrange(self.WORDS_RATE): t += self.store(S[i]) return t[:self.BYTES_TAG] def verify_tag(self, t0, t1): acc = 0 for i in xrange(self.BYTES_TAG): acc |= t0[i] ^ t1[i] return (((acc - 1) >> 8) & 1) - 1 def aead_encrypt(self, h, m, t, n, k): assert len(k) == self.NORX_K / 8 assert len(n) == self.NORX_N / 8 c = bytearray() S = [0] * 16 self.init(S, n, k) self.process_header(S, h) c += self.encrypt_data(S, m) self.process_trailer(S, t) c += self.generate_tag(S) return str(c) def aead_decrypt(self, h, c, t, n, k): assert len(k) == self.NORX_K / 8 assert len(n) == self.NORX_N / 8 assert len(c) >= self.BYTES_TAG m = bytearray() c = bytearray(c) S = [0] * 16 d = len(c)-self.BYTES_TAG c, t0 = c[:d], c[d:] self.init(S, n, k) self.process_header(S, h) m += self.decrypt_data(S, c) self.process_trailer(S, t) t1 = self.generate_tag(S) if self.verify_tag(t0, t1) != 0: m = '' return str(m)
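
# ----------------------------------------------------------------------------
# Usage sketch.  With the default w=64 the key is NORX_K/8 = 32 bytes and the
# nonce NORX_N/8 = 16 bytes; the fixed key and nonce below are for
# demonstration only and must never be reused for real messages.  (Like the
# module itself, this sketch is Python 2 code.)
if __name__ == '__main__':
    norx = NORX()
    key = b'\x00' * 32    # demo key, 32 bytes for w=64
    nonce = b'\x01' * 16  # demo nonce, 16 bytes for w=64
    header, message, trailer = b'header', b'attack at dawn', b''
    c = norx.aead_encrypt(header, message, trailer, nonce, key)
    m = norx.aead_decrypt(header, c, trailer, nonce, key)
    assert m == message   # aead_decrypt returns '' on tag failure
    print('roundtrip ok, tag verified: %r' % m)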
cc0-1.0
-3,776,232,993,985,714,000
30.515873
121
0.467137
false
2.640293
false
false
false
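A minimal usage sketch for the cipher class above (Python 2, matching its use of `xrange` and byte strings). The module name `norx` and the fixed demo key/nonce are assumptions for illustration only; real keys and nonces must come from a CSPRNG, and a nonce must never be reused under the same key.

# Hypothetical usage sketch (Python 2); module name and demo values are assumptions.
from norx import NORX

cipher = NORX(w=64, r=4, d=1, t=256)            # NORX64-4-1 with a 256-bit tag
key = '\x00' * (cipher.NORX_K / 8)              # 32-byte demo key (illustration only)
nonce = '\x01' * (cipher.NORX_N / 8)            # 16-byte nonce; never reuse per key
header, trailer = 'associated data', ''

c = cipher.aead_encrypt(header, 'attack at dawn', trailer, nonce, key)
m = cipher.aead_decrypt(header, c, trailer, nonce, key)
assert m == 'attack at dawn'                    # an empty string signals a failed tag check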
timfreund/pycontrol-shed
pycontrolshed/model.py
1
13233
# Copyright (C) 2011 Tim Freund and contributors. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from functools import wraps from pycontrol import pycontrol import logging import pycontrolshed import socket # In [1]: route_domains = bigip.Networking.RouteDomain.get_list() # In [2]: route_domains # Out[2]: [2220L] log = logging.getLogger('pycontrolshed.model') def partitioned(f): @wraps(f) def wrapper(self, *args, **kwargs): partition = kwargs.get('partition', None) if partition: orig_partition = self.bigip.Management.Partition.get_active_partition() self.bigip.active_partition = partition rc = f(self, *args, **kwargs) self.bigip.active_partition = orig_partition return rc else: return f(self, *args, **kwargs) return wrapper class NodeAssistant(object): def __init__(self, bigip): self.bigip = bigip def disable(self, nodes, partition=None): self.enable_disable_nodes(nodes, 'STATE_DISABLED', partition=partition) def enable(self, nodes, partition=None): self.enable_disable_nodes(nodes, 'STATE_ENABLED', partition=partition) @partitioned def enable_disable_nodes(self, nodes, target_state, partition=None): if isinstance(nodes, basestring): nodes = [nodes] targets = [] states = [] for node in nodes: targets.append(self.bigip.host_to_node(node)) states.append(target_state) self.bigip.LocalLB.NodeAddress.set_session_enabled_state(node_addresses=targets, states=states) return self.status(nodes) @partitioned def status(self, nodes, partition=None): if isinstance(nodes, basestring): nodes = [nodes] targets = [self.bigip.host_to_node(node) for node in nodes] statuses = self.bigip.LocalLB.NodeAddress.get_session_enabled_state(node_addresses=targets) rc = [] for node, status in zip(targets, statuses): rc.append({'node': node, 'fqdn': self.bigip.node_to_host(node), 'status': status}) return rc class VirtualAssistant(object): def __init__(self, bigip): self.bigip = bigip @partitioned def servers(self, partition=None): return self.bigip.LocalLB.VirtualServer.get_list() @partitioned def all_server_statistics(self, partition=None): return self.bigip.LocalLB.VirtualServer.get_all_statistics() @partitioned def addresses(self, partition=None): return self.bigip.LocalLB.VirtualAddress.get_list() @partitioned def all_address_statistics(self, partition=None): return self.bigip.LocalLB.VirtualAddress.get_all_statistics() class PoolAssistant(object): def __init__(self, bigip): self.bigip = bigip def create_type(self, type_name): return self.bigip.LocalLB.PoolMember.typefactory.create(type_name) @partitioned def pools(self, partition=None): return self.bigip.LocalLB.Pool.get_list() @partitioned def members(self, pools, partition=None): if isinstance(pools, basestring): pools = [pools] session_status_list = self.bigip.LocalLB.PoolMember.get_session_enabled_state(pools) monitor_status_list = self.bigip.LocalLB.PoolMember.get_monitor_status(pools) rc = {} for pool, sessions, monitors in zip(pools, session_status_list, monitor_status_list): members = [] for session, monitor in zip(sessions, monitors): members.append({'address': session.member.address, 'port': session.member.port, 'monitor': monitor, 'session': session}) rc[pool] = {'members': members} return rc @partitioned def multi_member_statistics(self, pools, members, partition=None): seq_members = [] ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence') ippd_seq_seq.item = seq_members empty_pools = [] if isinstance(members, list): pass elif 
isinstance(members, dict): mlist = [] for k in pools: if len(members[k]['members']) == 0: empty_pools.append(k) else: mlist.append(members[k]['members']) for ep in empty_pools: pools.remove(ep) members = mlist for member_list in members: seq_members.append(self.pool_members_to_ippd_seq(member_list)) stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq) rc = {} for p, s in zip(pools, stats): s = self.collapse_member_statistics(s) rc[p] = s return rc @partitioned def member_statistics(self, pool, member, partition=None): # TODO refactor this to be a special case of multi_member_statistics pools = [pool] if isinstance(member, basestring): ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':')) member = ipp_member ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence') ippd_seq = self.create_type('Common.IPPortDefinitionSequence') ippd_seq_seq.item = ippd_seq ippd_seq.item = member # this is kind of garbage too... see TODO above stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq)[0].statistics[0] return stats def disable_member(self, pool_name, members, partition=None): return self.enable_disable_members(pool_name, members, 'STATE_DISABLED', partition=partition) def enable_member(self, pool_name, members, partition=None): return self.enable_disable_members(pool_name, members, 'STATE_ENABLED', partition=partition) @partitioned def enable_disable_members(self, pool_name, members, target_state, partition=None): pools = [pool_name] if isinstance(members, basestring) or members.__class__.__name__.count('IPPortDefinition'): members = [members] session_states = self.create_type('LocalLB.PoolMember.MemberSessionStateSequence') session_states.item = [] for member in members: if isinstance(member, basestring): ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':')) member = ipp_member state = self.create_type('LocalLB.PoolMember.MemberSessionState') state.member = member state.session_state = target_state session_states.item.append(state) self.bigip.LocalLB.PoolMember.set_session_enabled_state(pool_names=pools, session_states=[session_states]) return self.members(pools, partition=partition) def pool_members_to_ippd_seq(self, members): ippd_seq = self.create_type('Common.IPPortDefinitionSequence') ippd_members = [] ippd_seq.item = ippd_members for member in members: address = None port = None if isinstance(member, dict): address = member['address'] port = member['port'] elif isinstance(member, basestring): address, port = member.split(':') else: raise Exception("Unknown member type") ippd_members.append(self.bigip.host_port_to_ipportdef(address, port)) return ippd_seq def collapse_member_statistics(self, pool_stats): stats = {} # LocalLB.PoolMember.MemberStatisticEntry for mse in pool_stats.statistics: member_id = "%s:%d" % (mse.member.address, mse.member.port) stats[member_id] = {} for stat in mse.statistics: stats[member_id][stat.type] = {'high': stat.value.high, 'low': stat.value.low} return stats class PyCtrlShedBIGIP(pycontrol.BIGIP): def __init__(self, *args, **kwargs): pycontrol.BIGIP.__init__(self, *args, **kwargs) self.nodes = NodeAssistant(self) self.pools = PoolAssistant(self) self.virtual = VirtualAssistant(self) self._active_partition = None @property def active_partition(self): if self._active_partition: return self._active_partition self._active_partition = str(self.Management.Partition.get_active_partition()) return self._active_partition @active_partition.setter def 
active_partition(self, partition):
        self.Management.Partition.set_active_partition(partition)
        self._active_partition = partition
        self._route_domains = self.Networking.RouteDomain.get_list()

    def host_port_to_ipportdef(self, host, port):
        ipp = self.LocalLB.PoolMember.typefactory.create('Common.IPPortDefinition')
        ipp.address = self.host_to_node(host)
        ipp.port = int(port)
        return ipp

    def host_to_node(self, host):
        # If someone provides us with a route domain, we're going to trust
        # that they know what route domain to use.
        if host.count('%'):
            host, route_domain = host.split('%', 1)
            return "%s%%%s" % (socket.gethostbyname(host), route_domain)

        node = socket.gethostbyname(host)
        if (len(self.route_domains) == 1) and self.route_domains[0] != 0:
            node += "%%%d" % self.route_domains[0]
        return node

    def node_to_ip(self, node):
        if node.count('%'):
            return node.split('%')[0]
        return node

    def node_to_host(self, node):
        return socket.getfqdn(self.node_to_ip(node))

    @property
    def route_domains(self):
        if hasattr(self, '_route_domains'):
            return self._route_domains
        self._route_domains = self.Networking.RouteDomain.get_list()
        return self._route_domains

    @property
    def partitions(self):
        partitions = []
        for partition in self.Management.Partition.get_partition_list():
            partitions.append({
                'name': partition['partition_name'],
                'description': partition["description"]
            })
        return partitions


class Environment(object):
    def __init__(self, name, hosts=[], wsdls=None, username=None):
        self.name = name
        self.hosts = hosts
        self.bigips = {}
        self.username = username
        self.wsdls = wsdls

        if self.wsdls is None:
            self.wsdls = [
                'LocalLB.NodeAddress',
                'LocalLB.Pool',
                'LocalLB.PoolMember',
                'LocalLB.Rule',
                'LocalLB.VirtualAddress',
                'LocalLB.VirtualServer',
                'Management.Partition',
                'Networking.RouteDomain',
                'System.Failover',
            ]

        for host in self.hosts:
            self.connect_to_bigip(host)

    def __setattr__(self, name, value):
        if name in ['hosts', 'wsdls']:
            if isinstance(value, str) or isinstance(value, unicode):
                object.__setattr__(self, name, [host.strip() for host in value.split(',')])
            else:
                object.__setattr__(self, name, value)
        else:
            object.__setattr__(self, name, value)

    def configure(self, config):
        for k, v in config.items(self.name):
            setattr(self, k, v)

    @property
    def all_bigip_connections(self):
        return [self.bigips[bigip] for bigip in self.bigips]

    @property
    def active_bigip_connection(self):
        for host in self.hosts:
            bigip = self.connect_to_bigip(host)
            if bigip.System.Failover.get_failover_state() == 'FAILOVER_STATE_ACTIVE':
                return bigip
        raise Exception('No active BIGIP devices were found in this environment (%s)' % self.name)

    def connect_to_bigip(self, host, wsdls=None, force_reconnect=False):
        if not(wsdls):
            wsdls = self.wsdls

        if not hasattr(self, 'password'):
            log.debug('No password has been set, attempting to retrieve via keychain capabilities')
            password = pycontrolshed.get_password(self.name, self.username)
            if password:
                log.debug('Password retrieved from the keychain')
                self.password = password
            else:
                log.error('No password is available')

        if host not in self.bigips or force_reconnect:
            self.bigips[host] = PyCtrlShedBIGIP(host,
                                                self.username,
                                                self.password,
                                                fromurl=True,
                                                wsdls=wsdls)
        return self.bigips[host]
gpl-2.0
8,453,095,348,816,026,000
35.555249
117
0.590796
false
4.154788
false
false
false
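A hypothetical interactive sketch of the classes above; the environment name, host names, pool, and member address are placeholders, and the password is looked up through the keychain helper exactly as in `connect_to_bigip`.

from pycontrolshed.model import Environment

# Comma-separated host strings are split apart by Environment.__setattr__.
env = Environment('production',
                  hosts='bigip-a.example.com, bigip-b.example.com',
                  username='admin')
bigip = env.active_bigip_connection   # first unit reporting FAILOVER_STATE_ACTIVE
print(bigip.nodes.status(['web01.example.com'], partition='Common'))
bigip.pools.disable_member('web_pool', ['10.0.0.10:80'], partition='Common')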
Scratchcat1/AATC
flask_app/Flask_Test_App.py
1
5816
from flask import Flask, flash, redirect, render_template, request, session, abort
import random,os,ast,prettytable
from flask_app import forms
import AATC_Server_002 as AATC_Server
import HedaBot

COMMANDS = HedaBot.CreateCommandDictionary()
COMMANDS["AddFlight"][2]["Type"] = lambda x: HedaBot.SplitWaypoints(x,":")
COMMANDS["AddFlight"][2]["Query"] = COMMANDS["AddFlight"][2]["Query"].replace("returns","colons")

app = Flask(__name__)
app.config.from_object('flask_app.config')

@app.route("/")
def home():
##    session["UserID"] = random.randint(0,1000)
    return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},Commands = COMMANDS)

@app.route("/help")
def help_page():
    return render_template("help.html",name = session.get("UserID"),user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})

@app.route("/base")
def base():
    return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})

@app.route("/quote")
def quote():
    quotes = ObtainQuote(3)
    return render_template("quote.html", quotes = quotes,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})

@app.route("/login", methods=['GET', 'POST'])
def login():
    form = forms.LoginForm()
    if form.validate_on_submit():
        print("Logging in ...")
        if form.Username.data == form.Password.data:
            session["UserID"] = form.Username.data
        else:
            session["UserID"] = -1
    return render_template("LoginForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})

@app.route("/dyno", methods=['GET', 'POST'])
def dyno():
    items = [{"name":"Username"},{"name":"Password"}]
    fields = [{"name":"Username","form":forms.wtforms.StringField('Username', validators=[forms.DataRequired()])},
              {"name":"Password","form":forms.wtforms.StringField('Password', validators=[forms.DataRequired()])}]
    #form = forms.DynoForm(fields = items)
    form = forms.update_form(fields)
    print(form.__dict__)

    if form.validate_on_submit():
        print("Logging in ...")
        print(form.fields.data)
        if form.Username.data == form.Password.data:
            session["UserID"] = form.Username.data
        else:
            session["UserID"] = -1
    #print(form.fields.__dict__)
    return render_template("DynamicForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = fields)

@app.route("/command/<string:command>",methods=['GET', 'POST'])
def Dynamic_Form(command):
    if command not in COMMANDS:
        return "FAILURE COMMAND DOES NOT EXIST"
    Fields = Generate_Fields(command)
    form = forms.update_form(Fields)

    if form.validate_on_submit():
        packet = Evaluate_Form(command,form)
        WebConnection = AATC_Server.WebConnection(session.get("UserID",-1))
        Success,Message,Data = WebConnection.Main(packet)
        if command == "Login":
            session["UserID"] = Data
            Data = []

        rendered = RenderResults(Success,Message,Data)
        print(rendered)
        return render_template("DynamicForm2.html",title = "Output",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields ,Commands = COMMANDS, OUTPUT = True, rendered_result = rendered)

    return render_template("DynamicForm2.html",title = "command",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields,Commands = COMMANDS)


def Generate_Fields(command):
    Queries = COMMANDS[command]
    Fields = []
    for x in range(1,len(Queries)+1):
        query_name = Queries[x]["Query"]
        field = {"name":query_name ,"form":forms.wtforms.StringField(query_name, validators=[forms.DataRequired()])}
        Fields.append(field)
    return Fields

def Evaluate_Form(command,form):
    Queries = COMMANDS[command]
    Arguments = []
    for x in range(1,len(Queries)+1):
        Arguments.append( Queries[x]["Type"](form.__dict__[Queries[x]["Query"]].data))
    packet = (command,Arguments)
    return packet

def RenderResults(Success,Message,Data = None):
    render = ""
    render += "Success >>"+str(Success)+"\n"
    render += "Message >>"+str(Message) +"\n"
    if Data not in [None,[]]:
        try:
            Columns = ast.literal_eval(Message)
            Table = prettytable.PrettyTable(Columns)
            for row in Data:
                Table.add_row(row)
            render += str(Table)
        except Exception as e:
            render += "Error creating aesthetic table"+str(e) +"\n"
            for row in Data:
                render += str(row)+"\n"
    render += ""
    rendered = render.split("\n")
    return rendered

##def ObtainQuote(number = 1):
##    with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
##        for i,line in enumerate(f):
##            pass
##
##    responses = []
##    for f in range(number):
##        lineNum = random.randint(0,i+1)
##        with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
##            for x in range(lineNum):
##                line = f.readline()
##            responses.append( line.rstrip().split("\t")[-1:][0])
##    return responses

def main_app(app):
    app.secret_key = "abcewhfuhiwuhef"
    app.run(host = "0.0.0.0")

if __name__ == "__main__":
    main_app(app)
gpl-3.0
1,400,593,260,159,722,000
31.813953
234
0.597146
false
3.576876
false
false
false
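A small sketch exercising the result renderer above outside of a request context; the message is a stringified column list so that `ast.literal_eval` can build the table, and the row values are made up.

from flask_app.Flask_Test_App import RenderResults

lines = RenderResults(True, "['FlightID', 'Status']",
                      [(1, 'Queued'), (2, 'Airborne')])
for line in lines:
    print(line)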
ragupta-git/ImcSdk
imcsdk/mometa/comm/CommSnmp.py
1
8759
"""This module contains the general information for CommSnmp ManagedObject.""" from ...imcmo import ManagedObject from ...imccoremeta import MoPropertyMeta, MoMeta from ...imcmeta import VersionMeta class CommSnmpConsts: ADMIN_STATE_DISABLED = "disabled" ADMIN_STATE_ENABLED = "enabled" COM2_SEC_NONE = "None" COM2_SEC_DISABLED = "disabled" COM2_SEC_FULL = "full" COM2_SEC_LIMITED = "limited" PROTO_ALL = "all" PROTO_NONE = "none" PROTO_TCP = "tcp" PROTO_UDP = "udp" class CommSnmp(ManagedObject): """This is CommSnmp class.""" consts = CommSnmpConsts() naming_props = set([]) mo_meta = { "classic": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version151f, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"]), "modular": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version2013e, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"]) } prop_meta = { "classic": { "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []), "com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []), "community": MoPropertyMeta("community", "community", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []), "engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []), "port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []), "sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []), "sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []), "trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []), "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []), "engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []), "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []), "proto": 
MoPropertyMeta("proto", "proto", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []), }, "modular": { "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []), "com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []), "community": MoPropertyMeta("community", "community", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []), "engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []), "port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []), "sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []), "sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []), "trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []), "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []), "engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []), "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []), "proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []), }, } prop_map = { "classic": { "adminState": "admin_state", "com2Sec": "com2_sec", "community": "community", "dn": "dn", "engineIdKey": "engine_id_key", "port": "port", "rn": "rn", "status": "status", "sysContact": "sys_contact", "sysLocation": "sys_location", "trapCommunity": "trap_community", "childAction": "child_action", "descr": "descr", "engineId": "engine_id", "name": "name", "proto": "proto", }, "modular": { "adminState": "admin_state", "com2Sec": "com2_sec", "community": "community", "dn": "dn", "engineIdKey": "engine_id_key", "port": "port", "rn": "rn", "status": "status", "sysContact": "sys_contact", "sysLocation": "sys_location", "trapCommunity": "trap_community", "childAction": 
"child_action", "descr": "descr", "engineId": "engine_id", "name": "name", "proto": "proto", }, } def __init__(self, parent_mo_or_dn, **kwargs): self._dirty_mask = 0 self.admin_state = None self.com2_sec = None self.community = None self.engine_id_key = None self.port = None self.status = None self.sys_contact = None self.sys_location = None self.trap_community = None self.child_action = None self.descr = None self.engine_id = None self.name = None self.proto = None ManagedObject.__init__(self, "CommSnmp", parent_mo_or_dn, **kwargs)
apache-2.0
1,721,278,884,973,908,700
63.881481
230
0.574837
false
3.296575
false
false
false
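A hypothetical configuration sketch using the imcsdk session handle; the endpoint address and credentials are placeholders, and "sys/svc-ext" is the conventional dn of the commSvcEp parent named in mo_meta above.

from imcsdk.imchandle import ImcHandle
from imcsdk.mometa.comm.CommSnmp import CommSnmp

handle = ImcHandle("192.0.2.10", "admin", "password")  # placeholder endpoint/credentials
handle.login()
snmp = CommSnmp(parent_mo_or_dn="sys/svc-ext")
snmp.admin_state = "enabled"
snmp.community = "public"
handle.set_mo(snmp)    # CommSnmp's mo_meta grants "Get" and "Set" access
handle.logout()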
wevote/WebAppPublic
apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py
1
2560
# apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-


def positions_count_for_one_ballot_item_doc_template_values(url_root):
    """
    Show documentation about positionsCountForOneBallotItem
    """
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name': 'ballot_item_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'The unique identifier for one ballot item.',
        },
    ]
    optional_query_parameter_list = [
    ]

    potential_status_codes_list = [
    ]

    try_now_link_variables_dict = {
    }

    api_response = '{\n' \
                   '  "success": boolean,\n' \
                   '  "status": string,\n' \
                   '  "ballot_item_we_vote_id": string,\n' \
                   '  "ballot_item_list": list ' \
                   '(we return a list so this API can be consumed like positionsCountForAllBallotItems)\n' \
                   '   [\n' \
                   '     "ballot_item_we_vote_id": string,\n' \
                   '     "support_count": integer,\n' \
                   '     "oppose_count": integer,\n' \
                   '   ],\n' \
                   '}'

    template_values = {
        'api_name': 'positionsCountForOneBallotItem',
        'api_slug': 'positionsCountForOneBallotItem',
        'api_introduction':
            "Retrieve the number of support and oppose positions for one ballot item.",
        'try_now_link': 'apis_v1:positionsCountForOneBallotItemView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
bsd-3-clause
5,725,262,911,566,859,000
38.384615
115
0.5375
false
3.803863
false
false
false
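A quick sketch of consuming the template values above; the url_root value is illustrative.

values = positions_count_for_one_ballot_item_doc_template_values(
    'https://api.wevoteusa.org/apis/v1/')   # illustrative url_root
print(values['api_name'])       # positionsCountForOneBallotItem
print(values['get_or_post'])    # GET
print(values['api_response'])   # the JSON-shaped response sketch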
longde123/MultiversePlatform
server/config/common/character_factory.py
1
4399
#
#  The Multiverse Platform is made available under the MIT License.
#
#  Copyright (c) 2012 The Multiverse Foundation
#
#  Permission is hereby granted, free of charge, to any person
#  obtaining a copy of this software and associated documentation
#  files (the "Software"), to deal in the Software without restriction,
#  including without limitation the rights to use, copy, modify,
#  merge, publish, distribute, sublicense, and/or sell copies
#  of the Software, and to permit persons to whom the Software
#  is furnished to do so, subject to the following conditions:
#
#  The above copyright notice and this permission notice shall be
#  included in all copies or substantial portions of the Software.
#
#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
#  OR OTHER DEALINGS IN THE SOFTWARE.
#
#

from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.core import *
from multiverse.mars.events import *
from multiverse.mars.util import *
from multiverse.mars.plugins import *
from multiverse.server.plugins import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *

from java.lang import *

displayContext = DisplayContext("human_female.mesh")
displayContext.addSubmesh(DisplayContext.Submesh("bodyShape-lib.0",
                                                 "human_female.skin_material"))
displayContext.addSubmesh(DisplayContext.Submesh("head_aShape-lib.0",
                                                 "human_female.head_a_material"))
displayContext.addSubmesh(DisplayContext.Submesh("hair_bShape-lib.0",
                                                 "human_female.hair_b_material"))

# default player template
player = Template("DefaultPlayer")
player.put(WorldManagerClient.NAMESPACE,
           WorldManagerClient.TEMPL_DISPLAY_CONTEXT, displayContext)
player.put(WorldManagerClient.NAMESPACE,
           WorldManagerClient.TEMPL_OBJECT_TYPE, ObjectTypes.player)
player.put(InventoryClient.NAMESPACE, InventoryClient.TEMPL_ITEMS, "")
ObjectManagerClient.registerTemplate(player)

# character factory
class SampleFactory (CharacterFactory):
    def createCharacter(self, worldName, uid, properties):
        name = properties.get("characterName");

        # Player start location
        loc = Point(-135343, 0, -202945)

        # Player start instance; assumes you have an instance named "default"
        instanceOid = InstanceClient.getInstanceOid("default")

        overrideTemplate = Template()
        if name:
            overrideTemplate.put(WorldManagerClient.NAMESPACE,
                                 WorldManagerClient.TEMPL_NAME, name)
        overrideTemplate.put(WorldManagerClient.NAMESPACE,
                             WorldManagerClient.TEMPL_INSTANCE, Long(instanceOid))
        overrideTemplate.put(WorldManagerClient.NAMESPACE,
                             WorldManagerClient.TEMPL_LOC, loc)

        # Initialize the player's instance restore stack
        restorePoint = InstanceRestorePoint("default", loc)
        restorePoint.setFallbackFlag(True)
        restoreStack = LinkedList()
        restoreStack.add(restorePoint)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
                             ObjectManagerClient.TEMPL_INSTANCE_RESTORE_STACK, restoreStack)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
                             ObjectManagerClient.TEMPL_CURRENT_INSTANCE_NAME, "default")

        # Make the player persistent (will be saved in database)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
ObjectManagerClient.TEMPL_PERSISTENT, Boolean(True)); # Create the player object objOid = ObjectManagerClient.generateObject( "DefaultPlayer", overrideTemplate) Log.debug("SampleFactory: generated obj oid=" + str(objOid)) return objOid sampleFactory = SampleFactory() LoginPlugin.getCharacterGenerator().setCharacterFactory(sampleFactory);
mit
2,857,242,688,523,992,600
39.357798
81
0.711525
false
4.246139
false
false
false
shoopio/shoop
shuup/importer/admin_module/import_views.py
1
7325
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the OSL-3.0 license found in the # LICENSE file in the root directory of this source tree. import hashlib import logging import os from datetime import datetime from django.contrib import messages from django.core.urlresolvers import reverse from django.db.transaction import atomic from django.http.response import Http404, HttpResponse, HttpResponseBadRequest from django.shortcuts import redirect from django.utils.translation import ugettext_lazy as _ from django.views.generic import FormView, TemplateView, View from shuup.admin.shop_provider import get_shop from shuup.importer.admin_module.forms import ImportForm, ImportSettingsForm from shuup.importer.transforms import transform_file from shuup.importer.utils import ( get_import_file_path, get_importer, get_importer_choices ) from shuup.utils.excs import Problem logger = logging.getLogger(__name__) class ImportProcessView(TemplateView): template_name = "shuup/importer/admin/import_process.jinja" importer = None def dispatch(self, request, *args, **kwargs): self.importer_cls = get_importer(request.GET.get("importer")) self.model_str = request.GET.get("importer") self.lang = request.GET.get("lang") return super(ImportProcessView, self).dispatch(request, *args, **kwargs) def _transform_request_file(self): try: filename = get_import_file_path(self.request.GET.get("n")) if not os.path.isfile(filename): raise ValueError(_("%s is not a file") % self.request.GET.get("n")) except: raise Problem(_("File missing.")) try: mode = "xls" if filename.endswith("xlsx"): mode = "xlsx" if filename.endswith("csv"): mode = "csv" if self.importer_cls.custom_file_transformer: return self.importer_cls.transform_file(mode, filename) return transform_file(mode, filename) except (Exception, RuntimeError) as e: messages.error(self.request, e) def prepare(self): self.data = self._transform_request_file() if self.data is None: return False self.importer = self.importer_cls(self.data, get_shop(self.request), self.lang) self.importer.process_data() if self.request.method == "POST": # check if mapping was done for field in self.importer.unmatched_fields: key = "remap[%s]" % field vals = self.request.POST.getlist(key) if len(vals): self.importer.manually_match(field, vals[0]) self.importer.do_remap() self.settings_form = ImportSettingsForm(data=self.request.POST if self.request.POST else None) if self.settings_form.is_bound: self.settings_form.is_valid() return True def post(self, request, *args, **kwargs): prepared = self.prepare() if not prepared: return redirect(reverse("shuup_admin:importer.import")) try: with atomic(): self.importer.do_import(self.settings_form.cleaned_data["import_mode"]) except Exception: logger.exception("Failed to import data") messages.error(request, _("Failed to import the file.")) return redirect(reverse("shuup_admin:importer.import")) self.template_name = "shuup/importer/admin/import_process_complete.jinja" return self.render_to_response(self.get_context_data(**kwargs)) def get_context_data(self, **kwargs): context = super(ImportProcessView, self).get_context_data(**kwargs) context["data"] = self.data context["importer"] = self.importer context["form"] = self.settings_form context["model_fields"] = self.importer.get_fields_for_mapping() context["visible_rows"] = self.data.rows[1:5] return context def get(self, request, *args, **kwargs): prepared = self.prepare() if not prepared: 
return redirect(reverse("shuup_admin:importer.import")) return self.render_to_response(self.get_context_data(**kwargs)) class ImportView(FormView): template_name = "shuup/importer/admin/import.jinja" form_class = ImportForm def post(self, request, *args, **kwargs): file = self.request.FILES["file"] basename, ext = os.path.splitext(file.name) import_name = "%s%s" % (hashlib.sha256(("%s" % datetime.now()).encode("utf-8")).hexdigest(), ext) full_path = get_import_file_path(import_name) if not os.path.isdir(os.path.dirname(full_path)): os.makedirs(os.path.dirname(full_path)) with open(full_path, 'wb+') as destination: for chunk in file.chunks(): destination.write(chunk) next_url = request.POST.get("next") importer = request.POST.get("importer") lang = request.POST.get("language") return redirect("%s?n=%s&importer=%s&lang=%s" % (next_url, import_name, importer, lang)) def get_form_kwargs(self): kwargs = super(ImportView, self).get_form_kwargs() initial = kwargs.get("initial", {}) initial["importer"] = self.request.GET.get("importer", initial.get("initial")) kwargs.update({ "request": self.request, "initial": initial }) return kwargs def get_context_data(self, **kwargs): context = super(ImportView, self).get_context_data(**kwargs) # check whether the importer has a example file template # if so, we also add a url to download the example file importer = self.request.GET.get("importer") # no importer passed, get the first choice available if not importer: importers = list(get_importer_choices()) if importers: importer = importers[0][0] if importer: importer_cls = get_importer(importer) context.update(importer_cls.get_help_context_data(self.request)) context["importer"] = importer_cls return context class ExampleFileDownloadView(View): def get(self, request, *args, **kwargs): importer = request.GET.get("importer") file_name = request.GET.get("file_name") if not importer or not file_name: return HttpResponseBadRequest(_("Invalid parameters")) importer_cls = get_importer(importer) if not importer_cls or not importer_cls.has_example_file(): raise Http404(_("Invalid importer")) example_file = importer_cls.get_example_file(file_name) if not example_file: raise Http404(_("Invalid file name")) response = HttpResponse(content_type=example_file.content_type) response['Content-Disposition'] = 'attachment; filename=%s' % example_file.file_name data = importer_cls.get_example_file_content(example_file, request) if not data: raise Http404(_("File not found")) data.seek(0) response.write(data.getvalue()) return response
agpl-3.0
-5,630,403,041,267,978,000
37.151042
105
0.632628
false
4.020307
false
false
false
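A hypothetical URL wiring sketch for the views above (Django 1.x style, matching the django.core.urlresolvers import); the patterns and names are guesses based on the reverse('shuup_admin:importer.import') calls, not the actual admin-module registration.

from django.conf.urls import url

from shuup.importer.admin_module.import_views import (
    ExampleFileDownloadView, ImportProcessView, ImportView)

urlpatterns = [
    url(r'^importer/import/$', ImportView.as_view(), name='importer.import'),
    url(r'^importer/import/process/$', ImportProcessView.as_view(),
        name='importer.import_process'),
    url(r'^importer/import/example/$', ExampleFileDownloadView.as_view(),
        name='importer.download_example'),
]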
PeridotYouClod/gRPC-Makerboards
generated/proto_out/sensors_pb2_grpc.py
1
20413
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc import generated.proto_out.sensors_pb2 as sensors__pb2 class FrontEndStub(object): """http://www.grpc.io/docs/guides/concepts.html is good reference for #tags #FrontEnd #Simple The FrontEnd server is the endpoint that most client interactions should use. These are public facing and used by servers in the outside world. Note: Currently there is no security in place so this should only be used for localhost applications only be used behind a firewall. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetLux = channel.unary_unary( '/FrontEnd/GetLux', request_serializer=sensors__pb2.GetLuxRequest.SerializeToString, response_deserializer=sensors__pb2.GetLuxReply.FromString, ) self.GetTemperature = channel.unary_unary( '/FrontEnd/GetTemperature', request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString, response_deserializer=sensors__pb2.GetTemperatureReply.FromString, ) self.GetSound = channel.unary_unary( '/FrontEnd/GetSound', request_serializer=sensors__pb2.GetSoundRequest.SerializeToString, response_deserializer=sensors__pb2.GetSoundReply.FromString, ) self.GetIrButtonPressed = channel.unary_unary( '/FrontEnd/GetIrButtonPressed', request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString, ) self.GetSonar = channel.unary_unary( '/FrontEnd/GetSonar', request_serializer=sensors__pb2.GetSonarRequest.SerializeToString, response_deserializer=sensors__pb2.GetSonarReply.FromString, ) self.SetLedStrip = channel.unary_unary( '/FrontEnd/SetLedStrip', request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString, response_deserializer=sensors__pb2.SetLedStripReply.FromString, ) self.GetButtonPressed = channel.unary_unary( '/FrontEnd/GetButtonPressed', request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetButtonPressedReply.FromString, ) self.SendToRfBlaster = channel.unary_unary( '/FrontEnd/SendToRfBlaster', request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString, response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString, ) class FrontEndServicer(object): """http://www.grpc.io/docs/guides/concepts.html is good reference for #tags #FrontEnd #Simple The FrontEnd server is the endpoint that most client interactions should use. These are public facing and used by servers in the outside world. Note: Currently there is no security in place so this should only be used for localhost applications only be used behind a firewall. 
""" def GetLux(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTemperature(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSound(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetIrButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSonar(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetLedStrip(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SendToRfBlaster(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_FrontEndServicer_to_server(servicer, server): rpc_method_handlers = { 'GetLux': grpc.unary_unary_rpc_method_handler( servicer.GetLux, request_deserializer=sensors__pb2.GetLuxRequest.FromString, response_serializer=sensors__pb2.GetLuxReply.SerializeToString, ), 'GetTemperature': grpc.unary_unary_rpc_method_handler( servicer.GetTemperature, request_deserializer=sensors__pb2.GetTemperatureRequest.FromString, response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString, ), 'GetSound': grpc.unary_unary_rpc_method_handler( servicer.GetSound, request_deserializer=sensors__pb2.GetSoundRequest.FromString, response_serializer=sensors__pb2.GetSoundReply.SerializeToString, ), 'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler( servicer.GetIrButtonPressed, request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString, response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString, ), 'GetSonar': grpc.unary_unary_rpc_method_handler( servicer.GetSonar, request_deserializer=sensors__pb2.GetSonarRequest.FromString, response_serializer=sensors__pb2.GetSonarReply.SerializeToString, ), 'SetLedStrip': grpc.unary_unary_rpc_method_handler( servicer.SetLedStrip, request_deserializer=sensors__pb2.SetLedStripRequest.FromString, response_serializer=sensors__pb2.SetLedStripReply.SerializeToString, ), 'GetButtonPressed': grpc.unary_unary_rpc_method_handler( servicer.GetButtonPressed, request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString, 
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString, ), 'SendToRfBlaster': grpc.unary_unary_rpc_method_handler( servicer.SendToRfBlaster, request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString, response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'FrontEnd', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class PushFrontEndStub(object): """#FrontEnd #ServerStreaming The FrontEnd server is the endpoint that most client interactions should use. These are public facing and used by servers in the outside world. This server is for streaming events. Note: Currently there is no security in place so this should only be used for localhost applications only be used behind a firewall. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Subscribe = channel.unary_unary( '/PushFrontEnd/Subscribe', request_serializer=sensors__pb2.SubscribeRequest.SerializeToString, response_deserializer=sensors__pb2.SubscribeReply.FromString, ) self.StreamButtonPressed = channel.unary_stream( '/PushFrontEnd/StreamButtonPressed', request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetButtonPressedReply.FromString, ) class PushFrontEndServicer(object): """#FrontEnd #ServerStreaming The FrontEnd server is the endpoint that most client interactions should use. These are public facing and used by servers in the outside world. This server is for streaming events. Note: Currently there is no security in place so this should only be used for localhost applications only be used behind a firewall. """ def Subscribe(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StreamButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_PushFrontEndServicer_to_server(servicer, server): rpc_method_handlers = { 'Subscribe': grpc.unary_unary_rpc_method_handler( servicer.Subscribe, request_deserializer=sensors__pb2.SubscribeRequest.FromString, response_serializer=sensors__pb2.SubscribeReply.SerializeToString, ), 'StreamButtonPressed': grpc.unary_stream_rpc_method_handler( servicer.StreamButtonPressed, request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString, response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'PushFrontEnd', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class ArduinoStub(object): """#Backend #Simple Arduino server handles interactions between Arduino brand devices & other servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction) Note: Do not have clients depend on this it should be behind a FrontEnd. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.GetIrButtonPressed = channel.unary_unary( '/Arduino/GetIrButtonPressed', request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString, ) self.GetSonar = channel.unary_unary( '/Arduino/GetSonar', request_serializer=sensors__pb2.GetSonarRequest.SerializeToString, response_deserializer=sensors__pb2.GetSonarReply.FromString, ) self.SendToRfBlaster = channel.unary_unary( '/Arduino/SendToRfBlaster', request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString, response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString, ) class ArduinoServicer(object): """#Backend #Simple Arduino server handles interactions between Arduino brand devices & other servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction) Note: Do not have clients depend on this it should be behind a FrontEnd. """ def GetIrButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSonar(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SendToRfBlaster(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ArduinoServicer_to_server(servicer, server): rpc_method_handlers = { 'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler( servicer.GetIrButtonPressed, request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString, response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString, ), 'GetSonar': grpc.unary_unary_rpc_method_handler( servicer.GetSonar, request_deserializer=sensors__pb2.GetSonarRequest.FromString, response_serializer=sensors__pb2.GetSonarReply.SerializeToString, ), 'SendToRfBlaster': grpc.unary_unary_rpc_method_handler( servicer.SendToRfBlaster, request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString, response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Arduino', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class WioLinkStub(object): """#Backend #Simple WioLink server handles interactions between Wio Link brand devices & other servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/) Note: Do not have clients depend on this it should be behind a FrontEnd. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.GetLux = channel.unary_unary( '/WioLink/GetLux', request_serializer=sensors__pb2.GetLuxRequest.SerializeToString, response_deserializer=sensors__pb2.GetLuxReply.FromString, ) self.GetTemperature = channel.unary_unary( '/WioLink/GetTemperature', request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString, response_deserializer=sensors__pb2.GetTemperatureReply.FromString, ) self.GetSound = channel.unary_unary( '/WioLink/GetSound', request_serializer=sensors__pb2.GetSoundRequest.SerializeToString, response_deserializer=sensors__pb2.GetSoundReply.FromString, ) self.SetLedStrip = channel.unary_unary( '/WioLink/SetLedStrip', request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString, response_deserializer=sensors__pb2.SetLedStripReply.FromString, ) self.GetButtonPressed = channel.unary_unary( '/WioLink/GetButtonPressed', request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetButtonPressedReply.FromString, ) class WioLinkServicer(object): """#Backend #Simple WioLink server handles interactions between Wio Link brand devices & other servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/) Note: Do not have clients depend on this it should be behind a FrontEnd. """ def GetLux(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTemperature(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSound(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetLedStrip(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_WioLinkServicer_to_server(servicer, server): rpc_method_handlers = { 'GetLux': grpc.unary_unary_rpc_method_handler( servicer.GetLux, request_deserializer=sensors__pb2.GetLuxRequest.FromString, response_serializer=sensors__pb2.GetLuxReply.SerializeToString, ), 'GetTemperature': grpc.unary_unary_rpc_method_handler( servicer.GetTemperature, request_deserializer=sensors__pb2.GetTemperatureRequest.FromString, response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString, ), 'GetSound': grpc.unary_unary_rpc_method_handler( servicer.GetSound, request_deserializer=sensors__pb2.GetSoundRequest.FromString, response_serializer=sensors__pb2.GetSoundReply.SerializeToString, ), 'SetLedStrip': grpc.unary_unary_rpc_method_handler( servicer.SetLedStrip, request_deserializer=sensors__pb2.SetLedStripRequest.FromString, response_serializer=sensors__pb2.SetLedStripReply.SerializeToString, ), 'GetButtonPressed': grpc.unary_unary_rpc_method_handler( servicer.GetButtonPressed, 
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString, response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'WioLink', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class PushStub(object): """#ServerStreaming #Backend Push server pushes data when a sensor event occurs for the client to react to. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Subscribe = channel.unary_unary( '/Push/Subscribe', request_serializer=sensors__pb2.SubscribeRequest.SerializeToString, response_deserializer=sensors__pb2.SubscribeReply.FromString, ) self.StreamButtonPressed = channel.unary_stream( '/Push/StreamButtonPressed', request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString, response_deserializer=sensors__pb2.GetButtonPressedReply.FromString, ) class PushServicer(object): """#ServerStreaming #Backend Push server pushes data when a sensor event occurs for the client to react to. """ def Subscribe(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StreamButtonPressed(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_PushServicer_to_server(servicer, server): rpc_method_handlers = { 'Subscribe': grpc.unary_unary_rpc_method_handler( servicer.Subscribe, request_deserializer=sensors__pb2.SubscribeRequest.FromString, response_serializer=sensors__pb2.SubscribeReply.SerializeToString, ), 'StreamButtonPressed': grpc.unary_stream_rpc_method_handler( servicer.StreamButtonPressed, request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString, response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Push', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
mit
-6,543,290,364,991,223,000
38.483559
85
0.725371
false
4.157434
false
false
false
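A minimal client sketch for the generated stubs above; the address/port and the empty request message are assumptions.

import grpc

import generated.proto_out.sensors_pb2 as sensors_pb2
import generated.proto_out.sensors_pb2_grpc as sensors_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')   # port is an assumption
stub = sensors_pb2_grpc.FrontEndStub(channel)
reply = stub.GetLux(sensors_pb2.GetLuxRequest())
print(reply)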
skirpichev/omg
diofant/vector/dyadic.py
1
8076
from ..core import AtomicExpr, Integer, Pow from ..matrices import ImmutableMatrix from .basisdependent import (BasisDependent, BasisDependentAdd, BasisDependentMul, BasisDependentZero) class Dyadic(BasisDependent): """ Super class for all Dyadic-classes. References ========== * https://en.wikipedia.org/wiki/Dyadic_tensor * Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill """ _op_priority = 13.0 @property def components(self): """ Returns the components of this dyadic in the form of a Python dictionary mapping BaseDyadic instances to the corresponding measure numbers. """ # The '_components' attribute is defined according to the # subclass of Dyadic the instance belongs to. return self._components def dot(self, other): """ Returns the dot product(also called inner product) of this Dyadic, with another Dyadic or Vector. If 'other' is a Dyadic, this returns a Dyadic. Else, it returns a Vector (unless an error is encountered). Parameters ========== other : Dyadic/Vector The other Dyadic or Vector to take the inner product with Examples ======== >>> from diofant.vector import CoordSysCartesian >>> N = CoordSysCartesian('N') >>> D1 = N.i.outer(N.j) >>> D2 = N.j.outer(N.j) >>> D1.dot(D2) (N.i|N.j) >>> D1.dot(N.j) N.i """ from .vector import Vector if isinstance(other, BasisDependentZero): return Vector.zero elif isinstance(other, Vector): outvec = Vector.zero for k, v in self.components.items(): vect_dot = k.args[1].dot(other) outvec += vect_dot * v * k.args[0] return outvec elif isinstance(other, Dyadic): outdyad = Dyadic.zero for k1, v1 in self.components.items(): for k2, v2 in other.components.items(): vect_dot = k1.args[1].dot(k2.args[0]) outer_product = k1.args[0].outer(k2.args[1]) outdyad += vect_dot * v1 * v2 * outer_product return outdyad else: raise TypeError('Inner product is not defined for ' + str(type(other)) + ' and Dyadics.') def __and__(self, other): return self.dot(other) __and__.__doc__ = dot.__doc__ def cross(self, other): """ Returns the cross product between this Dyadic, and a Vector, as a Vector instance. Parameters ========== other : Vector The Vector that we are crossing this Dyadic with Examples ======== >>> from diofant.vector import CoordSysCartesian >>> N = CoordSysCartesian('N') >>> d = N.i.outer(N.i) >>> d.cross(N.j) (N.i|N.k) """ from .vector import Vector if other == Vector.zero: return Dyadic.zero elif isinstance(other, Vector): outdyad = Dyadic.zero for k, v in self.components.items(): cross_product = k.args[1].cross(other) outer = k.args[0].outer(cross_product) outdyad += v * outer return outdyad else: raise TypeError(str(type(other)) + ' not supported for ' + 'cross with dyadics') def __xor__(self, other): return self.cross(other) __xor__.__doc__ = cross.__doc__ def to_matrix(self, system, second_system=None): """ Returns the matrix form of the dyadic with respect to one or two coordinate systems. Parameters ========== system : CoordSysCartesian The coordinate system that the rows and columns of the matrix correspond to. If a second system is provided, this only corresponds to the rows of the matrix. second_system : CoordSysCartesian, optional, default=None The coordinate system that the columns of the matrix correspond to. 
Examples ======== >>> from diofant.vector import CoordSysCartesian >>> N = CoordSysCartesian('N') >>> v = N.i + 2*N.j >>> d = v.outer(N.i) >>> d.to_matrix(N) Matrix([ [1, 0, 0], [2, 0, 0], [0, 0, 0]]) >>> q = Symbol('q') >>> P = N.orient_new_axis('P', q, N.k) >>> d.to_matrix(N, P) Matrix([ [ cos(q), -sin(q), 0], [2*cos(q), -2*sin(q), 0], [ 0, 0, 0]]) """ if second_system is None: second_system = system return ImmutableMatrix([i.dot(self).dot(j) for i in system for j in second_system]).reshape(3, 3) class BaseDyadic(Dyadic, AtomicExpr): """Class to denote a base dyadic tensor component.""" def __new__(cls, vector1, vector2): from .vector import Vector, BaseVector, VectorZero # Verify arguments if not isinstance(vector1, (BaseVector, VectorZero)) or \ not isinstance(vector2, (BaseVector, VectorZero)): raise TypeError('BaseDyadic cannot be composed of non-base ' + 'vectors') # Handle special case of zero vector elif vector1 == Vector.zero or vector2 == Vector.zero: return Dyadic.zero # Initialize instance obj = super().__new__(cls, vector1, vector2) obj._base_instance = obj obj._measure_number = 1 obj._components = {obj: Integer(1)} obj._sys = vector1._sys obj._pretty_form = ('(' + vector1._pretty_form + '|' + vector2._pretty_form + ')') obj._latex_form = ('(' + vector1._latex_form + '{|}' + vector2._latex_form + ')') return obj def __str__(self, printer=None): return '(' + str(self.args[0]) + '|' + str(self.args[1]) + ')' _diofantstr = __str__ _diofantrepr = _diofantstr class DyadicMul(BasisDependentMul, Dyadic): """Products of scalars and BaseDyadics.""" def __new__(cls, *args, **options): obj = BasisDependentMul.__new__(cls, *args, **options) return obj @property def base_dyadic(self): """The BaseDyadic involved in the product.""" return self._base_instance @property def measure_number(self): """The scalar expression involved in the definition of this DyadicMul. """ return self._measure_number class DyadicAdd(BasisDependentAdd, Dyadic): """Class to hold dyadic sums.""" def __new__(cls, *args, **options): obj = BasisDependentAdd.__new__(cls, *args, **options) return obj def __str__(self, printer=None): ret_str = '' items = list(self.components.items()) items.sort(key=lambda x: x[0].__str__()) for k, v in items: temp_dyad = k * v ret_str += temp_dyad.__str__(printer) + ' + ' return ret_str[:-3] __repr__ = __str__ _diofantstr = __str__ class DyadicZero(BasisDependentZero, Dyadic): """Class to denote a zero dyadic.""" _op_priority = 13.1 _pretty_form = '(0|0)' _latex_form = r'(\mathbf{\hat{0}}|\mathbf{\hat{0}})' def __new__(cls): obj = BasisDependentZero.__new__(cls) return obj def _dyad_div(one, other): """Helper for division involving dyadics.""" if isinstance(other, Dyadic): raise TypeError('Cannot divide two dyadics') else: return DyadicMul(one, Pow(other, -1)) Dyadic._expr_type = Dyadic Dyadic._mul_func = DyadicMul Dyadic._add_func = DyadicAdd Dyadic._zero_func = DyadicZero Dyadic._base_func = BaseDyadic Dyadic._div_helper = _dyad_div Dyadic.zero = DyadicZero()
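A short usage sketch tying the classes above together, assuming a working diofant installation; it only exercises the public API already shown in the docstrings.

# Sketch: the unit dyadic acts as an identity under the inner product.
from diofant.vector import CoordSysCartesian

N = CoordSysCartesian('N')
identity = N.i.outer(N.i) + N.j.outer(N.j) + N.k.outer(N.k)
v = 2*N.i + 3*N.j - N.k

print(identity.dot(v))        # reproduces v component by component
print(identity.to_matrix(N))  # the 3x3 identity matrix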
bsd-3-clause
-3,575,280,719,085,110,000
28.911111
79
0.541605
false
3.768549
false
false
false
UCBerkeleySETI/blimpy
blimpy/plotting/plot_time_series.py
1
1628
from .config import *
from ..utils import rebin, db
from .plot_utils import calc_extent


def plot_time_series(wf, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
    """ Plot the time series.

     Args:
        wf (Waterfall): waterfall object holding the data to plot
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        if_id (int): IF (intermediate frequency) index passed to grab_data()
        logged (bool): Plot in linear (False) or dB units (True)
        orientation (str): 'h' for a horizontal time axis, 'v' for vertical
        MJD_time (bool): label the time axis in MJD rather than seconds
        kwargs: keyword args to be passed to matplotlib plot()
    """
    ax = plt.gca()

    plot_f, plot_data = wf.grab_data(f_start, f_stop, if_id)

    # Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
    if len(plot_data.shape) > 1:
        plot_data = np.nanmean(plot_data, axis=1)
    else:
        plot_data = np.nanmean(plot_data)

    if logged and wf.header['nbits'] >= 8:
        plot_data = db(plot_data)

    # Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
    extent = calc_extent(wf, plot_f=plot_f, plot_t=wf.timestamps, MJD_time=MJD_time)
    plot_t = np.linspace(extent[2], extent[3], len(wf.timestamps))

    if MJD_time:
        tlabel = "Time [MJD]"
    else:
        tlabel = "Time [s]"

    if logged:
        plabel = "Power [dB]"
    else:
        plabel = "Power [counts]"

    # Reverse order if vertical orientation.
    if 'v' in orientation:
        plt.plot(plot_data, plot_t, **kwargs)
        plt.xlabel(plabel)

    else:
        plt.plot(plot_t, plot_data, **kwargs)
        plt.xlabel(tlabel)
        plt.ylabel(plabel)

    ax.autoscale(axis='both', tight=True)
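A hypothetical usage sketch; the file name and frequency band are placeholders, and it assumes blimpy is installed with a readable filterbank file on disk.

# Illustrative only: 'example.fil' and the 8418-8420 MHz band are made up.
import matplotlib.pyplot as plt
from blimpy import Waterfall
from blimpy.plotting.plot_time_series import plot_time_series

wf = Waterfall('example.fil')
plot_time_series(wf, f_start=8418.0, f_stop=8420.0, logged=True)
plt.show()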
bsd-3-clause
-8,949,116,032,728,782,000
30.921569
117
0.616093
false
3.211045
false
false
false
openstack/horizon
openstack_dashboard/dashboards/admin/volumes/forms.py
1
10388
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.urls import reverse
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators

from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.snapshots.forms \
    import populate_status_choices
from openstack_dashboard.dashboards.project.volumes \
    import forms as project_forms
from openstack_dashboard.dashboards.project.volumes.tables \
    import VolumesTableBase as volumes_table


# This set of states was pulled from cinder's admin_actions.py
SETTABLE_STATUSES = (
    'attaching', 'available', 'creating', 'deleting', 'detaching', 'error',
    'error_deleting', 'in-use', 'maintenance', 'reserved')
STATUS_CHOICES = tuple(
    status for status in volumes_table.STATUS_DISPLAY_CHOICES
    if status[0] in SETTABLE_STATUSES
)


class ManageVolume(forms.SelfHandlingForm):
    identifier = forms.CharField(
        max_length=255,
        label=_("Identifier"),
        help_text=_("Name or other identifier for existing volume"))
    id_type = forms.ThemableChoiceField(
        label=_("Identifier Type"),
        help_text=_("Type of backend device identifier provided"))
    host = forms.CharField(
        max_length=255,
        label=_("Host"),
        help_text=_("Cinder host on which the existing volume resides; "
                    "takes the form: host@backend-name#pool"))
    name = forms.CharField(
        max_length=255,
        label=_("Volume Name"),
        required=False,
        help_text=_("Volume name to be assigned"))
    description = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 4}),
        label=_("Description"), required=False)
    metadata = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 2}),
        label=_("Metadata"), required=False,
        help_text=_("Comma-separated key=value pairs"),
        validators=[utils_validators.validate_metadata])
    volume_type = forms.ThemableChoiceField(
        label=_("Volume Type"),
        required=False)
    availability_zone = forms.ThemableChoiceField(
        label=_("Availability Zone"),
        required=False)

    bootable = forms.BooleanField(
        label=_("Bootable"),
        required=False,
        help_text=_("Specifies that the newly created volume "
                    "should be marked as bootable"))

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        self.fields['id_type'].choices = [("source-name", _("Name"))] + \
                                         [("source-id", _("ID"))]
        volume_types = cinder.volume_type_list(request)
        self.fields['volume_type'].choices = [("", _("No volume type"))] + \
                                             [(type.name, type.name)
                                              for type in volume_types]
        self.fields['availability_zone'].choices = \
            project_forms.availability_zones(request)

    def handle(self, request, data):
        try:
            az = data.get('availability_zone')

            # assume user enters metadata with "key1=val1,key2=val2"
            # convert to dictionary
            metadataDict = {}
            metadata = data.get('metadata')
            if metadata:
                metadata = metadata.replace(" ", "")
for item in metadata.split(','): key, value = item.split('=') metadataDict[key] = value cinder.volume_manage(request, host=data['host'], identifier=data['identifier'], id_type=data['id_type'], name=data['name'], description=data['description'], volume_type=data['volume_type'], availability_zone=az, metadata=metadataDict, bootable=data['bootable']) # for success message, use identifier if user does not # provide a volume name volume_name = data['name'] if not volume_name: volume_name = data['identifier'] messages.success( request, _('Successfully sent the request to manage volume: %s') % volume_name) return True except Exception: redirect = reverse("horizon:admin:volumes:index") exceptions.handle(request, _("Unable to manage volume."), redirect=redirect) class UnmanageVolume(forms.SelfHandlingForm): name = forms.CharField(label=_("Volume Name"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'})) host = forms.CharField(label=_("Host"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'})) volume_id = forms.CharField(label=_("ID"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'})) def handle(self, request, data): try: cinder.volume_unmanage(request, self.initial['volume_id']) messages.success( request, _('Successfully sent the request to unmanage volume: %s') % data['name']) return True except Exception: redirect = reverse("horizon:admin:volumes:index") exceptions.handle(request, _("Unable to unmanage volume."), redirect=redirect) class MigrateVolume(forms.SelfHandlingForm): name = forms.CharField(label=_("Volume Name"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'})) current_host = forms.CharField(label=_("Current Host"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'})) host = forms.ThemableChoiceField( label=_("Destination Host"), help_text=_("Choose a Host to migrate to.")) force_host_copy = forms.BooleanField(label=_("Force Host Copy"), initial=False, required=False) def __init__(self, request, *args, **kwargs): super().__init__(request, *args, **kwargs) initial = kwargs.get('initial', {}) self.fields['host'].choices = self.populate_host_choices(request, initial) def populate_host_choices(self, request, initial): hosts = initial.get('hosts') current_host = initial.get('current_host') host_list = [(host.name, host.name) for host in hosts if host.name != current_host] if host_list: host_list.insert(0, ("", _("Select a new host"))) else: host_list.insert(0, ("", _("No other hosts available"))) return sorted(host_list) def handle(self, request, data): try: cinder.volume_migrate(request, self.initial['volume_id'], data['host'], data['force_host_copy']) messages.success( request, _('Successfully sent the request to migrate volume: %s') % data['name']) return True except Exception: redirect = reverse("horizon:admin:volumes:index") exceptions.handle(request, _("Failed to migrate volume."), redirect=redirect) class UpdateStatus(forms.SelfHandlingForm): status = forms.ThemableChoiceField(label=_("Status")) def __init__(self, request, *args, **kwargs): # Initial values have to be operated before super() otherwise the # initial values will get overwritten back to the raw value current_status = kwargs['initial']['status'] kwargs['initial'].pop('status') super().__init__(request, *args, **kwargs) self.fields['status'].choices = populate_status_choices( current_status, STATUS_CHOICES) def handle(self, request, data): # Obtain the localized status for including in the message for choice in 
self.fields['status'].choices: if choice[0] == data['status']: new_status = choice[1] break else: new_status = data['status'] try: cinder.volume_reset_state(request, self.initial['volume_id'], data['status']) messages.success(request, _('Successfully updated volume status to "%s".') % new_status) return True except Exception: redirect = reverse("horizon:admin:volumes:index") exceptions.handle(request, _('Unable to update volume status to "%s".') % new_status, redirect=redirect)
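The "key1=val1,key2=val2" metadata convention used by ManageVolume.handle is easy to test in isolation; a self-contained sketch of the same parsing, independent of Django and horizon:

def parse_metadata(raw):
    """Parse 'key1=val1, key2=val2' into a dict, tolerating stray spaces."""
    result = {}
    if not raw:
        return result
    raw = raw.replace(' ', '')  # str.replace returns a new string
    for item in raw.split(','):
        if not item:
            continue
        key, _, value = item.partition('=')
        result[key] = value
    return result


print(parse_metadata('key1=val1, key2 = val2'))
# {'key1': 'val1', 'key2': 'val2'}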
apache-2.0
679,940,254,032,079,700
40.386454
79
0.545148
false
4.958473
false
false
false
ijmarshall/robotreviewer3
robotreviewer/robots/rationale_robot.py
1
13649
""" the BiasRobot class takes the full text of a clinical trial as input as a robotreviewer.data_structures.MultiDict, and returns bias information in the same format, which can easily be converted to JSON. there are multiple ways to build a MultiDict, however the most common way used in this project is as a PDF binary. pdf_binary = ... pdfr = PDFReader() data = pdfr.convert(pdf_binary) robot = BiasRobot() annotations = robot.annotate(data) """ # Authors: Iain Marshall <mail@ijmarshall.com> # Joel Kuiper <me@joelkuiper.com> # Byron Wallace <byron@ccs.neu.edu> import uuid import operator import pickle import numpy as np from collections import OrderedDict, defaultdict import robotreviewer import logging log = logging.getLogger(__name__) import sys sys.path.append('robotreviewer/ml') # need this for loading the rationale_CNN module from celery.contrib import rdb __version__ = {"name": "Risk of bias (CNN/SVM ensemble)", "version_number": "3", "publication_url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5300751/", "cite_bibtex": """@inproceedings{zhang2016rationale, title={Rationale-augmented convolutional neural networks for text classification}, author={Zhang, Ye and Marshall, Iain and Wallace, Byron C}, booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing}, volume={2016}, pages={795}, year={2016}, organization={NIH Public Access} }""", "cite_text": "Zhang, Ye, Iain J Marshall, and Byron C. Wallace. “Rationale-Augmented Convolutional Neural Networks for Text Classification.” Proceedings of Empirical Methods in Natural Language Processing (EMNLP), 2016." } class BiasRobot: def __init__(self, top_k=3): """ `top_k` refers to 'top-k recall'. top-1 recall will return the single most relevant sentence in the document, and top-3 recall the 3 most relevant. The validation study assessed the accuracy of top-3 and top-1 and we suggest top-3 as default """ self.bias_domains = ['Random sequence generation'] self.top_k = top_k self.bias_domains = {'RSG': 'Random sequence generation', 'AC': 'Allocation concealment', 'BPP': 'Blinding of participants and personnel', 'BOA': 'Blinding of outcome assessment', 'IOD': 'Incomplete outcome data', 'SR': 'Selective reporting' } ### # Here we take a simple ensembling approach in which we combine the # predictions made by our rationaleCNN model and the JAMIA (linear) # multi task variant. ### self.all_domains = ['RSG', 'AC', 'BPP', 'BOA'] from robotreviewer.ml.classifier import MiniClassifier from robotreviewer.ml.vectorizer import ModularVectorizer from robotreviewer.ml.rationale_CNN import RationaleCNN, Document global RationaleCNN, Document, MiniClassifier, ModularVectorizer # CNN domains vectorizer_str = 'robotreviewer/data/keras/vectorizers/{}.pickle' arch_str = 'robotreviewer/data/keras/models/{}.json' weight_str = 'robotreviewer/data/keras/models/{}.hdf5' self.CNN_models = OrderedDict() for bias_domain in ['RSG', 'AC', 'BPP', 'BOA']: # Load vectorizer and keras model vectorizer_loc = vectorizer_str.format(bias_domain) arch_loc = arch_str.format(bias_domain) weight_loc = weight_str.format(bias_domain) preprocessor = pickle.load(open(vectorizer_loc, 'rb')) preprocessor.tokenizer.oov_token = None # TODO check with Byron self.CNN_models[bias_domain] = RationaleCNN(preprocessor, document_model_architecture_path=arch_loc, document_model_weights_path=weight_loc) # Linear domains (these are joint models!) 
self.linear_sent_clf = MiniClassifier(robotreviewer.get_data('bias/bias_sent_level.npz')) self.linear_doc_clf = MiniClassifier(robotreviewer.get_data('bias/bias_doc_level.npz')) self.linear_vec = ModularVectorizer(norm=None, non_negative=True, binary=True, ngram_range=(1, 2), n_features=2**26) def simple_borda_count(self, a, b, weights=None): ''' Basic Borda count implementation for just two lists. Assumes that a and b are lists of indices sorted in *increasing* preference (so top-ranked sentence should be the last element). ''' rank_scores_dict = defaultdict(int) if weights is None: weights = np.ones(2) # ensure list sizes are equal. note that the CNN # model will always assume/force 200 sentences, # whereas BoW model will not. so here we trim if # necessary, effectively taking the max_index # top sentences from each model and pooling these. a_n, b_n = len(a), len(b) max_index = min(a_n, b_n) a = a[-max_index:] b = b[-max_index:] for i in range(max_index): score = i+1 # 1 ... m rank_scores_dict[a[i]] += weights[0]*score rank_scores_dict[b[i]] += weights[1]*score sorted_indices = sorted(rank_scores_dict.items(), key=operator.itemgetter(1), reverse=True) return [index[0] for index in sorted_indices] def annotate(self, doc_text, top_k=None, threshold=0.5): """ Annotate full text of clinical trial report `top_k` can be overridden here, else defaults to the class default set in __init__ """ log.info('getting top k') top_k = self.top_k if not top_k else top_k doc_len = len(doc_text.text) doc_sents = [sent.text for sent in doc_text.sents] doc_sent_start_i = [sent.start_char for sent in doc_text.sents] doc_sent_end_i = [sent.end_char for sent in doc_text.sents] structured_data = [] #for domain, model in self.models.items(): log.info('starting modeling') for domain in self.all_domains: log.info('STARTING DOMAIN {}'.format(domain)) ### # linear model predictions (all domains) #if type(model) == tuple: # linear model log.info('doing linear predictions') (vec, sent_clf, doc_clf) = (self.linear_vec, self.linear_sent_clf, self.linear_doc_clf) doc_domains = [self.bias_domains[domain]] * len(doc_sents) doc_X_i = zip(doc_sents, doc_domains) vec.builder_clear() vec.builder_add_docs(doc_sents) vec.builder_add_docs(doc_X_i) doc_sents_X = vec.builder_transform() doc_sents_preds = sent_clf.decision_function(doc_sents_X) linear_high_prob_sent_indices = np.argsort(doc_sents_preds) ### # CNN predictions log.info('doing cnn predictions') bias_prob_CNN = None if domain in self.CNN_models: model = self.CNN_models[domain] log.info('model selected for {}'.format(domain)) doc = Document(doc_id=None, sentences=doc_sents) # make consumable for RA-CNN log.info('Doc done {}'.format(domain)) # this never comes back bias_prob_CNN, high_prob_sent_indices_CNN = model.predict_and_rank_sentences_for_doc(doc, num_rationales=len(doc), return_rationale_indices=True) log.info('got probs {}'.format(domain)) high_prob_sent_indices = self.simple_borda_count(high_prob_sent_indices_CNN, linear_high_prob_sent_indices)[:top_k] # and now the overall (doc-level) prediction from the CNN model. 
                # bias_prob = 1 --> low risk
                # from riskofbias2:
                #     doc_y[mapped_domain] = 1 if domain["RATING"] == "YES" else -1
                #
                # simplifying to LOW risk of bias = 1 *v* HIGH/UNKNOWN risk = -1
                ####
                bias_pred = int(bias_prob_CNN >= threshold)  # low risk if True and high/unclear otherwise
            else:
                # no aggregation here (since no CNN model for this domain)
                high_prob_sent_indices = linear_high_prob_sent_indices[-top_k:]
                high_prob_sent_indices = high_prob_sent_indices[::-1]  # put highest prob sentence first

            #if domain == "BOA":
            #    high_prob_sents_CNN = [doc_sents[i] for i in high_prob_sent_indices_CNN]

            # Find high probability sentences
            high_prob_sents = [doc_sents[i] for i in high_prob_sent_indices]
            high_prob_start_i = [doc_sent_start_i[i] for i in high_prob_sent_indices]
            high_prob_end_i = [doc_sent_end_i[i] for i in high_prob_sent_indices]
            high_prob_prefixes = [doc_text.text[max(0, offset-20):offset] for offset in high_prob_start_i]
            high_prob_suffixes = [doc_text.text[offset: min(doc_len, offset+20)] for offset in high_prob_end_i]
            high_prob_sents_j = " ".join(high_prob_sents)

            # overall pred from linear model
            vec.builder_clear()
            vec.builder_add_docs([doc_text.text])
            vec.builder_add_docs([(doc_text.text, self.bias_domains[domain])])
            sent_domain_interaction = "-s-" + self.bias_domains[domain]
            vec.builder_add_docs([(high_prob_sents_j, sent_domain_interaction)])
            X = vec.builder_transform()
            bias_prob_linear = doc_clf.predict_proba(X)[0]

            # if we have a CNN pred, too, then average; otherwise
            # rely on linear model.
            bias_prob = bias_prob_linear
            if bias_prob_CNN is not None:
                bias_prob = (bias_prob_CNN + bias_prob_linear) / 2.0

            bias_pred = int(bias_prob >= threshold)

            bias_class = ["high/unclear", "low"][bias_pred]  # prediction
            annotation_metadata = []
            for sent in zip(high_prob_sents, high_prob_start_i,
                            high_prob_prefixes, high_prob_suffixes):
                sent_metadata = {"content": sent[0],
                                 "position": sent[1],
                                 "uuid": str(uuid.uuid1()),
                                 "prefix": sent[2],
                                 "suffix": sent[3]}
                annotation_metadata.append(sent_metadata)

            structured_data.append({"domain": self.bias_domains[domain],
                                    "judgement": bias_class,
                                    "annotations": annotation_metadata})

        return structured_data

    def pdf_annotate(self, data):
        log.info('retrieving text')
        doc_text = data.get('parsed_text')
        if not doc_text:
            return data  # we've got to know the text at least..
structured_data = self.annotate(doc_text) data.ml["bias"] = structured_data log.info('done predictions, ready to return answers') return data def api_annotate(self, articles): if not all(('parsed_fullText' in article for article in articles)): raise Exception('Bias model requires full text to be able to complete annotation') annotations = [] for article in articles: if article.get('skip_annotation'): annotations.append([]) else: annotations.append(self.annotate(article['parsed_fullText'])) # reformat annotations to API formatting api_domain_titles = { 'Random sequence generation': 'random_sequence_generation', 'Allocation concealment': 'allocation_concealment', 'Blinding of participants and personnel': 'blinding_participants_personnel', 'Blinding of outcome assessment': 'blinding_outcome_assessment'} out = [] for r in annotations: row = {} for b in r: row[api_domain_titles[b['domain']]] = { "judgement": b['judgement'], "annotations": [{"text": an['content'], "start_index":an['position'] } for an in b['annotations']] } out.append(row) return out @staticmethod def get_marginalia(data): """ Get marginalia formatted for Spa from structured data """ marginalia = [] for row in data['bias']: marginalia.append({ "type": "Risk of Bias", "title": row['domain'], "annotations": row['annotations'], "description": "**Overall risk of bias prediction**: {}".format(row['judgement']) }) return marginalia @staticmethod def get_domains(): return [u'Random sequence generation', u'Allocation concealment', u'Blinding of participants and personnel', u'Blinding of outcome assessment'] #u'Incomplete outcome data', #u'Selective reporting']
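The rank fusion inside annotate() is self-contained enough to demonstrate on toy data; a standalone restatement of simple_borda_count (the sentence indices below are made up):

from collections import defaultdict


def borda_count(a, b, weights=(1.0, 1.0)):
    # Both lists are ordered by increasing preference: last element is top-ranked.
    scores = defaultdict(float)
    n = min(len(a), len(b))
    a, b = a[-n:], b[-n:]
    for i in range(n):
        scores[a[i]] += weights[0] * (i + 1)
        scores[b[i]] += weights[1] * (i + 1)
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    return [index for index, _ in ranked]


# Two models ranking five sentences (0..4) from least to most relevant:
print(borda_count([4, 2, 0, 3, 1], [4, 0, 2, 3, 1]))
# [1, 3, 2, 0, 4] -- both models agree sentence 1 is the strongest rationale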
gpl-3.0
-8,401,132,343,254,432,000
38.781341
236
0.577281
false
3.963114
false
false
false
zork9/pygame-pyMM
bombertoad.py
1
3050
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.

# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import pygame
from pygame.locals import *

from gameobject import *
from bullet import *
from stateimagelibrary import *

from random import *

from rng import *

class BomberToad(Gameobject):
    "Dude on Toad throwing Bombs"
    def __init__(self, xx, yy):
        Gameobject.__init__(self, xx, yy)
        self.w = 100
        self.h = 100
        self.hitpoints = 2
        self.yy = yy
        self.stimlib = Stateimagelibrary()

        image = pygame.image.load('./pics/bomber-left-1.bmp').convert()
        image.set_colorkey((0, 0, 0))
        self.stimlib.addpicture(image)
        image = pygame.image.load('./pics/bomber-left-2.bmp').convert()
        image.set_colorkey((0, 0, 0))
        self.stimlib.addpicture(image)
        image = pygame.image.load('./pics/bomber-left-3.bmp').convert()
        image.set_colorkey((0, 0, 0))
        self.stimlib.addpicture(image)
        image = pygame.image.load('./pics/bomber-left-4.bmp').convert()
        image.set_colorkey((0, 0, 0))
        self.stimlib.addpicture(image)

        self.counter = 0

    def draw(self, screen, room):
        # Mostly stay in the idle frame; occasionally run the bomb-throw
        # animation and spawn a Bullet on its final frame.
        if randint(0, 100) != 100 and self.counter == 0:
            self.stimlib.drawstatic(screen, self.x - 40 + room.relativex, self.y + room.relativey, 0)
        else:
            self.counter += 1
            self.stimlib.drawstatic(screen, self.x - 40 + room.relativex, self.y + room.relativey, self.counter)
            if self.counter >= 3:
                self.counter = 0
                room.gameobjects.append(Bullet(self.x + room.relativex, self.y + room.relativey, "left"))

    def update(self, room, player):
        pass

    def fight(self, room, player, keydown=-1):
        pass
gpl-2.0
8,758,164,902,192,934,000
34.882353
95
0.71377
false
3.497706
false
false
false
MadsJensen/agency_connectivity
make_df_hilbert_data.py
1
1383
import numpy as np import pandas as pd import scipy.io as sio from my_settings import * data = sio.loadmat("/home/mje/Projects/agency_connectivity/Data/data_all.mat")[ "data_all"] column_keys = ["subject", "trial", "condition", "shift"] result_df = pd.DataFrame(columns=column_keys) for k, subject in enumerate(subjects): p8_invol_shift = data[k, 3] - np.mean(data[k, 0]) p8_vol_shift = data[k, 2] - np.mean(data[k, 0]) p8_vol_bs_shift = data[k, 1] - np.mean(data[k, 0]) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "vol_bs", "shift": p8_vol_bs_shift[j + 1][0]}]) result_df = result_df.append(row, ignore_index=True) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "vol", "shift": p8_vol_shift[j + 1][0]}]) result_df = result_df.append(row, ignore_index=True) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "invol", "shift": p8_invol_shift[j][0]}]) result_df = result_df.append(row, ignore_index=True)
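Growing a DataFrame with repeated append calls is quadratic (and DataFrame.append was removed in pandas 2.0); a sketch of the same accumulation with a plain list and a single constructor call, using synthetic data in place of data_all.mat:

import numpy as np
import pandas as pd

rows = []
for subject in ['subj_01', 'subj_02']:        # placeholder subject ids
    shifts = np.random.randn(89)              # placeholder for the real shifts
    for j, shift in enumerate(shifts):
        rows.append({"subject": subject, "trial": j,
                     "condition": "invol", "shift": shift})

result_df = pd.DataFrame(rows, columns=["subject", "trial", "condition", "shift"])
print(result_df.head())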
bsd-3-clause
5,829,760,773,845,085,000
32.731707
79
0.501808
false
3.440299
false
false
false
jdf76/plugin.video.youtube
resources/lib/youtube_plugin/kodion/utils/http_server.py
1
21426
# -*- coding: utf-8 -*-
"""
    Copyright (C) 2018-2018 plugin.video.youtube

    SPDX-License-Identifier: GPL-2.0-only
    See LICENSES/GPL-2.0-only for more information.
"""
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import parse_qs, urlparse
from six.moves import range

import json
import os
import re
import requests
import socket

import xbmc
import xbmcaddon
import xbmcgui

from .. import logger


class YouTubeRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    def __init__(self, request, client_address, server):
        self.addon_id = 'plugin.video.youtube'
        addon = xbmcaddon.Addon(self.addon_id)
        whitelist_ips = addon.getSetting('kodion.http.ip.whitelist')
        whitelist_ips = ''.join(whitelist_ips.split())
        self.whitelist_ips = whitelist_ips.split(',')
        self.local_ranges = ('10.', '172.16.', '192.168.', '127.0.0.1', 'localhost', '::1')
        self.chunk_size = 1024 * 64
        try:
            self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id).decode('utf-8')
        except AttributeError:
            self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id)
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def connection_allowed(self):
        client_ip = self.client_address[0]
        log_lines = ['HTTPServer: Connection from |%s|' % client_ip]
        conn_allowed = client_ip.startswith(self.local_ranges)
        log_lines.append('Local range: |%s|' % str(conn_allowed))
        if not conn_allowed:
            conn_allowed = client_ip in self.whitelist_ips
            log_lines.append('Whitelisted: |%s|' % str(conn_allowed))

        if not conn_allowed:
            logger.log_debug('HTTPServer: Connection from |%s| not allowed' % client_ip)
        else:
            if self.path != '/ping':
                logger.log_debug(' '.join(log_lines))
        return conn_allowed

    # noinspection PyPep8Naming
    def do_GET(self):
        addon = xbmcaddon.Addon('plugin.video.youtube')
        dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
        api_config_enabled = addon.getSetting('youtube.api.config.page') == 'true'

        if self.path == '/client_ip':
            client_json = json.dumps({"ip": "{ip}".format(ip=self.client_address[0])})
            self.send_response(200)
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.send_header('Content-Length', len(client_json))
            self.end_headers()
            self.wfile.write(client_json.encode('utf-8'))
            return  # response complete; don't fall through to the routing below

        if self.path != '/ping':
            logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))

        if not self.connection_allowed():
            self.send_error(403)
        else:
            if dash_proxy_enabled and self.path.endswith('.mpd'):
                file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
                file_chunk = True
                logger.log_debug('HTTPServer: Request file path |{file_path}|'.format(file_path=file_path.encode('utf-8')))
                try:
                    with open(file_path, 'rb') as f:
                        self.send_response(200)
                        self.send_header('Content-Type', 'application/xml+dash')
                        self.send_header('Content-Length', os.path.getsize(file_path))
                        self.end_headers()
                        while file_chunk:
                            file_chunk = f.read(self.chunk_size)
                            if file_chunk:
                                self.wfile.write(file_chunk)
                except IOError:
                    response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
                    self.send_error(404, response)
            elif api_config_enabled and self.path == '/api':
                html = self.api_config_page()
                html = html.encode('utf-8')
                self.send_response(200)
                self.send_header('Content-Type', 'text/html; charset=utf-8')
                self.send_header('Content-Length', len(html))
                self.end_headers()
                for chunk in self.get_chunks(html):
                    self.wfile.write(chunk)
            elif
api_config_enabled and self.path.startswith('/api_submit'): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString xbmc.executebuiltin('Dialog.Close(addonsettings,true)') old_api_key = addon.getSetting('youtube.api.key') old_api_id = addon.getSetting('youtube.api.id') old_api_secret = addon.getSetting('youtube.api.secret') query = urlparse(self.path).query params = parse_qs(query) api_key = params.get('api_key', [None])[0] api_id = params.get('api_id', [None])[0] api_secret = params.get('api_secret', [None])[0] if api_key and api_id and api_secret: footer = i18n(30638) else: footer = u'' if re.search(r'api_key=(?:&|$)', query): api_key = '' if re.search(r'api_id=(?:&|$)', query): api_id = '' if re.search(r'api_secret=(?:&|$)', query): api_secret = '' updated = [] if api_key is not None and api_key != old_api_key: addon.setSetting('youtube.api.key', api_key) updated.append(i18n(30201)) if api_id is not None and api_id != old_api_id: addon.setSetting('youtube.api.id', api_id) updated.append(i18n(30202)) if api_secret is not None and api_secret != old_api_secret: updated.append(i18n(30203)) addon.setSetting('youtube.api.secret', api_secret) if addon.getSetting('youtube.api.key') and addon.getSetting('youtube.api.id') and \ addon.getSetting('youtube.api.secret'): enabled = i18n(30636) else: enabled = i18n(30637) if not updated: updated = i18n(30635) else: updated = i18n(30631) % u', '.join(updated) html = self.api_submit_page(updated, enabled, footer) html = html.encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.send_header('Content-Length', len(html)) self.end_headers() for chunk in self.get_chunks(html): self.wfile.write(chunk) elif self.path == '/ping': self.send_error(204) else: self.send_error(501) # noinspection PyPep8Naming def do_HEAD(self): logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path)) if not self.connection_allowed(): self.send_error(403) else: addon = xbmcaddon.Addon('plugin.video.youtube') dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true' if dash_proxy_enabled and self.path.endswith('.mpd'): file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\')) if not os.path.isfile(file_path): response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8')) self.send_error(404, response) else: self.send_response(200) self.send_header('Content-Type', 'application/xml+dash') self.send_header('Content-Length', os.path.getsize(file_path)) self.end_headers() else: self.send_error(501) # noinspection PyPep8Naming def do_POST(self): logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path)) if not self.connection_allowed(): self.send_error(403) elif self.path.startswith('/widevine'): license_url = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_url') license_token = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_token') if not license_url: self.send_error(404) return if not license_token: self.send_error(403) return size_limit = None length = int(self.headers['Content-Length']) post_data = self.rfile.read(length) li_headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'Bearer %s' % license_token } result = requests.post(url=license_url, headers=li_headers, data=post_data, stream=True) response_length = 
int(result.headers.get('content-length')) content = result.raw.read(response_length) content_split = content.split('\r\n\r\n'.encode('utf-8')) response_header = content_split[0].decode('utf-8', 'ignore') response_body = content_split[1] response_length = len(response_body) match = re.search(r'^Authorized-Format-Types:\s*(?P<authorized_types>.+?)\r*$', response_header, re.MULTILINE) if match: authorized_types = match.group('authorized_types').split(',') logger.log_debug('HTTPServer: Found authorized formats |{authorized_fmts}|'.format(authorized_fmts=authorized_types)) fmt_to_px = {'SD': (1280 * 528) - 1, 'HD720': 1280 * 720, 'HD': 7680 * 4320} if 'HD' in authorized_types: size_limit = fmt_to_px['HD'] elif 'HD720' in authorized_types: if xbmc.getCondVisibility('system.platform.android') == 1: size_limit = fmt_to_px['HD720'] else: size_limit = fmt_to_px['SD'] elif 'SD' in authorized_types: size_limit = fmt_to_px['SD'] self.send_response(200) if size_limit: self.send_header('X-Limit-Video', 'max={size_limit}px'.format(size_limit=str(size_limit))) for d in list(result.headers.items()): if re.match('^[Cc]ontent-[Ll]ength$', d[0]): self.send_header(d[0], response_length) else: self.send_header(d[0], d[1]) self.end_headers() for chunk in self.get_chunks(response_body): self.wfile.write(chunk) else: self.send_error(501) # noinspection PyShadowingBuiltins def log_message(self, format, *args): return def get_chunks(self, data): for i in range(0, len(data), self.chunk_size): yield data[i:i + self.chunk_size] @staticmethod def api_config_page(): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString api_key = addon.getSetting('youtube.api.key') api_id = addon.getSetting('youtube.api.id') api_secret = addon.getSetting('youtube.api.secret') html = Pages().api_configuration.get('html') css = Pages().api_configuration.get('css') html = html.format(css=css, title=i18n(30634), api_key_head=i18n(30201), api_id_head=i18n(30202), api_secret_head=i18n(30203), api_id_value=api_id, api_key_value=api_key, api_secret_value=api_secret, submit=i18n(30630), header=i18n(30634)) return html @staticmethod def api_submit_page(updated_keys, enabled, footer): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString html = Pages().api_submit.get('html') css = Pages().api_submit.get('css') html = html.format(css=css, title=i18n(30634), updated=updated_keys, enabled=enabled, footer=footer, header=i18n(30634)) return html class Pages(object): api_configuration = { 'html': u'<!doctype html>\n<html>\n' u'<head>\n\t<meta charset="utf-8">\n' u'\t<title>{title}</title>\n' u'\t<style>\n{css}\t</style>\n' u'</head>\n<body>\n' u'\t<div class="center">\n' u'\t<h5>{header}</h5>\n' u'\t<form action="/api_submit" class="config_form">\n' u'\t\t<label for="api_key">\n' u'\t\t<span>{api_key_head}</span><input type="text" name="api_key" value="{api_key_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<label for="api_id">\n' u'\t\t<span>{api_id_head}</span><input type="text" name="api_id" value="{api_id_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<label for="api_secret">\n' u'\t\t<span>{api_secret_head}</span><input type="text" name="api_secret" value="{api_secret_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<input type="submit" value="{submit}">\n' u'\t</form>\n' u'\t</div>\n' u'</body>\n</html>', 'css': u'body {\n' u' background: #141718;\n' u'}\n' u'.center {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u'}\n' u'.config_form {\n' u' width: 575px;\n' u' height: 145px;\n' u' 
font-size: 16px;\n' u' background: #1a2123;\n' u' padding: 30px 30px 15px 30px;\n' u' border: 5px solid #1a2123;\n' u'}\n' u'h5 {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' font-weight: 600;\n' u' width: 575px;\n' u' height: 20px;\n' u' background: #0f84a5;\n' u' padding: 5px 30px 5px 30px;\n' u' border: 5px solid #0f84a5;\n' u' margin: 0px;\n' u'}\n' u'.config_form input[type=submit],\n' u'.config_form input[type=button],\n' u'.config_form input[type=text],\n' u'.config_form textarea,\n' u'.config_form label {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u'}\n' u'.config_form label {\n' u' display:block;\n' u' margin-bottom: 10px;\n' u'}\n' u'.config_form label > span {\n' u' display: inline-block;\n' u' float: left;\n' u' width: 150px;\n' u'}\n' u'.config_form input[type=text] {\n' u' background: transparent;\n' u' border: none;\n' u' border-bottom: 1px solid #147a96;\n' u' width: 400px;\n' u' outline: none;\n' u' padding: 0px 0px 0px 0px;\n' u'}\n' u'.config_form input[type=text]:focus {\n' u' border-bottom: 1px dashed #0f84a5;\n' u'}\n' u'.config_form input[type=submit],\n' u'.config_form input[type=button] {\n' u' width: 150px;\n' u' background: #141718;\n' u' border: none;\n' u' padding: 8px 0px 8px 10px;\n' u' border-radius: 5px;\n' u' color: #fff;\n' u' margin-top: 10px\n' u'}\n' u'.config_form input[type=submit]:hover,\n' u'.config_form input[type=button]:hover {\n' u' background: #0f84a5;\n' u'}\n' } api_submit = { 'html': u'<!doctype html>\n<html>\n' u'<head>\n\t<meta charset="utf-8">\n' u'\t<title>{title}</title>\n' u'\t<style>\n{css}\t</style>\n' u'</head>\n<body>\n' u'\t<div class="center">\n' u'\t<h5>{header}</h5>\n' u'\t<div class="content">\n' u'\t\t<span>{updated}</span>\n' u'\t\t<span>{enabled}</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<div class="textcenter">\n' u'\t\t\t<span><small>{footer}</small></span>\n' u'\t\t</div>\n' u'\t</div>\n' u'\t</div>\n' u'</body>\n</html>', 'css': u'body {\n' u' background: #141718;\n' u'}\n' u'.center {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u'}\n' u'.textcenter {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u' text-align: center;\n' u'}\n' u'.content {\n' u' width: 575px;\n' u' height: 145px;\n' u' background: #1a2123;\n' u' padding: 30px 30px 15px 30px;\n' u' border: 5px solid #1a2123;\n' u'}\n' u'h5 {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' font-weight: 600;\n' u' width: 575px;\n' u' height: 20px;\n' u' background: #0f84a5;\n' u' padding: 5px 30px 5px 30px;\n' u' border: 5px solid #0f84a5;\n' u' margin: 0px;\n' u'}\n' u'span {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' display: block;\n' u' float: left;\n' u' width: 575px;\n' u'}\n' u'small {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 12px;\n' u' color: #fff;\n' u'}\n' } def get_http_server(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '0.0.0.0' port = int(port) if port else 50152 try: server = BaseHTTPServer.HTTPServer((address, port), YouTubeRequestHandler) return server except socket.error as e: 
logger.log_debug('HTTPServer: Failed to start |{address}:{port}| |{response}|'.format(address=address, port=port, response=str(e))) xbmcgui.Dialog().notification(addon.getAddonInfo('name'), str(e), xbmc.translatePath('special://home/addons/{0!s}/icon.png'.format(addon.getAddonInfo('id'))), 5000, False) return None def is_httpd_live(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = '127.0.0.1' if address == '0.0.0.0' else address address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1' port = int(port) if port else 50152 url = 'http://{address}:{port}/ping'.format(address=address, port=port) try: response = requests.get(url) result = response.status_code == 204 if not result: logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response=response.status_code)) return result except: logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response='failed')) return False def get_client_ip_address(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = '127.0.0.1' if address == '0.0.0.0' else address address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1' port = int(port) if port else 50152 url = 'http://{address}:{port}/client_ip'.format(address=address, port=port) response = requests.get(url) ip_address = None if response.status_code == 200: response_json = response.json() if response_json: ip_address = response_json.get('ip') return ip_address
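The /ping health check above cannot run outside Kodi because of the xbmc imports; a self-contained sketch of the same pattern using only the standard library:

import http.client
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer


class PingHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path == '/ping':
            self.send_response(204)  # alive, no body
            self.end_headers()
        else:
            self.send_error(501)

    def log_message(self, fmt, *args):  # keep the demo quiet
        pass


server = HTTPServer(('127.0.0.1', 0), PingHandler)  # port 0: pick any free port
port = server.server_address[1]
threading.Thread(target=server.serve_forever, daemon=True).start()

conn = http.client.HTTPConnection('127.0.0.1', port)
conn.request('GET', '/ping')
print(conn.getresponse().status == 204)  # True while the server is live
server.shutdown()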
gpl-2.0
4,763,203,937,845,970,000
41.511905
146
0.519882
false
3.521118
true
false
false
InnovArul/codesmart
Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_soft_epsilon.py
1
3861
from __future__ import print_function

import numpy as np

from grid import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
import matplotlib.pyplot as plt

EPS = 1e-4
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = {'U', 'D', 'L', 'R'}


def random_action(a, eps=0.1):
    # epsilon-soft: keep the greedy action with probability 1 - eps,
    # otherwise pick uniformly among all actions
    p = np.random.random()
    if p < 1 - eps:
        return a
    else:
        return np.random.choice(list(ALL_POSSIBLE_ACTIONS))


# monte carlo sampling - finding the optimal policy (policy iteration)
def play_game(grid, policy):
    state = (2, 0)

    # instead of a purely random first action, sample epsilon-softly from the policy
    a = random_action(policy[state])
    grid.set_state(state)
    states_actions_rewards = [(state, a, 0)]  # the stored action is the one about to be taken
    while True:
        r = grid.move(a)
        state = grid.current_state()
        # if game over, break the loop
        if grid.game_over():
            # terminal state: record it with no outgoing action
            states_actions_rewards.append((state, None, r))
            break
        else:
            # collect the next action to take and append it to the trace
            a = random_action(policy[state])
            states_actions_rewards.append((state, a, r))

    # calculate the returns by working backwards from the terminal state
    G = 0
    states_actions_returns = []
    for i, state_action_reward in enumerate(reversed(states_actions_rewards)):
        state, action, reward = state_action_reward
        if i != 0:
            states_actions_returns.append((state, action, G))
        G = reward + GAMMA * G

    states_actions_returns.reverse()
    return states_actions_returns


def max_dict(d):
    max_key = None
    max_val = float('-inf')

    for k in d:
        if d[k] > max_val:
            max_key, max_val = k, d[k]

    return max_key, max_val


if __name__ == '__main__':
    #grid = standard_grid()
    grid = negative_grid(-0.1)

    print('grid')
    print_values(grid.rewards, grid)

    # init random policy
    policy = {}
    for s in grid.actions:
        policy[s] = np.random.choice(list(ALL_POSSIBLE_ACTIONS))

    print('policy')
    print_policy(policy, grid)

    # initialize Q(s, a)
    Q = {}
    returns = {}  # buffer to hold all the returns for a (state, action) pair during monte-carlo game plays
    for s in grid.actions:
        # if state is non terminal
        Q[s] = {}
        for a in ALL_POSSIBLE_ACTIONS:
            # for all the possible actions, initialize Q(s,a)
            Q[s][a] = 0
            returns[(s, a)] = []

    # deltas
    deltas = []
    for sample in range(5000):
        if sample % 500 == 0:
            print(sample)
        biggest_change = 0
        # generate an episode and adapt Q(s, a)
        states_actions_returns = play_game(grid, policy)
        seen_states_actions = set()

        for s, a, G in states_actions_returns:
            key = (s, a)
            # first-visit monte carlo: only use the first return for each (s, a) pair
            if key not in seen_states_actions:
                old_q = Q[s][a]
                returns[key].append(G)
                Q[s][a] = np.mean(returns[key])
                seen_states_actions.add(key)
                biggest_change = max(biggest_change, abs(G - old_q))

        deltas.append(biggest_change)

        # policy improvement
        for s in Q:
            policy[s] = max_dict(Q[s])[0]

    plt.plot(deltas)
    plt.show()

    V = {}
    # extract V(s) = max_a Q(s, a)
    for s in Q:
        V[s] = max_dict(Q[s])[1]

    print('grid')
    print_values(V, grid)

    print('policy')
    print_policy(policy, grid)
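The epsilon-soft behaviour of random_action is easy to check empirically; a standalone sketch, independent of the grid world, that estimates how often the greedy action is actually taken for eps=0.1:

import numpy as np

ACTIONS = ('U', 'D', 'L', 'R')


def epsilon_soft(a, eps=0.1):
    if np.random.random() < 1 - eps:
        return a
    return np.random.choice(ACTIONS)


np.random.seed(0)
trials = 100000
hits = sum(epsilon_soft('U') == 'U' for _ in range(trials))
print(hits / float(trials))  # ~0.925, i.e. (1 - eps) + eps/4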
gpl-2.0
-5,864,667,669,027,085,000
29.164063
121
0.573168
false
3.645892
false
false
false
naturalness/sensibility
sensibility/language/java/__init__.py
1
6245
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import os
import sys
import token
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
    Any, AnyStr, Callable, IO, Iterable, Optional, Tuple, Union, overload, cast
)

import javac_parser

from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import NoSourceRepresentationError, Vocabulary, Vind


here = Path(__file__).parent


class JavaVocabulary(Vocabulary):
    """
    The vocabulary, extended so that entries can also be mapped back to
    their source representations (see to_source_text()).
    """
    first_entry_num = len(Vocabulary.SPECIAL_ENTRIES)

    def __init__(self, entries: Iterable[str], reprs: Iterable[str]) -> None:
        super().__init__(entries)
        # Create a look-up table for source representations.
        # The special tokens <unk>, <s>, </s> have NO reprs, thus are not
        # stored.
        self._index2repr = tuple(reprs)
        assert len(self._index2text) == self.first_entry_num + len(self._index2repr)

    def to_source_text(self, idx: Vind) -> str:
        if idx < self.first_entry_num:
            raise NoSourceRepresentationError(idx)
        return self._index2repr[idx - self.first_entry_num]

    @staticmethod
    def load() -> 'JavaVocabulary':
        entries = []
        reprs = []

        # Load from a tab-separated-values file
        with open(here / 'vocabulary.tsv') as vocab_file:
            first_entry = JavaVocabulary.first_entry_num
            for expected_num, line in enumerate(vocab_file, start=first_entry):
                # src_repr -- source representation
                num, entry, src_repr = line.rstrip().split()
                assert expected_num == int(num)
                entries.append(entry)
                reprs.append(src_repr)

        return JavaVocabulary(entries, reprs)


def to_str(source: Union[str, bytes, IO[bytes]]) -> str:
    """
    Coerce an input format to a Unicode string.
    """
    if isinstance(source, str):
        return source
    elif isinstance(source, bytes):
        # XXX: Assume it's UTF-8 encoded!
        return source.decode('UTF-8')
    else:
        # Assume a binary file-like object; decode its contents as UTF-8.
        return source.read().decode('UTF-8')


class LazyVocabulary:
    def __init__(self, fn):
        self.fn = fn

    def __get__(self, obj, value):
        if not hasattr(self, 'value'):
            self.value = self.fn()
        return self.value


class JavaToken(Token):
    """
    HACK: javac_parser has some... interesting ideas about normalization,
    so add a `_raw` field to the token.
    """
    # TODO: fix with upstream (javac_parser) to return a sensible value for the normalized value
    __slots__ = ('_raw',)

    def __init__(self, *, _raw: str, name: str, value: str, start: Position, end: Position) -> None:
        super().__init__(name=name, value=value, start=start, end=end)
        self._raw = _raw

    def __repr__(self) -> str:
        cls = type(self).__name__
        return (f"{cls}(_raw={self._raw!r}, "
                f"name={self.name!r}, value={self.value!r}, "
                f"start={self.start!r}, end={self.end!r})")


class Java(Language):
    """
    Defines the Java 8 programming language.
    """
    extensions = {'.java'}
    vocabulary = cast(Vocabulary, LazyVocabulary(JavaVocabulary.load))

    @property
    def java(self):
        """
        Lazily start up the Java server.
This decreases the chances of things going horribly wrong when two seperate process initialize the Java language instance around the same time. """ if not hasattr(self, '_java_server'): self._java_server = javac_parser.Java() # Py4j usually crashes as Python is cleaning up after exit() so # decrement the servers' reference count to lessen the chance of # that happening. @atexit.register def remove_reference(): del self._java_server return self._java_server def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Iterable[Token]: tokens = self.java.lex(to_str(source)) # Each token is a tuple with the following structure # (reproduced from javac_parser.py): # 1. Lexeme type # 2. Value (as it appears in the source file) # 3. A 2-tuple of start line, start column # 4. A 2-tuple of end line, end column # 5. A whitespace-free representation of the value for name, raw_value, start, end, normalized in tokens: # Omit the EOF token, as it's only useful for the parser. if name == 'EOF': continue # Take the NORMALIZED value, as Java allows unicode escapes in # ARBITRARY tokens and then things get hairy here. yield JavaToken(_raw=raw_value, name=name, value=normalized, start=Position(line=start[0], column=start[1]), end=Position(line=end[0], column=end[1])) def check_syntax(self, source: Union[str, bytes]) -> bool: return self.java.get_num_parse_errors(to_str(source)) == 0 def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary: toks = [tok for tok in source if tok.name != 'EOF'] slines = set(line for tok in toks for line in tok.lines) return SourceSummary(n_tokens=len(toks), sloc=len(slines)) def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]: for token in source: yield token.location, token.name java: Language = Java()
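A hypothetical usage sketch of the singleton defined above; it requires javac_parser and a local JVM, so treat it as illustrative rather than a runnable test:

# Illustrative only: the source snippet is made up.
source = 'class Hello { int x = 42; }'

for tok in java.tokenize(source):
    print(tok.name, repr(tok.value), tok.start)

print(java.check_syntax(source))  # True for well-formed Java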
apache-2.0
170,523,511,138,746,530
33.888268
100
0.620657
false
3.883706
false
false
false
postlund/pyatv
tests/support/test_dns.py
1
10011
"""Unit tests for pyatv.support.dns""" import io import typing import pytest from pyatv.support import dns @pytest.mark.parametrize( "name,expected", ( ("_http._tcp.local", (None, "_http._tcp", "local")), ("foo._http._tcp.local", ("foo", "_http._tcp", "local")), ("foo.bar._http._tcp.local", ("foo.bar", "_http._tcp", "local")), ), ids=("ptr", "no_dot", "with_dot"), ) def test_happy_service_instance_names(name, expected): assert dns.ServiceInstanceName.split_name(name) == expected @pytest.mark.parametrize( "name", ( "_http.local", "._tcp.local", "_http.foo._tcp.local", "_tcp._http.local", ), ids=("no_proto", "no_service", "split", "reversed"), ) def test_sad_service_instance_names(name): with pytest.raises(ValueError): dns.ServiceInstanceName.split_name(name) # mapping is test_id: tuple(name, expected_raw) encode_domain_names = { "root": (".", b"\x00"), "empty": ("", b"\x00"), "example.com": ("example.com", b"\x07example\x03com\x00"), "example.com_list": (["example", "com"], b"\x07example\x03com\x00"), "unicode": ("Bücher.example", b"\x07B\xc3\xbccher\x07example\x00"), "dotted_instance": ( "Dot.Within._http._tcp.example.local", b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00", ), "dotted_instance_list": ( ["Dot.Within", "_http", "_tcp", "example", "local"], b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00", ), "truncated_ascii": ( ( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" ".test" ), ( b"\x3fabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijk" b"\x04test" b"\x00" ), ), "truncated_unicode": ( ( # The 'a' is at the beginning to force the codepoints to be split at 63 # bytes. The next line is also at the right length to be below 88 characters # even if each kana is counted as a double-width character. Additionally, # this sequence is NF*D* normalized, not NFC (which is what is used for # Net-Unicode). "aがあいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめも" ".test" ), ( b"\x3d" b"a\xe3\x81\x8c\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a" b"\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x93\xe3\x81\x95" b"\xe3\x81\x97\xe3\x81\x99\xe3\x81\x9b\xe3\x81\x9d\xe3\x81\x9f\xe3\x81\xa1" b"\xe3\x81\xa4\xe3\x81\xa6" b"\x04test" b"\x00" ), ), } @pytest.mark.parametrize( "name,expected_raw", [pytest.param(*value, id=key) for key, value in encode_domain_names.items()], ) def test_qname_encode(name, expected_raw): assert dns.qname_encode(name) == expected_raw # mapping is test_id: tuple(raw_name, offset, expected_name, expected_offset) # If expected offset is None, it means len(raw_name), otherwise it's like an array index # (positive is from the beginning, negative from the end) decode_domain_names = { "simple": (b"\x03foo\x07example\x03com\x00", 0, "foo.example.com", None), "null": (b"\00", 0, "", None), "compressed": (b"aaaa\x04test\x00\x05label\xC0\x04\xAB\xCD", 10, "label.test", -2), # This case has two levels of compression "multi_compressed": ( b"aaaa\x04test\x00\x05label\xC0\x04\x03foo\xC0\x0A\xAB\xCD", 18, "foo.label.test", -2, ), # Taken straight from the Internationalized Domain name Wikipedia page "idna": (b"\x0Dxn--bcher-kva\x07example\x00", 0, "bücher.example", None), # Taken from issue #919. Apple puts a non-breaking space between "Apple" and "TV". 
"nbsp": ( b"\x10Apple\xc2\xa0TV (4167)\x05local\x00", 0, "Apple\xa0TV (4167).local", None, ), # This is a doozy of a test case; it's covering a couple different areas of Unicode, # as well as exercising that DNS-SD allows dots in instance names. "unicode": ( ( b"\x1d\xe5\xb1\x85\xe9\x96\x93 Apple\xc2\xa0TV. En Espa\xc3\xb1ol" b"\x05local" b"\x00" ), 0, "居間 Apple TV. En Español.local", None, ), } @pytest.mark.parametrize( "raw_name,offset,expected_name,expected_offset", [pytest.param(*value, id=key) for key, value in decode_domain_names.items()], ) def test_domain_name_parsing( raw_name: bytes, offset: int, expected_name: str, expected_offset: typing.Optional[int], ): with io.BytesIO(raw_name) as buffer: buffer.seek(offset) name = dns.parse_domain_name(buffer) assert name == expected_name if expected_offset is None: assert buffer.tell() == len(raw_name) else: # if expected_offset is positive, this will wrap around to the beginning, if # it's negative it won't. raw_len = len(raw_name) assert buffer.tell() == (raw_len + expected_offset) % raw_len # mapping is test_id: tuple(encoded_data, expected_data, expected_offset) # If expected offset is None, it means len(raw_name), otherwise it's like an array index # (positive is from the beginning, negative from the end) decode_strings = { "null": (b"\x00", b"", None), # 63 is significant because that's the max length for a domain label, but not a # character-string (they have similar encodings). "len_63": (b"\x3F" + (63 * b"0"), (63 * b"0"), None), # For similar reasons as 63, 64 is significant because it would set only one of the # flag bits for name compression if domain-name encoding is assumed. "len_64": (b"\x40" + (64 * b"0"), (64 * b"0"), None), # Ditto for 128, but the other flag "len_128": (b"\x80" + (128 * b"0"), (128 * b"0"), None), # ...and 192 is both flags "len_192": (b"\xC0" + (192 * b"0"), (192 * b"0"), None), # 255 is the max length a character-string can be "len_255": (b"\xFF" + (255 * b"0"), (255 * b"0"), None), "trailing": (b"\x0A" + (10 * b"2") + (17 * b"9"), (10 * b"2"), -17), } @pytest.mark.parametrize( "encoded_data,expected_data,expected_offset", [pytest.param(*value, id=key) for key, value in decode_strings.items()], ) def test_string_parsing( encoded_data: bytes, expected_data: bytes, expected_offset: typing.Optional[int], ): with io.BytesIO(encoded_data) as buffer: name = dns.parse_string(buffer) assert name == expected_data if expected_offset is None: assert buffer.tell() == len(encoded_data) else: # if expected_offset is positive, this will wrap around to the beginning, if # it's negative it won't. 
data_len = len(encoded_data) assert buffer.tell() == (data_len + expected_offset) % data_len def test_dns_sd_txt_parse_single(): """Test that a TXT RDATA section with one key can be parsed properly.""" data = b"\x07foo=bar" extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"bar"} def test_dns_sd_txt_parse_multiple(): """Test that a TXT RDATA section with multiple keys can be parsed properly.""" data = b"\x07foo=bar\x09spam=eggs" extra_data = data + b"\xDE\xAD\xBE\xEF" * 2 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"bar", "spam": b"eggs"} def test_dns_sd_txt_parse_binary(): """Test that a TXT RDATA section with a binary value can be parsed properly.""" # 0xfeed can't be decoded as UTF-8 or ASCII, so it'll thrown an error if it's not # being treated as binary data. data = b"\x06foo=\xFE\xED" extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"\xFE\xED"} def test_dns_sd_txt_parse_long(): """Test that a TXT RDATA section with a long value can be parsed properly.""" # If TXT records are being parsed the same way domain names are, this won't work as # the data is too long to fit in a label. data = b"\xCCfoo=" + b"\xCA\xFE" * 100 extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"\xCA\xFE" * 100} @pytest.mark.parametrize( "record_type,data,expected", [ (dns.QueryType.A, b"\x0A\x00\x00\x2A", "10.0.0.42"), (dns.QueryType.PTR, b"\x03foo\x07example\x03com\x00", "foo.example.com"), (dns.QueryType.TXT, b"\x07foo=bar", {"foo": b"bar"}), ( dns.QueryType.SRV, b"\x00\x0A\x00\x00\x00\x50\x03foo\x07example\x03com\x00", { "priority": 10, "weight": 0, "port": 80, "target": "foo.example.com", }, ), ], # Use the name of the record type as the test id ids=( t.name for t in ( dns.QueryType.A, dns.QueryType.PTR, dns.QueryType.TXT, dns.QueryType.SRV, ) ), ) def test_parse_rdata( record_type: dns.QueryType, data: bytes, expected: typing.Any, ): with io.BytesIO(data) as buffer: assert record_type.parse_rdata(buffer, len(data)) == expected assert buffer.tell() == len(data)
mit
8,634,286,590,532,209,000
34.715827
88
0.591903
false
3.068294
true
false
false
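The encoding rules the tests above pin down (length-prefixed labels, a terminating null byte, labels capped at 63 bytes without splitting a UTF-8 code point) can be illustrated with a small standalone sketch. encode_label and encode_name below are hypothetical helpers written against the expectations in encode_domain_names, not pyatv's actual implementation:

def encode_label(label):
    """Encode one DNS label: UTF-8 bytes capped at 63,
    never cutting a multi-byte code point in half."""
    raw = b""
    for char in label:
        encoded = char.encode("utf-8")
        if len(raw) + len(encoded) > 63:
            break  # keep only whole code points that fit
        raw += encoded
    return bytes([len(raw)]) + raw

def encode_name(name):
    labels = name if isinstance(name, list) else name.split(".")
    return b"".join(encode_label(l) for l in labels if l) + b"\x00"

assert encode_name("example.com") == b"\x07example\x03com\x00"
assert encode_name(".") == b"\x00"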
Ale-/civics
apps/models/migrations/0028_auto_20170924_1153.py
1
1318
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-09-24 11:53 from __future__ import unicode_literals import apps.models.utils from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('models', '0027_auto_20170922_1554'), ] operations = [ migrations.AlterField( model_name='event', name='image', field=models.ImageField(blank=True, help_text='Sube una imagen representativa del evento haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/events/'), verbose_name='Imagen'), ), migrations.AlterField( model_name='initiative', name='image', field=models.ImageField(blank=True, help_text='Sube una imagen representativa de la iniciativa haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/initiatives/'), verbose_name='Imagen'), ), ]
gpl-3.0
-8,622,433,261,067,904,000
49.230769
394
0.688361
false
3.306329
false
false
false
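For context, an upload_to callable such as apps.models.utils.RenameCivicsImage referenced in the migration above is typically a small class whose __call__ builds the storage path. The sketch below is a hypothetical reconstruction of that pattern (the real helper's naming scheme may differ); since migrations serialize a reference to the callable, it has to live at module level in the real code:

import os
import uuid

class RenameImage(object):
    def __init__(self, prefix):
        self.prefix = prefix

    def __call__(self, instance, filename):
        # keep the extension, replace the name with a collision-free token
        ext = os.path.splitext(filename)[1].lower()
        return os.path.join(self.prefix, uuid.uuid4().hex + ext)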
baixuexue123/djmo
utils/csv_response_.py
1
2274
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import csv
import codecs
import cStringIO

from django.http import StreamingHttpResponse
from django.views.generic import View

"""
A StreamingHttpResponse can produce a large file quickly and with low
memory use, because rows are generated and sent one at a time.
"""


class Echo(object):
    """An object that implements just the write method of the file-like interface."""
    def write(self, value):
        """Write the value by returning it, instead of storing in a buffer."""
        return value


class UnicodeWriter(object):
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwargs):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        # The Python 2 csv module cannot handle unicode directly,
        # so encode each cell to UTF-8 before writing.
        self.writer.writerow([
            s.encode("utf-8") if isinstance(s, unicode) else s for s in row
        ])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        value = self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
        return value

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)


class ExampleView(View):
    headers = ('一些', '表头')

    def get(self, request):
        result = (
            ('第一行', '数据1'),
            ('第二行', '数据2')
        )
        echoer = Echo()
        writer = UnicodeWriter(echoer)

        def csv_iterator():
            yield codecs.BOM_UTF8
            yield writer.writerow(self.headers)
            for column in result:
                yield writer.writerow(column)

        response = StreamingHttpResponse(
            (row for row in csv_iterator()),
            content_type="text/csv;charset=utf-8"
        )
        response['Content-Disposition'] = 'attachment;filename="example.csv"'
        return response
mit
-4,467,090,691,717,978,600
27.25641
85
0.599819
false
3.985533
false
false
false
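On Python 3 the queue-and-reencode dance in UnicodeWriter is unnecessary: csv.writer handles unicode natively, so the Echo pattern reduces to the sketch below (assuming Django; this mirrors the streaming recipe in Django's documentation):

import csv
from django.http import StreamingHttpResponse

class Echo:
    def write(self, value):
        return value  # hand each serialized row straight to the response

def stream_csv(rows, filename="example.csv"):
    writer = csv.writer(Echo())
    response = StreamingHttpResponse(
        (writer.writerow(row) for row in rows),
        content_type="text/csv; charset=utf-8",
    )
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    return response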
YaoQ/faceplusplus-demo
hello.py
1
2906
#!/usr/bin/env python2 # Import system libraries and define helper functions import time import sys import os import os.path from pprint import pformat # First import the API class from the SDK from facepp import API from facepp import File def print_result(hint, result): def encode(obj): if type(obj) is unicode: return obj.encode('utf-8') if type(obj) is dict: return {encode(k): encode(v) for (k, v) in obj.iteritems()} if type(obj) is list: return [encode(i) for i in obj] return obj print hint result = encode(result) print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')]) def init(): fdir = os.path.dirname(__file__) with open(os.path.join(fdir, 'apikey.cfg')) as f: exec(f.read()) srv = locals().get('SERVER') return API(API_KEY, API_SECRET, srv = srv) # In this tutorial, you will learn how to call Face ++ APIs and implement a # simple App which could recognize a face image in 3 candidates. api = init() # Here are the person names and their face images IMAGE_DIR = 'http://cn.faceplusplus.com/static/resources/python_demo/' PERSONS = [ ('Jim Parsons', IMAGE_DIR + '1.jpg'), ('Leonardo DiCaprio', IMAGE_DIR + '2.jpg'), ('Andy Liu', IMAGE_DIR + '3.jpg') ] TARGET_IMAGE = IMAGE_DIR + '4.jpg' # Step 1: Detect faces in the 3 pictures and find out their positions and # attributes FACES = {name: api.detection.detect(url = url) for name, url in PERSONS} for name, face in FACES.iteritems(): print_result(name, face) # Step 2: create persons using the face_id for name, face in FACES.iteritems(): rst = api.person.create( person_name = name, face_id = face['face'][0]['face_id']) print_result('create person {}'.format(name), rst) # Step 3: create a new group and add those persons in it rst = api.group.create(group_name = 'standard') print_result('create group', rst) rst = api.group.add_person(group_name = 'standard', person_name = FACES.iterkeys()) print_result('add these persons to group', rst) # Step 4: train the model rst = api.train.identify(group_name = 'standard') print_result('train', rst) # wait for training to complete rst = api.wait_async(rst['session_id']) print_result('wait async', rst) # Step 5: recognize face in a new image rst = api.recognition.identify(group_name = 'standard', url = TARGET_IMAGE) print_result('recognition result', rst) print '=' * 60 print 'The person with highest confidence:', \ rst['face'][0]['candidate'][0]['person_name'] # Finally, delete the persons and group because they are no longer needed api.group.delete(group_name = 'standard') api.person.delete(person_name = FACES.iterkeys()) # Congratulations! You have finished this tutorial, and you can continue # reading our API document and start writing your own App using Face++ API! # Enjoy :)
gpl-2.0
-6,944,040,991,084,908,000
31.288889
83
0.67309
false
3.390898
false
false
false
noskill/virt-manager
virtManager/connect.py
1
15892
# # Copyright (C) 2006, 2013 Red Hat, Inc. # Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA. # import os import logging import socket from gi.repository import Gio from gi.repository import GObject from gi.repository import Gtk from . import uiutil from .baseclass import vmmGObjectUI (HV_QEMU, HV_XEN, HV_LXC, HV_QEMU_SESSION, HV_BHYVE) = range(5) (CONN_SSH, CONN_TCP, CONN_TLS) = range(3) def current_user(): try: import getpass return getpass.getuser() except: return "" def default_conn_user(conn): if conn == CONN_SSH: return "root" return current_user() class vmmConnect(vmmGObjectUI): __gsignals__ = { "completed": (GObject.SignalFlags.RUN_FIRST, None, [str, bool]), "cancelled": (GObject.SignalFlags.RUN_FIRST, None, []), } def __init__(self): vmmGObjectUI.__init__(self, "connect.ui", "vmm-open-connection") self.builder.connect_signals({ "on_hypervisor_changed": self.hypervisor_changed, "on_transport_changed": self.transport_changed, "on_hostname_combo_changed": self.hostname_combo_changed, "on_connect_remote_toggled": self.connect_remote_toggled, "on_username_entry_changed": self.username_changed, "on_hostname_changed": self.hostname_changed, "on_cancel_clicked": self.cancel, "on_connect_clicked": self.open_conn, "on_vmm_open_connection_delete_event": self.cancel, }) self.browser = None self.browser_sigs = [] # Set this if we can't resolve 'hostname.local': means avahi # prob isn't configured correctly, and we should strip .local self.can_resolve_local = None # Plain hostname resolve failed, means we should just use IP addr self.can_resolve_hostname = None self.set_initial_state() self.dbus = None self.avahiserver = None try: self.dbus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None) self.avahiserver = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", "/", "org.freedesktop.Avahi.Server", None) except Exception, e: logging.debug("Couldn't contact avahi: %s", str(e)) self.reset_state() @staticmethod def default_uri(always_system=False): if os.path.exists('/var/lib/xen'): if (os.path.exists('/dev/xen/evtchn') or os.path.exists("/proc/xen")): return 'xen:///' if (os.path.exists("/usr/bin/qemu") or os.path.exists("/usr/bin/qemu-kvm") or os.path.exists("/usr/bin/kvm") or os.path.exists("/usr/libexec/qemu-kvm")): if always_system or os.geteuid() == 0: return "qemu:///system" else: return "qemu:///session" return None def cancel(self, ignore1=None, ignore2=None): logging.debug("Cancelling open connection") self.close() self.emit("cancelled") return 1 def close(self, ignore1=None, ignore2=None): logging.debug("Closing open connection") self.topwin.hide() if self.browser: for obj, sig in self.browser_sigs: obj.disconnect(sig) self.browser_sigs = [] self.browser = None def show(self, parent, reset_state=True): logging.debug("Showing open connection") if reset_state: 
self.reset_state() self.topwin.set_transient_for(parent) self.topwin.present() self.start_browse() def _cleanup(self): pass def set_initial_state(self): self.widget("connect").grab_default() combo = self.widget("hypervisor") model = Gtk.ListStore(str) model.append(["QEMU/KVM"]) model.append(["Xen"]) model.append(["LXC (Linux Containers)"]) model.append(["QEMU/KVM user session"]) if self.config.with_bhyve: model.append(["Bhyve"]) combo.set_model(model) uiutil.set_combo_text_column(combo, 0) combo = self.widget("transport") model = Gtk.ListStore(str) model.append(["SSH"]) model.append(["TCP (SASL, Kerberos)"]) model.append(["SSL/TLS with certificates"]) combo.set_model(model) uiutil.set_combo_text_column(combo, 0) # Hostname combo box entry hostListModel = Gtk.ListStore(str, str, str) host = self.widget("hostname") host.set_model(hostListModel) host.set_entry_text_column(2) hostListModel.set_sort_column_id(2, Gtk.SortType.ASCENDING) def reset_state(self): self.set_default_hypervisor() self.widget("transport").set_active(0) self.widget("autoconnect").set_sensitive(True) self.widget("autoconnect").set_active(True) self.widget("hostname").get_model().clear() self.widget("hostname").get_child().set_text("") self.widget("connect-remote").set_active(False) self.widget("username-entry").set_text("") self.connect_remote_toggled(self.widget("connect-remote")) self.populate_uri() def is_remote(self): # Whether user is requesting a remote connection return self.widget("connect-remote").get_active() def set_default_hypervisor(self): default = self.default_uri(always_system=True) if not default or default.startswith("qemu"): self.widget("hypervisor").set_active(HV_QEMU) elif default.startswith("xen"): self.widget("hypervisor").set_active(HV_XEN) def add_service(self, interface, protocol, name, typ, domain, flags): ignore = flags try: # Async service resolving res = self.avahiserver.ServiceResolverNew("(iisssiu)", interface, protocol, name, typ, domain, -1, 0) resint = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", res, "org.freedesktop.Avahi.ServiceResolver", None) def cb(proxy, sender, signal, args): ignore = proxy ignore = sender if signal == "Found": self.add_conn_to_list(*args) sig = resint.connect("g-signal", cb) self.browser_sigs.append((resint, sig)) except Exception, e: logging.exception(e) def remove_service(self, interface, protocol, name, typ, domain, flags): ignore = domain ignore = protocol ignore = flags ignore = interface ignore = typ try: model = self.widget("hostname").get_model() name = str(name) for row in model: if row[0] == name: model.remove(row.iter) except Exception, e: logging.exception(e) def add_conn_to_list(self, interface, protocol, name, typ, domain, host, aprotocol, address, port, text, flags): ignore = domain ignore = protocol ignore = flags ignore = interface ignore = typ ignore = text ignore = aprotocol ignore = port try: model = self.widget("hostname").get_model() for row in model: if row[2] == str(name): # Already present in list return host = self.sanitize_hostname(str(host)) model.append([str(address), str(host), str(name)]) except Exception, e: logging.exception(e) def start_browse(self): if self.browser or not self.avahiserver: return # Call method to create new browser, and get back an object path for it. interface = -1 # physical interface to use? -1 is unspec protocol = 0 # 0 = IPv4, 1 = IPv6, -1 = Unspecified service = '_libvirt._tcp' # Service name to poll for flags = 0 # Extra option flags domain = "" # Domain to browse in. 
NULL uses default bpath = self.avahiserver.ServiceBrowserNew("(iissu)", interface, protocol, service, domain, flags) # Create browser interface for the new object self.browser = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", bpath, "org.freedesktop.Avahi.ServiceBrowser", None) def cb(proxy, sender, signal, args): ignore = proxy ignore = sender if signal == "ItemNew": self.add_service(*args) elif signal == "ItemRemove": self.remove_service(*args) self.browser_sigs.append((self.browser, self.browser.connect("g-signal", cb))) def hostname_combo_changed(self, src): model = src.get_model() txt = src.get_child().get_text() row = None for currow in model: if currow[2] == txt: row = currow break if not row: return ip = row[0] host = row[1] entry = host if not entry: entry = ip self.widget("hostname").get_child().set_text(entry) def hostname_changed(self, src_ignore): self.populate_uri() def hypervisor_changed(self, src): is_session = (src.get_active() == HV_QEMU_SESSION) uiutil.set_grid_row_visible( self.widget("session-warning-box"), is_session) uiutil.set_grid_row_visible( self.widget("connect-remote"), not is_session) uiutil.set_grid_row_visible( self.widget("username-entry"), not is_session) uiutil.set_grid_row_visible( self.widget("hostname"), not is_session) uiutil.set_grid_row_visible( self.widget("transport"), not is_session) if is_session: self.widget("connect-remote").set_active(False) self.populate_uri() def username_changed(self, src_ignore): self.populate_uri() def connect_remote_toggled(self, src_ignore): is_remote = self.is_remote() self.widget("hostname").set_sensitive(is_remote) self.widget("transport").set_sensitive(is_remote) self.widget("autoconnect").set_active(not is_remote) self.widget("username-entry").set_sensitive(is_remote) self.populate_default_user() self.populate_uri() def transport_changed(self, src_ignore): self.populate_default_user() self.populate_uri() def populate_uri(self): uri = self.generate_uri() self.widget("uri-entry").set_text(uri) def populate_default_user(self): conn = self.widget("transport").get_active() default_user = default_conn_user(conn) self.widget("username-entry").set_text(default_user) def generate_uri(self): hv = self.widget("hypervisor").get_active() conn = self.widget("transport").get_active() host = self.widget("hostname").get_child().get_text().strip() user = self.widget("username-entry").get_text() is_remote = self.is_remote() hvstr = "" if hv == HV_XEN: hvstr = "xen" elif hv == HV_QEMU or hv == HV_QEMU_SESSION: hvstr = "qemu" elif hv == HV_BHYVE: hvstr = "bhyve" else: hvstr = "lxc" addrstr = "" if user: addrstr += user + "@" addrstr += host hoststr = "" if not is_remote: hoststr = ":///" else: if conn == CONN_TLS: hoststr = "+tls://" if conn == CONN_SSH: hoststr = "+ssh://" if conn == CONN_TCP: hoststr = "+tcp://" hoststr += addrstr + "/" uri = hvstr + hoststr if hv in (HV_QEMU, HV_BHYVE): uri += "system" elif hv == HV_QEMU_SESSION: uri += "session" return uri def validate(self): is_remote = self.is_remote() host = self.widget("hostname").get_child().get_text() if is_remote and not host: return self.err.val_err(_("A hostname is required for " "remote connections.")) return True def open_conn(self, ignore): if not self.validate(): return auto = False if self.widget("autoconnect").get_sensitive(): auto = self.widget("autoconnect").get_active() uri = self.generate_uri() logging.debug("Generate URI=%s, auto=%s", uri, auto) self.close() self.emit("completed", uri, auto) def sanitize_hostname(self, host): if host == 
"linux" or host == "localhost": host = "" if host.startswith("linux-"): tmphost = host[6:] try: long(tmphost) host = "" except ValueError: pass if host: host = self.check_resolve_host(host) return host def check_resolve_host(self, host): # Try to resolve hostname # # Avahi always uses 'hostname.local', but for some reason # fedora 12 out of the box can't resolve '.local' names # Attempt to resolve the name. If it fails, remove .local # if present, and try again if host.endswith(".local"): if self.can_resolve_local is False: host = host[:-6] elif self.can_resolve_local is None: try: socket.getaddrinfo(host, None) except: logging.debug("Couldn't resolve host '%s'. Stripping " "'.local' and retrying.", host) self.can_resolve_local = False host = self.check_resolve_host(host[:-6]) else: self.can_resolve_local = True else: if self.can_resolve_hostname is False: host = "" elif self.can_resolve_hostname is None: try: socket.getaddrinfo(host, None) except: logging.debug("Couldn't resolve host '%s'. Disabling " "host name resolution, only using IP addr", host) self.can_resolve_hostname = False else: self.can_resolve_hostname = True return host
gpl-2.0
1,945,841,404,944,853,200
32.527426
80
0.543418
false
4.083248
false
false
false
sravangottapu/Ip_Scanner
ip_scanner.py
1
1187
import threading
import time
import shlex
import sys
import subprocess

alive = True
f = open("list.txt", "w")


class myThread(threading.Thread):
    def __init__(self, var, ip):
        threading.Thread.__init__(self)
        self.var = var
        self.ip = ip

    def run(self):
        if alive:
            ping_ip(self.var, self.ip)
            print("Thread exited")


def ping_ip(cmd, ip):
    # ping exits non-zero for unreachable hosts, which check_output
    # raises as CalledProcessError
    try:
        subprocess.check_output(cmd)
        f.write(ip + "\n")
        print(ip + " reachable")
    except subprocess.CalledProcessError:
        print(ip + " not reachable")


first = int(input("Enter the first IP: "))
second = int(input("Enter the second IP: "))

start = time.time()
cmd_no_ip = "ping -c1 "
for i in range(first, second):
    ip = "172.16.114." + str(i)
    cmd = shlex.split(cmd_no_ip + ip)
    try:
        thread1 = myThread(cmd, ip)
        thread1.start()
        thread1.join(1)
    except RuntimeError:
        print("Could not start thread for " + ip)

alive = False
elapsed = time.time() - start
f.close()
print("Total time: " + str(elapsed))
sys.exit()
gpl-3.0
6,060,178,347,985,288,000
21.396226
45
0.581297
false
3.131926
false
false
false
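A thread pool keeps the same ping-sweep idea shorter and avoids the global flag and per-IP Thread objects. A standard-library-only sketch (the -W1 timeout flag assumes Linux ping):

import shlex
import subprocess
from concurrent.futures import ThreadPoolExecutor

def is_reachable(ip):
    cmd = shlex.split("ping -c1 -W1 " + ip)
    return subprocess.call(cmd, stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0

def scan(prefix, first, last, workers=32):
    ips = [prefix + str(i) for i in range(first, last)]
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return [ip for ip, ok in zip(ips, pool.map(is_reachable, ips)) if ok]

# e.g. scan("172.16.114.", 1, 255)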
mbj4668/pyang
pyang/plugins/threegpp.py
1
11115
"""3GPP usage guidelines plugin See 3GPP TS 32.160 clause 6.2 Copyright Ericsson 2020 Author balazs.lengyel@ericsson.com Revision 2020-11-25 Checks implemented 6.2.1.2 Module name starts with _3gpp- 6.2.1.3 namespace pattern urn:3gpp:sa5:<module-name> 6.2.1.4-a prefix ends with 3gpp 6.2.1.4-b prefix.length <= 10 char 6.2.1.5 yang 1.1 missing 6.2.1.5 yang 1.1 incorrect 6.2.1.6-a anydata 6.2.1.6-b anyxml 6.2.1.6-c rpc 6.2.1.6-d deviation 6.2.1.9 description not needed for enum, bit, choice, container, leaf-list, leaf, typedef, grouping, augment, uses 6.2.1.b-a module-description-missing 6.2.1.b-b module-organization-missing 6.2.1.b-c module-organization includes 3gpp 6.2.1.b-d module-contact-missing 6.2.1.b-d module-contact-incorrect 6.2.1.c module-reference-missing 6.2.1.c module-reference-incorrect 6.2.1.d-a module-revision-missing 6.2.1.d-a module-revision-reference-missing 6.2.1.e default meaning 6.2.1.f-a linelength > 80 6.2.1.f-b no-tabs 6.2.1.f-c no-strange-chars 6.2.1.f-d no-CR-chars 6.2-a no-containers """ import optparse import re import io import sys from pyang import plugin from pyang import statements from pyang import error from pyang.error import err_add from pyang.plugins import lint def pyang_plugin_init(): plugin.register_plugin(THREEGPPlugin()) class THREEGPPlugin(lint.LintPlugin): def __init__(self): lint.LintPlugin.__init__(self) self.modulename_prefixes = ['_3gpp'] def add_opts(self, optparser): optlist = [ optparse.make_option("--3gpp", dest="threegpp", action="store_true", help="Validate the module(s) according to " \ "3GPP rules."), ] optparser.add_options(optlist) def setup_ctx(self, ctx): if not ctx.opts.threegpp: return self._setup_ctx(ctx) error.add_error_code( '3GPP_BAD_NAMESPACE_VALUE', 3, '3GPP: the namespace should be urn:3gpp:sa5:%s') statements.add_validation_fun( 'grammar', ['namespace'], lambda ctx, s: self.v_chk_namespace(ctx, s)) error.add_error_code( '3GPP_BAD_PREFIX_VALUE', 3, '3GPP: the prefix should end with 3gpp') error.add_error_code( '3GPP_TOO_LONG_PREFIX', 3, '3GPP: the prefix should not be longer than 13 characters') statements.add_validation_fun( 'grammar', ['prefix'], lambda ctx, s: self.v_chk_prefix(ctx, s)) error.add_error_code( '3GPP_BAD_YANG_VERSION', 3, '3GPP: the yang-version should be 1.1') statements.add_validation_fun( 'grammar', ['yang-version'], lambda ctx, s: self.v_chk_yang_version(ctx, s)) # check that yang-version is present. If not, # it defaults to 1. 
which is bad for 3GPP statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_yang_version_present(ctx, s)) error.add_error_code( '3GPP_STATEMENT_NOT_ALLOWED', 3, ('3GPP: YANG statements anydata, anyxml, deviation, rpc ' 'should not be used')) statements.add_validation_fun( 'grammar', ['anydata' , 'anyxml' , 'deviation' , 'rpc'], lambda ctx, s: self.v_chk_not_allowed_statements(ctx, s)) error.add_error_code( '3GPP_BAD_ORGANIZATION', 3, '3GPP: organization statement must include 3GPP') statements.add_validation_fun( 'grammar', ['organization'], lambda ctx, s: self.v_chk_organization(ctx, s)) error.add_error_code( '3GPP_BAD_CONTACT', 3, '3GPP: incorrect contact statement') statements.add_validation_fun( 'grammar', ['contact'], lambda ctx, s: self.v_chk_contact(ctx, s)) error.add_error_code( '3GPP_MISSING_MODULE_REFERENCE', 3, '3GPP: the module should have a reference substatement') statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_module_reference_present(ctx, s)) error.add_error_code( '3GPP_BAD_MODULE_REFERENCE', 3, '3GPP: the module\'s reference substatement is incorrect') statements.add_validation_fun( 'grammar', ['reference'], lambda ctx, s: self.v_chk_module_reference(ctx, s)) error.add_error_code( '3GPP_TAB_IN_FILE', 3, '3GPP: tab characters should not be used in YANG modules') error.add_error_code( '3GPP_WHITESPACE_AT_END_OF_LINE', 3, '3GPP: extra whitespace should not be added at the end of the line') error.add_error_code( '3GPP_LONG_LINE', 3, '3GPP: line longer than 80 characters') error.add_error_code( '3GPP_CR_IN_FILE', 3, ('3GPP: Carriage-return characters should not be used. ' 'End-of-line should be just one LF character')) error.add_error_code( '3GPP_NON_ASCII', 4, '3GPP: the module should only use ASCII characters') statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_3gpp_format(ctx, s)) error.add_error_code( '3GPP_LIMITED_CONTAINER_USE', 4, ('3GPP: containers should only be used to contain the attributes ' 'of a class')) statements.add_validation_fun( 'grammar', ['container'], lambda ctx, s: self.v_chk_limited_container_use(ctx, s)) def pre_validate_ctx(self, ctx, modules): if ctx.opts.threegpp: ctx.canonical = False return def v_chk_namespace(self, ctx, stmt): r = 'urn:3gpp:sa5:' + stmt.i_module.arg +'$' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_NAMESPACE_VALUE', stmt.i_module.arg) def v_chk_prefix(self, ctx, stmt): if stmt.parent.keyword != 'module' : return r = '.+3gpp$' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_PREFIX_VALUE',()) if len(stmt.arg) > 13 : err_add(ctx.errors, stmt.pos, '3GPP_TOO_LONG_PREFIX',()) def v_chk_yang_version_present(self, ctx, stmt): yang_version_present = False for stmt in stmt.substmts: if stmt.keyword == 'yang-version' : yang_version_present = True if not(yang_version_present) : err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',()) def v_chk_yang_version(self, ctx, stmt): r = '1.1' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',()) def v_chk_not_allowed_statements(self, ctx, stmt): err_add(ctx.errors, stmt.pos, '3GPP_STATEMENT_NOT_ALLOWED',()) def v_chk_organization(self, ctx, stmt): r = '3GPP' if re.search(r, stmt.arg, re.IGNORECASE) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_ORGANIZATION',()) def v_chk_contact(self, ctx, stmt): if stmt.arg != ('https://www.3gpp.org/DynaReport/' 'TSG-WG--S5--officials.htm?Itemid=464'): err_add(ctx.errors, stmt.pos, 
'3GPP_BAD_CONTACT',()) def v_chk_module_reference_present(self, ctx, stmt): module_reference_present = False for stmt in stmt.substmts: if stmt.keyword == 'reference' : module_reference_present = True if not(module_reference_present) : err_add(ctx.errors, stmt.pos, '3GPP_MISSING_MODULE_REFERENCE',()) def v_chk_module_reference(self, ctx, stmt): if stmt.parent.keyword != 'module' : return if not(stmt.arg.startswith('3GPP TS ')) : err_add(ctx.errors, stmt.pos, '3GPP_BAD_MODULE_REFERENCE',()) def v_chk_3gpp_format(self, ctx, stmt): if (not(stmt.arg.startswith("_3gpp"))): return filename = stmt.pos.ref try: fd = io.open(filename, "r", encoding="utf-8", newline='') pos = error.Position(stmt.pos.ref) pos.top = stmt lineno = 0 for line in fd: lineno += 1 pos.line = lineno # no tabs if (line.find('\t') != -1 ): err_add(ctx.errors, pos, '3GPP_TAB_IN_FILE',()) # no whitespace after the line # removed for now as there are just too many of these # errors # if (re.search('.*\s+\n',line) != None ): # err_add(ctx.errors, self.pos, # '3GPP_WHITESPACE_AT_END_OF_LINE',()) # lines shorter then 80 char if (len(line) > 82 ): err_add(ctx.errors, pos, '3GPP_LONG_LINE',()) # EOL should be just NL no CR if (line.find('\r') != -1 ): err_add(ctx.errors, pos, '3GPP_CR_IN_FILE',()) # only us-ascii chars try: line.encode('ascii') except UnicodeEncodeError: err_add(ctx.errors, pos, '3GPP_NON_ASCII',()) except IOError as ex: sys.stderr.write("error %s: %s\n" % (filename, ex)) sys.exit(1) except UnicodeDecodeError as ex: s = str(ex).replace('utf-8', 'utf8') sys.stderr.write("%s: unicode error: %s\n" % (filename, s)) sys.exit(1) def v_chk_limited_container_use(self, ctx, stmt): if stmt.arg != 'attributes' or stmt.parent.keyword != 'list' : err_add(ctx.errors, stmt.pos, '3GPP_LIMITED_CONTAINER_USE',()) def post_validate_ctx(self, ctx, modules): if not ctx.opts.threegpp: return """Remove some lint errors that 3GPP considers acceptable""" for ctx_error in ctx.errors[:]: if ((ctx_error[1] == "LINT_MISSING_REQUIRED_SUBSTMT" or ctx_error[1] == "LINT_MISSING_RECOMMENDED_SUBSTMT") and ctx_error[2][2] == 'description' and (ctx_error[2][1] == 'enum' or ctx_error[2][1] == 'bit' or ctx_error[2][1] == 'choice' or ctx_error[2][1] == 'container' or ctx_error[2][1] == 'leaf-list' or ctx_error[2][1] == 'leaf' or ctx_error[2][1] == 'typedef' or ctx_error[2][1] == 'grouping' or ctx_error[2][1] == 'augment' or ctx_error[2][1] == 'uses')): # remove error from ctx ctx.errors.remove(ctx_error) return
isc
1,029,402,971,902,015,200
34.970874
79
0.544939
false
3.382532
false
false
false
silvio/elbe
elbepack/xmldefaults.py
1
4019
import random
import string
import sys

armel_defaults = {
    "arch": "armel",
    "size": "20G",
    "mem": "256",
    "interpreter": "qemu-system-arm",
    "userinterpr": "qemu-arm-static",
    "console": "ttyAMA0,115200n1",
    "machine": "versatilepb",
    "nicmodel": "smc91c111"
}

armel_virtio_defaults = {
    "arch": "armel",
    "size": "20G",
    "mem": "256",
    "interpreter": "qemu-system-arm-virtio",
    "userinterpr": "qemu-arm-static",
    "console": "ttyAMA0,115200n1",
    "machine": "versatilepb",
    "nicmodel": "smc91c111"
}

armhf_defaults = {
    "arch": "armhf",
    "size": "20G",
    "mem": "256",
    "interpreter": "qemu-system-arm",
    "userinterpr": "qemu-arm-static",
    "console": "ttyAMA0,115200n1",
    "machine": "versatilepb -cpu cortex-a9",
    "nicmodel": "smc91c111"
}

armhf_virtio_defaults = {
    "arch": "armhf",
    "size": "20G",
    "mem": "256",
    "interpreter": "qemu-system-arm-virtio",
    "userinterpr": "qemu-arm-static",
    "console": "ttyAMA0,115200n1",
    "machine": "versatilepb -cpu cortex-a9",
    "nicmodel": "virtio"
}

ppc_defaults = {
    "arch": "powerpc",
    "size": "20G",
    "mem": "256",
    "interpreter": "qemu-system-ppc",
    "userinterpr": "qemu-ppc-static",
    "console": "ttyPZ0,115200n1",
    "machine": "mac99",
    "nicmodel": "rtl8139"
}

amd64_defaults = {
    "arch": "amd64",
    "size": "20G",
    "mem": "1024",
    "interpreter": "kvm",
    "console": "ttyS0,115200n1",
    "machine": "pc",
    "nicmodel": "virtio"
}

i386_defaults = {
    "arch": "i386",
    "size": "20G",
    "mem": "1024",
    "interpreter": "kvm",
    "console": "ttyS0,115200n1",
    "machine": "pc",
    "nicmodel": "virtio"
}

defaults = {
    "armel": armel_defaults,
    "armel-virtio": armel_virtio_defaults,
    "armhf": armhf_defaults,
    "armhf-virtio": armhf_virtio_defaults,
    "ppc": ppc_defaults,
    "amd64": amd64_defaults,
    "i386": i386_defaults,
    "nodefaults": {}
}

xml_field_path = {
    "arch": "project/buildimage/arch",
    "size": "project/buildimage/size",
    "mem": "project/buildimage/mem",
    "interpreter": "project/buildimage/interpreter",
    "console": "project/buildimage/console",
    "machine": "project/buildimage/machine",
    "nicmodel": "project/buildimage/NIC/model"
}


def get_random_mac():
    # randint is inclusive at both ends, so the upper bound must be 255;
    # 256 would occasionally yield a three-digit hex octet
    binaddr = [random.randint(0, 255) for i in range(6)]
    binaddr[0] &= 0xfe  # clear the multicast bit
    binaddr[0] |= 0x02  # set the locally administered bit

    s = map(lambda x: "%02x" % x, binaddr)
    return string.join(s, ":")


class ElbeDefaults(object):

    def __init__(self, build_type):

        if not defaults.has_key(build_type):
            print "Please specify a valid buildtype."
            print "Valid buildtypes:"
            print defaults.keys()
            sys.exit(20)

        self.defaults = defaults[build_type]
        self.defaults["nicmac"] = get_random_mac()

    def __getitem__(self, key):
        if self.defaults.has_key(key):
            return self.defaults[key]

        print "No default value has been provided."
        print "Either use a valid buildtype, or provide the field in the XML file."
        print "The location in the XML is here:"
        print xml_field_path[key]
        sys.exit(20)
gpl-3.0
1,458,241,890,226,960,400
28.123188
91
0.464543
false
3.497824
false
false
false
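The two bit operations in get_random_mac are what make the generated address safe to use: clearing bit 0 of the first octet keeps the MAC unicast, and setting bit 1 marks it locally administered, so it cannot collide with a vendor-assigned address. A small checker sketch:

def is_local_unicast(mac):
    first_octet = int(mac.split(":")[0], 16)
    unicast = (first_octet & 0x01) == 0    # multicast bit cleared
    local = (first_octet & 0x02) == 0x02   # locally administered bit set
    return unicast and local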
shaarli/python-shaarli-client
setup.py
1
2012
#!/usr/bin/env python3 """Setup script for shaarli-client""" import codecs import os import re from setuptools import find_packages, setup def get_long_description(): """Reads the main README.rst to get the program's long description""" with codecs.open('README.rst', 'r', 'utf-8') as f_readme: return f_readme.read() def get_package_metadata(attribute): """Reads metadata from the main package's __init__""" with open(os.path.join('shaarli_client', '__init__.py'), 'r') as f_init: return re.search( r'^__{attr}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(attr=attribute), f_init.read(), re.MULTILINE ).group(1) setup( name=get_package_metadata('title'), version=get_package_metadata('version'), description=get_package_metadata('brief'), long_description=get_long_description(), author=get_package_metadata('author'), maintainer='VirtualTam', maintainer_email='virtualtam@flibidi.net', license='MIT', url='https://github.com/shaarli/python-shaarli-client', keywords='bookmark bookmarking shaarli social', packages=find_packages(exclude=['tests.*', 'tests']), entry_points={ 'console_scripts': [ 'shaarli = shaarli_client.main:main', ], }, install_requires=[ 'requests >= 2.25', 'pyjwt == 2.0.1' ], classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Topic :: Utilities', ] )
mit
-5,212,573,942,387,458,000
31.451613
76
0.598907
false
3.839695
false
false
false
de-tour/detour
server/handling.py
1
6094
import cherrypy from cherrypy.lib.static import serve_file from cherrypy.process.plugins import SimplePlugin from queue import Queue, Empty from collections import namedtuple from concurrent import Crawler import parsing import json import traceback import random from urllib.parse import unquote from ws4py.websocket import WebSocket from ws4py.messaging import TextMessage PoolItem = namedtuple('PoolItem', ['verb', 'args', 'output']) class Search: def __init__(self): self.engines_suggest = [] self.engines_search = [] self.add_engines(parsing.sites) self.pool_suggest = Crawler(cls_list=self.engines_suggest) self.pool_search = Crawler(cls_list=self.engines_search) def start(self): self.pool_suggest.start() self.pool_search.start() def add_engines(self, engines): for Engine in engines: if parsing.is_balancer(Engine): self.add_engines(Engine.balance()) else: if parsing.can_suggest(Engine): self.engines_suggest.append(Engine) if parsing.can_search(Engine): self.engines_search.append(Engine) def stop(self): self.pool_suggest.stop() self.pool_search.stop() def suggest(self, keyword): if not keyword: yield [] return output = Queue() k = len(self.engines_suggest) // 2 for engine in random.sample(self.engines_suggest, k): self.pool_suggest.put(engine, PoolItem('suggest', (keyword,), output)) failure = 0 result_set = set() while failure < 1: try: result_set.update(output.get(timeout=1)) except Empty: failure += 1 ordered_results = parsing.rank_list(result_set, keyword)[0:10] result_set = set(ordered_results) yield ordered_results def search(self, keyword, from_id): if not keyword: yield [] return output = Queue() for engine in self.engines_search: if not parsing.is_meta(engine): self.pool_search.put(engine, PoolItem('search', (keyword, from_id + 1, None), output)) else: for site in parsing.domains: filtered = engine.site_filter(site, keyword) self.pool_search.put(engine, PoolItem('search', (filtered, from_id + 1, None), output)) failure = 0 result_set = set() while failure < 5: try: new_results = set(output.get(timeout=1)) print('Search %s: %d unique results' % (repr(keyword), len(result_set))) yield parsing.rank_list(new_results - result_set, keyword) result_set.update(new_results) except Empty: failure += 1 class WSHandler(WebSocket): def opened(self): cherrypy.engine.log('WebSocket opened') def received_message(self, msg): cherrypy.engine.log('Received ' + str(msg)) try: params = json.loads(str(msg)) verb = params['verb'] if verb == 'suggest': self.ws_suggest(unquote(params['keyword'])) elif verb == 'search': self.ws_search(unquote(params['keyword']), params['from_id']) else: raise ValueError('Unknown verb. 
(suggest, search)')
        except (KeyError, AttributeError, TypeError, ValueError) as e:
            cherrypy.engine.log('Handler Exception - %s' % repr(e))
            cherrypy.engine.log(traceback.format_exc())

    def closed(self, code, reason):
        cherrypy.engine.log('A client left')

    def ws_suggest(self, keyword):
        results = Queue()
        cherrypy.engine.publish('detour_suggest', keyword, results)
        generator = results.get()
        for item in generator:
            if item:
                msg = json.dumps({'from': keyword, 'results': item})
                cherrypy.engine.publish('websocket-broadcast', msg)

    def ws_search(self, keyword, from_id):
        results = Queue()
        cherrypy.engine.publish('detour_search', keyword, from_id, results)
        generator = results.get()
        for r_list in generator:
            if r_list:
                d = {
                    'results': [r.items() for r in r_list],
                    'keyword': keyword,
                    'from_id': from_id,
                }
                cherrypy.engine.publish('websocket-broadcast', json.dumps(d))


class Daemon(SimplePlugin):

    def __init__(self, bus):
        SimplePlugin.__init__(self, bus)

    def start(self):
        self.bus.log('Daemon plugin starts')
        self.priority = 70
        self.search_daemon = Search()
        self.search_daemon.start()
        self.bus.subscribe('detour_suggest', self.suggest_handler)
        self.bus.subscribe('detour_search', self.search_handler)

    def stop(self):
        self.bus.unsubscribe('detour_suggest', self.suggest_handler)
        self.bus.unsubscribe('detour_search', self.search_handler)
        self.search_daemon.stop()
        self.bus.log('Daemon plugin stops')

    def suggest_handler(self, keyword, bucket):
        self.bus.log('Suggest ' + repr(keyword))
        generator = self.search_daemon.suggest(keyword)
        print("suggest_handler: got generator")
        bucket.put(generator)

    def search_handler(self, keyword, from_id, bucket):
        self.bus.log('Search ' + repr(keyword) + ' from ID ' + repr(from_id))
        generator = self.search_daemon.search(keyword, from_id)
        print("search_handler: got generator")
        bucket.put(generator)


class Detour:

    def __init__(self, public):
        self.public = public

    @cherrypy.expose
    def index(self, q=None):
        return serve_file(self.public + '/index.html')

    @cherrypy.expose
    def ws(self):
        handler = cherrypy.request.ws_handler
        cherrypy.log("Handler created: %s" % repr(handler))
gpl-3.0
3,814,767,568,741,064,000
32.119565
107
0.582212
false
4.154056
false
false
false
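The handlers above return their generators to the caller through a Queue passed along with the published message, a rendezvous pattern layered on a fire-and-forget bus. The mechanics in isolation, with a toy bus standing in for cherrypy.engine:

from queue import Queue

class Bus(object):
    def __init__(self):
        self.handlers = {}

    def subscribe(self, channel, fn):
        self.handlers.setdefault(channel, []).append(fn)

    def publish(self, channel, *args):
        for fn in self.handlers.get(channel, []):
            fn(*args)

bus = Bus()
bus.subscribe('detour_suggest',
              lambda kw, bucket: bucket.put(iter([[kw + '1', kw + '2']])))

results = Queue()
bus.publish('detour_suggest', 'py', results)  # handler runs synchronously here
print(list(results.get()))                    # [['py1', 'py2']]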
niklasberglund/freesprints
source/freesprints/__init__.py
1
9191
import pygame, sys import pygame.font from pygame.locals import * import logging import fs_menu import helpers as helpers import plugins import os.path import race import hardware import defaults import logging from rainbow_logging_handler import RainbowLoggingHandler DISPLAY_RESOLUTION = (1024, 768) # platform-specific imports if helpers.is_running_on_rpi():# running on Raspberry Pi import RPi.GPIO import os print "ON RASPBERRY PI" #os.environ['SDL_VIDEODRIVER']="fbcon" #os.environ["SDL_FBDEV"] = "/dev/fb1" print "SET DRIVER" else: # running on computer import FakeRPi.GPIO class Application(object): instance = None state = None # application state constants STATE_MAINMENU = 0 STATE_INGAME = 1 # member variables window_surface = None menu_surface = None menu = None state = STATE_MAINMENU plugin_loader = None roller_controller = None race_options = None race_object = None selected_plugin_index = 0 # 0 by default. this should ideally be restored from stored settings def __init__(self): print "Application.__init__" pygame.font.init() menu_options_dict = { "font_path": "fonts/Cave-Story.ttf", "font_size": 42, "color_background": (0, 0, 0), "color_text": (255, 255, 255), "color_text_highlight": (100, 20, 45) } menu_structure = [ { "title": "New race", "callback": self.start_race, "submenu": [ { "title": "Start", "callback": self.start_race }, { "title": "Race visualizer", "callback": None, "submenu_populator_callback": self.populate_visualizers, "identifier": "race_visualizer_selection" }, { "title": "Number of rollers", "input": { "type": "int", "verifier": None, "value": "2" }, "callback": self.start_race }, { "title": "Roller diameter(mm)", "input": { "type": "int", "verifier": None, "value": "200" } } ] }, { "title": "Options", "callback": self.show_options }, { "title": "Exit", "callback": self.exit } ] #self.window_surface = pygame.display.set_mode((500, 400), pygame.FULLSCREEN, 32) pygame.display.init() self.window_surface = pygame.display.set_mode(defaults.RESOLUTION, 0, 32) menu_options = fs_menu.MenuOptions(menu_options_dict) self.menu = fs_menu.Menu(self.window_surface, menu_structure, menu_options) self.roller_controller = hardware.RollerController() def load_plugins(self): self.plugin_loader = plugins.PluginLoader() def start_race(self): print "start game" self.state = self.STATE_INGAME race_options = race.Options() race_participants = ([ race.Participant("Niklas", 7, Color("red")), race.Participant("Some loser", 11, Color("blue")) ]) self.race_object = race.Race(race_options, race_participants) plugins = self.plugin_loader.getAvailablePlugins() self.race_object.start() plugins[self.selected_plugin_index].start(self.race_object) def show_options(self): print "show options" def populate_visualizers(self): print "populate_visualizers" submenu = [] pluginIndex = 0 for plugin in self.plugin_loader.getAvailablePlugins(): submenu.append({ "title": plugin.name, "callback": self.select_plugin, "tag": pluginIndex }) pluginIndex = pluginIndex + 1 return submenu def select_plugin(self, plugin_index): print "selected plugin with index " + str(plugin_index) self.selected_plugin_index = plugin_index def exit(self): pygame.quit() sys.exit() def hide(self): pass def get_window_surface(self): return self.window_surface def game_loop(self): # run the game loop while True: for event in pygame.event.get(): if event.type == pygame.locals.QUIT: self.exit() elif event.type == pygame.locals.KEYUP: if self.state == self.STATE_MAINMENU: self.menu.registerKeypress(event.key) elif event.key == 
pygame.locals.K_ESCAPE: self.exit() def start(self): # set up pygame pygame.init() pygame.font.init() if helpers.is_running_on_rpi(): disp_no = os.getenv("DISPLAY") if disp_no: print "I'm running under X display = {0}".format(disp_no) # Check which frame buffer drivers are available # Start with fbcon since directfb hangs with composite output drivers = ['fbcon', 'directfb', 'svgalib'] found = False for driver in drivers: # Make sure that SDL_VIDEODRIVER is set if not os.getenv('SDL_VIDEODRIVER'): os.putenv('SDL_VIDEODRIVER', driver) try: pygame.display.init() except pygame.error: print 'Driver: {0} failed.'.format(driver) continue found = True break if not found: raise Exception('No suitable video driver found!') size = (pygame.display.Info().current_w, pygame.display.Info().current_h) print "Framebuffer size: %d x %d" % (size[0], size[1]) #self.window_surface = pygame.display.set_mode(size, pygame.FULLSCREEN) # set up the window pygame.display.set_caption('Freesprints') # set up the colors BLACK = (0, 0, 0) WHITE = (255, 255, 255) RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (0, 0, 255) # set up fonts #availableFonts = pygame.font.get_fonts() font_path = "./fonts/Cave-Story.ttf" #basicFont = pygame.font.SysFont(None, 30) basicFont = pygame.font.Font(font_path, 48) # set up the text #text = basicFont.render('asdasd', True, WHITE, BLUE) #textRect = text.get_rect() #textRect.centerx = self.window_surface.get_rect().centerx #textRect.centery = self.window_surface.get_rect().centery # draw the white background onto the surface self.window_surface.fill(BLACK) # draw a green polygon onto the surface #pygame.draw.polygon(self.window_surface, GREEN, ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106))) # draw some blue lines onto the surface #pygame.draw.line(self.window_surface, BLUE, (60, 60), (120, 60), 4) #pygame.draw.line(self.window_surface, BLUE, (120, 60), (60, 120)) #pygame.draw.line(self.window_surface, BLUE, (60, 120), (120, 120), 4) # draw a blue circle onto the surface #pygame.draw.circle(self.window_surface, BLUE, (300, 50), 20, 0) # draw a red ellipse onto the surface #pygame.draw.ellipse(self.window_surface, RED, (450, 160, 40, 80), 1) # menu background background = pygame.image.load('images/menu_background.png').convert() backgroundRect = background.get_rect() backgroundRect.x = 0 backgroundRect.y = 0 self.window_surface.blit(background, backgroundRect) # draw the window onto the screen pygame.display.update() self.menu.render() self.game_loop() app = None logger = None def get_app(): global app if app == None: app = Application() return app def get_logger(): global logger if logger == None: logger = logging.getLogger('freesprints') logger.setLevel(logging.DEBUG) formatter = logging.Formatter("[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\t%(message)s") # same as default # setup colored logging handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True)) handler.setFormatter(formatter) logger.addHandler(handler) return logger def init(): global app print "start" app = get_app() app.load_plugins() app.start()
mit
-8,094,526,841,021,716,000
29.842282
121
0.527581
false
4.20064
false
false
false
pburdet/hyperspy
hyperspy/_signals/eds.py
1
21939
# -*- coding: utf-8 -*- # Copyright 2007-2011 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. from __future__ import division import numpy as np from hyperspy import utils from hyperspy._signals.spectrum import Spectrum from hyperspy.misc.elements import elements as elements_db from hyperspy.misc.eds import utils as utils_eds from hyperspy.misc.utils import isiterable class EDSSpectrum(Spectrum): _signal_type = "EDS" def __init__(self, *args, **kwards): Spectrum.__init__(self, *args, **kwards) if self.metadata.Signal.signal_type == 'EDS': print('The microscope type is not set. Use ' 'set_signal_type(\'EDS_TEM\') or set_signal_type(\'EDS_SEM\')') self.metadata.Signal.binned = True def _get_line_energy(self, Xray_line, FWHM_MnKa=None): """ Get the line energy and the energy resolution of a Xray line. The return values are in the same units than the signal axis Parameters ---------- Xray_line : strings Valid element X-ray lines e.g. Fe_Kb. FWHM_MnKa: {None, float, 'auto'} The energy resolution of the detector in eV if 'auto', used the one in 'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa' Returns ------ float: the line energy, if FWHM_MnKa is None (float,float): the line energy and the energy resolution, if FWHM_MnKa is not None """ units_name = self.axes_manager.signal_axes[0].units if FWHM_MnKa == 'auto': if self.metadata.Signal.signal_type == 'EDS_SEM': FWHM_MnKa = self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa elif self.metadata.Signal.signal_type == 'EDS_TEM': FWHM_MnKa = self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa else: raise NotImplementedError( "This method only works for EDS_TEM or EDS_SEM signals. " "You can use `set_signal_type(\"EDS_TEM\")` or" "`set_signal_type(\"EDS_SEM\")` to convert to one of these" "signal types.") line_energy = utils_eds._get_energy_xray_line(Xray_line) if units_name == 'eV': line_energy *= 1000 if FWHM_MnKa is not None: line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000 elif units_name == 'keV': if FWHM_MnKa is not None: line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy) else: raise ValueError( "%s is not a valid units for the energy axis. " "Only `eV` and `keV` are supported. " "If `s` is the variable containing this EDS spectrum:\n " ">>> s.axes_manager.signal_axes[0].units = \'keV\' \n" % (units_name)) if FWHM_MnKa is None: return line_energy else: return line_energy, line_FWHM def _get_beam_energy(self): """ Get the beam energy. 
The return value is in the same units than the signal axis """ if "Acquisition_instrument.SEM.beam_energy" in self.metadata: beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy elif "Acquisition_instrument.TEM.beam_energy" in self.metadata: beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy else: raise AttributeError( "To use this method the beam energy `Acquisition_instrument.TEM.beam_energy` " "or `Acquisition_instrument.SEM.beam_energy` must be defined in " "`metadata`.") units_name = self.axes_manager.signal_axes[0].units if units_name == 'eV': beam_energy = beam_energy * 1000 return beam_energy def sum(self, axis): """Sum the data over the given axis. Parameters ---------- axis : {int, string} The axis can be specified using the index of the axis in `axes_manager` or the axis name. Returns ------- s : Signal See also -------- sum_in_mask, mean Examples -------- >>> import numpy as np >>> s = Signal(np.random.random((64,64,1024))) >>> s.data.shape (64,64,1024) >>> s.sum(-1).data.shape (64,64) # If we just want to plot the result of the operation s.sum(-1, True).plot() """ # modify time spend per spectrum if "Acquisition_instrument.SEM" in self.metadata: mp = self.metadata.Acquisition_instrument.SEM else: mp = self.metadata.Acquisition_instrument.TEM if mp.has_item('Detector.EDS.live_time'): mp.Detector.EDS.live_time = mp.Detector.EDS.live_time * \ self.axes_manager.shape[axis] return super(EDSSpectrum, self).sum(axis) def rebin(self, new_shape): """Rebins the data to the new shape Parameters ---------- new_shape: tuple of ints The new shape must be a divisor of the original shape """ new_shape_in_array = [] for axis in self.axes_manager._axes: new_shape_in_array.append( new_shape[axis.index_in_axes_manager]) factors = (np.array(self.data.shape) / np.array(new_shape_in_array)) s = super(EDSSpectrum, self).rebin(new_shape) # modify time per spectrum if "Acquisition_instrument.SEM.Detector.EDS.live_time" in s.metadata: for factor in factors: s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time *= factor if "Acquisition_instrument.TEM.Detector.EDS.live_time" in s.metadata: for factor in factors: s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time *= factor return s def set_elements(self, elements): """Erase all elements and set them. Parameters ---------- elements : list of strings A list of chemical element symbols. See also -------- add_elements, set_line, add_lines. Examples -------- >>> s = signals.EDSSEMSpectrum(np.arange(1024)) >>> s.set_elements(['Ni', 'O'],['Ka','Ka']) Adding Ni_Ka Line Adding O_Ka Line >>> s.mapped_paramters.Acquisition_instrument.SEM.beam_energy = 10 >>> s.set_elements(['Ni', 'O']) Adding Ni_La Line Adding O_Ka Line """ # Erase previous elements and X-ray lines if "Sample.elements" in self.metadata: del self.metadata.Sample.elements self.add_elements(elements) def add_elements(self, elements): """Add elements and the corresponding X-ray lines. The list of elements is stored in `metadata.Sample.elements` Parameters ---------- elements : list of strings The symbol of the elements. See also -------- set_elements, add_lines, set_lines. """ if not isiterable(elements) or isinstance(elements, basestring): raise ValueError( "Input must be in the form of a list. 
For example, "
                "if `s` is the variable containing this EDS spectrum:\n "
                ">>> s.add_elements(('C',))\n"
                "See the docstring for more information.")
        if "Sample.elements" in self.metadata:
            elements_ = set(self.metadata.Sample.elements)
        else:
            elements_ = set()
        for element in elements:
            if element in elements_db:
                elements_.add(element)
            else:
                raise ValueError(
                    "%s is not a valid chemical element symbol." % element)
        if not hasattr(self.metadata, 'Sample'):
            self.metadata.add_node('Sample')
        self.metadata.Sample.elements = sorted(list(elements_))

    def set_lines(self,
                  lines,
                  only_one=True,
                  only_lines=("Ka", "La", "Ma")):
        """Erase all X-ray lines and set them.

        See add_lines for details.

        Parameters
        ----------
        lines : list of strings
            A list of valid element X-ray lines to add, e.g. Fe_Kb.
            Additionally, if `metadata.Sample.elements` is defined,
            add the lines of those elements that were not given in this
            list.
        only_one : bool
            If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line defined in lines.
            If True (default), only add the line at the highest energy
            above an overvoltage of 2 (< beam energy / 2).
        only_lines : {None, list of strings}
            If not None, only the given lines will be added.

        See also
        --------
        add_lines, add_elements, set_elements.

        """
        if "Sample.xray_lines" in self.metadata:
            del self.metadata.Sample.xray_lines
        self.add_lines(lines=lines,
                       only_one=only_one,
                       only_lines=only_lines)

    def add_lines(self,
                  lines=(),
                  only_one=True,
                  only_lines=("Ka", "La", "Ma")):
        """Add X-ray lines to the internal list.

        Although most functions do not require an internal list of
        X-ray lines because they can be calculated from the internal
        list of elements, occasionally it might be useful to customize the
        X-ray lines to be used by all functions by default using this
        method. The list of X-ray lines is stored in
        `metadata.Sample.xray_lines`

        Parameters
        ----------
        lines : list of strings
            A list of valid element X-ray lines to add, e.g. Fe_Kb.
            Additionally, if `metadata.Sample.elements` is defined,
            add the lines of those elements that were not given in this
            list. If the list is empty (default), and
            `metadata.Sample.elements` is defined,
            add the lines of all those elements.
        only_one : bool
            If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line defined in lines.
            If True (default), only add the line at the highest energy
            above an overvoltage of 2 (< beam energy / 2).
        only_lines : {None, list of strings}
            If not None, only the given lines will be added.

        See also
        --------
        set_lines, add_elements, set_elements.

        """
        if "Sample.xray_lines" in self.metadata:
            xray_lines = set(self.metadata.Sample.xray_lines)
        else:
            xray_lines = set()
        # Track the elements whose X-ray lines have been customized,
        # so that we don't attempt to add new lines automatically
        elements = set()
        for line in xray_lines:
            elements.add(line.split("_")[0])
        end_energy = self.axes_manager.signal_axes[0].high_value
        for line in lines:
            try:
                element, subshell = line.split("_")
            except ValueError:
                raise ValueError(
                    "Invalid line symbol. "
                    "Please provide a valid line symbol e.g. Fe_Ka")
            if element in elements_db:
                elements.add(element)
                if subshell in elements_db[element]['Atomic_properties']['Xray_lines']:
                    lines_len = len(xray_lines)
                    xray_lines.add(line)
                    if lines_len != len(xray_lines):
                        print("%s line added." % line)
                    else:
                        print("%s line already in." % line)
                    if (self._get_line_energy(element + '_' + subshell) >
                            end_energy):
                        print("Warning: %s %s is above the data energy range."
                              % (element, subshell))
                else:
                    raise ValueError(
                        "%s is not a valid line of %s." % (line, element))
            else:
                raise ValueError(
                    "%s is not a valid symbol of an element." % element)
        if "Sample.elements" in self.metadata:
            extra_elements = (set(self.metadata.Sample.elements) -
                              elements)
            if extra_elements:
                new_lines = self._get_lines_from_elements(
                    extra_elements,
                    only_one=only_one,
                    only_lines=only_lines)
                if new_lines:
                    self.add_lines(list(new_lines) + list(lines))
        self.add_elements(elements)
        if not hasattr(self.metadata, 'Sample'):
            self.metadata.add_node('Sample')
        if "Sample.xray_lines" in self.metadata:
            xray_lines = xray_lines.union(
                self.metadata.Sample.xray_lines)
        self.metadata.Sample.xray_lines = sorted(list(xray_lines))

    def _get_lines_from_elements(self,
                                 elements,
                                 only_one=False,
                                 only_lines=("Ka", "La", "Ma")):
        """Returns the X-ray lines of the given elements in the spectral
        range of the data.

        Parameters
        ----------
        elements : list of strings
            A list containing the symbol of the chemical elements.
        only_one : bool
            If False, add all the lines of each element in the data
            spectral range. If True, only add the line at the highest
            energy above an overvoltage of 2 (< beam energy / 2).
        only_lines : {None, list of strings}
            If not None, only the given lines will be returned.

        Returns
        -------
        list of strings
            The X-ray line symbols, e.g. ['Fe_Ka', 'Pt_La'].

        """
        beam_energy = self._get_beam_energy()
        end_energy = self.axes_manager.signal_axes[0].high_value
        if beam_energy < end_energy:
            end_energy = beam_energy
        lines = []
        for element in elements:
            # Possible line (existing and excited by electron)
            element_lines = []
            for subshell in elements_db[element]['Atomic_properties']['Xray_lines'].keys():
                if only_lines and subshell not in only_lines:
                    continue
                if (self._get_line_energy(element + '_' + subshell) <
                        end_energy):
                    element_lines.append(element + "_" + subshell)
            if only_one and element_lines:
                # Choose the best line
                select_this = -1
                for i, line in enumerate(element_lines):
                    if (self._get_line_energy(line) <
                            beam_energy / 2):
                        select_this = i
                        break
                element_lines = [element_lines[select_this], ]
            if not element_lines:
                print(("There is no X-ray line for element %s " % element) +
                      "in the data spectral range")
            else:
                lines.extend(element_lines)
        return lines

    def get_lines_intensity(self,
                            xray_lines=None,
                            plot_result=False,
                            integration_window_factor=2.,
                            only_one=True,
                            only_lines=("Ka", "La", "Ma"),
                            **kwargs):
        """Return the intensity map of selected X-ray lines.

        The intensities, the number of X-ray counts, are computed by
        summing the spectrum over the different X-ray lines. The sum
        window width is calculated from the energy resolution of the
        detector as defined in
        `self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa`
        or
        `self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa`.

        Parameters
        ----------
        xray_lines: {None, "best", list of string}
            If None, if `metadata.Sample.xray_lines` contains a list of
            lines use those. If `metadata.Sample.xray_lines` is undefined
            or empty but `metadata.Sample.elements` is defined, use the
            same syntax as `add_lines` to select a subset of lines for
            the operation. Alternatively, provide an iterable containing
            a list of valid X-ray line symbols.
        plot_result : bool
            If True, plot the calculated line intensities. If the current
            object is a single spectrum it prints the result instead.
        integration_window_factor: Float
            The integration window is centered at the center of the X-ray
            line and its width is defined by this factor (2 by default)
            times the calculated FWHM of the line.
        only_one : bool
            If False, use all the lines of each element in the data
            spectral range. If True, use only the line at the highest
            energy above an overvoltage of 2 (< beam energy / 2).
        only_lines : {None, list of strings}
            If not None, use only the given lines.
        kwargs
            The extra keyword arguments for plotting. See
            `utils.plot.plot_signals`

        Returns
        -------
        intensities : list
            A list containing the intensities as Signal subclasses.

        Examples
        --------
        >>> specImg.get_lines_intensity(["C_Ka", "Ta_Ma"])

        See also
        --------
        set_elements, add_elements.

        """
        if xray_lines is None:
            if 'Sample.xray_lines' in self.metadata:
                xray_lines = self.metadata.Sample.xray_lines
            elif 'Sample.elements' in self.metadata:
                xray_lines = self._get_lines_from_elements(
                    self.metadata.Sample.elements,
                    only_one=only_one,
                    only_lines=only_lines)
            else:
                raise ValueError(
                    "No X-ray lines defined; set them with `add_elements`")
        intensities = []
        # test 1D Spectrum (0D problem)
        # signal_to_index = self.axes_manager.navigation_dimension - 2
        for Xray_line in xray_lines:
            line_energy, line_FWHM = self._get_line_energy(Xray_line,
                                                           FWHM_MnKa='auto')
            det = integration_window_factor * line_FWHM / 2.
            img = self[..., line_energy - det:line_energy + det
                       ].integrate1D(-1)
            img.metadata.General.title = (
                'Intensity of %s at %.2f %s from %s' %
                (Xray_line,
                 line_energy,
                 self.axes_manager.signal_axes[0].units,
                 self.metadata.General.title))
            if img.axes_manager.navigation_dimension >= 2:
                img = img.as_image([0, 1])
            elif img.axes_manager.navigation_dimension == 1:
                img.axes_manager.set_signal_dimension(1)
            if plot_result and img.axes_manager.signal_dimension == 0:
                print("%s at %s %s : Intensity = %.2f"
                      % (Xray_line,
                         line_energy,
                         self.axes_manager.signal_axes[0].units,
                         img.data))
            intensities.append(img)
        if plot_result and img.axes_manager.signal_dimension != 0:
            utils.plot.plot_signals(intensities, **kwargs)
        return intensities

    def get_take_off_angle(self):
        """Calculate the take-off-angle (TOA).

        TOA is the angle with which the X-rays leave the surface towards
        the detector. The parameters are read from
        'Acquisition_instrument.SEM.tilt_stage',
        'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and
        'Acquisition_instrument.SEM.Detector.EDS.elevation_angle'
        (or their TEM equivalents) in 'metadata'.

        Returns
        -------
        take_off_angle: float (Degree)

        See also
        --------
        utils.eds.take_off_angle

        Notes
        -----
        Defined by M. Schaffer et al., Ultramicroscopy 107(8), pp 587-597
        (2007)

        """
        if self.metadata.Signal.signal_type == 'EDS_SEM':
            mp = self.metadata.Acquisition_instrument.SEM
        elif self.metadata.Signal.signal_type == 'EDS_TEM':
            mp = self.metadata.Acquisition_instrument.TEM

        tilt_stage = mp.tilt_stage
        azimuth_angle = mp.Detector.EDS.azimuth_angle
        elevation_angle = mp.Detector.EDS.elevation_angle
        TOA = utils.eds.take_off_angle(tilt_stage, azimuth_angle,
                                       elevation_angle)
        return TOA
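
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the class above): how the line-selection
# API is typically driven from user code. `hs.load` and the file name
# "spectrum.msa" are illustrative assumptions; only `add_elements`,
# `add_lines`, `get_lines_intensity` and `get_take_off_angle` come from the
# methods defined above.
#
# >>> import hyperspy.api as hs
# >>> s = hs.load("spectrum.msa", signal_type="EDS_TEM")  # hypothetical file
# >>> s.add_elements(("Fe", "Pt"))   # populates metadata.Sample.elements
# >>> s.add_lines()                  # one line per element (only_one=True)
# >>> intensities = s.get_lines_intensity(plot_result=False)
# >>> s.get_take_off_angle()         # degrees, from the detector geometry
# ---------------------------------------------------------------------------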
gpl-3.0
1,777,743,600,678,881,300
37.489474
104
0.548749
false
4.252568
false
false
false
stevedh/queryutils
queryutils/user.py
1
2181
from json import JSONEncoder

# SessionEncoder and QueryEncoder are referenced below; they are expected to
# be provided by queryutils' sibling modules (they are not defined here).


class User(object):

    def __init__(self, name):
        self.name = name
        self.sessions = {}
        self.queries = []


class VerboseUserEncoder(JSONEncoder):

    def encode(self, obj):
        user_dict = {}
        user_dict['name'] = obj.name
        session_dict = {}
        for (session_id, session) in obj.sessions.iteritems():
            session_dict[session_id] = SessionEncoder().default(session)
        user_dict['sessions'] = session_dict
        query_list = []
        for query in obj.queries:
            query_list.append(QueryEncoder().default(query))
        user_dict['queries'] = query_list
        return user_dict

    def default(self, obj):
        if isinstance(obj, User):
            return self.encode(obj)
        return JSONEncoder.default(self, obj)


class UserEncoder(JSONEncoder):

    def encode(self, obj):
        user_dict = {}
        user_dict['name'] = obj.name
        # Note: a single dict is reused across the loop below, so only the
        # most recently visited session's details survive in the output.
        session_dict = {}
        for (session_id, session) in obj.sessions.iteritems():
            session_dict['id'] = session_id
            query_list = []
            for query in session.queries:
                query_dict = {}
                query_dict['delta'] = query.delta
                query_dict['time'] = query.time
                query_dict['text'] = query.text
                query_list.append(query_dict)
            session_dict['queries'] = query_list
            session_dict['user'] = obj.name
        try:
            autorecurring_query_list = []
            for query in obj.autorecurring_queries:
                query_dict = {}
                query_dict['repeat_delta'] = query.repeat_delta
                query_dict['time'] = query.time
                query_dict['text'] = query.text
                autorecurring_query_list.append(query_dict)
            user_dict['autorecurring_queries'] = autorecurring_query_list
        except AttributeError:
            print "Not encoding autorecurring queries. No such attribute."
        user_dict['sessions'] = session_dict
        return user_dict

    def default(self, obj):
        if isinstance(obj, User):
            return self.encode(obj)
        return JSONEncoder.default(self, obj)
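
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the module): serializing a
# User with UserEncoder. The _Query/_Session stand-ins are invented here only
# so the snippet is self-contained; queryutils' real classes may differ.
#
# from json import dumps
#
# class _Query(object):
#     def __init__(self, delta, time, text):
#         self.delta, self.time, self.text = delta, time, text
#
# class _Session(object):
#     def __init__(self, queries):
#         self.queries = queries
#
# user = User("alice")
# user.sessions[1] = _Session([_Query(0.5, 1400000000.0, "search error")])
# print dumps(UserEncoder().default(user))  # Python 2, like the module above
# ---------------------------------------------------------------------------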
bsd-3-clause
8,245,894,538,224,595,000
33.078125
74
0.558459
false
4.218569
false
false
false
ajhager/copycat
copycat/workspace/string.py
1
12784
# Copyright (c) 2007-2017 Joseph Hager. # # Copycat is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License, # as published by the Free Software Foundation. # # Copycat is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Copycat; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """String""" import random import copycat.toolbox as toolbox class String(object): """String is a letter string in the workspace. This could be the initial string, modified string or target string. Each object in a string has a unique string number that identifies it from other objects in the string.""" def __init__(self, workspace, string): self.workspace = workspace self.slipnet = self.workspace.slipnet self.name = string self.highest_string_number = -1 self.length = len(string) self.letters = {} self.groups = {} self.proposed_groups = {} self.object_positions = {} self.left_right_bonds = {} self.from_to_bonds = {} self.proposed_bonds = {} self.intra_string_unhappiness = 0 self.bonds_to_scan_distribution = range(self.length) def add_to_object_positions(self, obj, position): """Add an object to the object positions.""" if position in self.object_positions: self.object_positions[position].append(obj) else: self.object_positions[position] = [obj] def remove_from_object_positions(self, obj, position): """Remove an object from the object positions.""" if obj in self.object_positions[position]: self.object_positions[position].remove(obj) def add_letter(self, letter): """Add a letter to the string.""" self.highest_string_number += 1 letter.string_number = self.highest_string_number position = letter.left_string_position self.letters[position] = letter self.add_to_object_positions(letter, position) def get_letters(self): """Return a list of letters in the string.""" return [self.letters[index] for index in sorted(self.letters.keys())] def get_letter(self, position): """Return the letter at the given position in the string.""" return self.letters.get(position) def get_random_letter(self): """Return a random letter from the string.""" return random.choice(self.get_letters()) def get_leftmost_letter(self): """Return the leftmost letter in the string.""" return self.letters.get(0) def get_rightmost_letter(self): """Return the rightmost letter in the string.""" return self.letters.get(len(self.letters) - 1) def add_group(self, group): """Add a group to the string.""" self.highest_string_number += 1 group.string_number = self.highest_string_number self.groups[group.left_object.string_number] = group self.add_to_object_positions(group, group.left_string_position) self.add_to_object_positions(group, group.right_string_position) def remove_group(self, group): """Remove a group from the string.""" if group.left_object.string_number in self.groups: del self.groups[group.left_object.string_number] self.remove_from_object_positions(group, group.left_string_position) self.remove_from_object_positions(group, group.right_string_position) def get_groups(self): """Return a list of groups in the string.""" return list(self.groups.values()) def get_group(self, position): """Return the group at the given position in letters. 
Positions start at 0 and refer to the position of the leftmost object in the group.""" return self.get_letter(position).group def get_existing_group(self, group): """Return the group in the string if it has the same properties as the given group.""" existing_group = self.groups.get(group.left_object.string_number) if existing_group: if existing_group.length == group.length and \ existing_group.group_category == group.group_category and \ existing_group.direction_category == group.direction_category: return existing_group def add_proposed_group(self, group): """Add a proposed group to the string.""" position = (group.left_object.string_number, group.right_object.string_number) if position in self.proposed_groups: self.proposed_groups[position].append(group) else: self.proposed_groups[position] = [group] def remove_proposed_group(self, group): """Remove a proposed group from the string.""" position = (group.left_object.string_number, group.right_object.string_number) items = self.proposed_groups.get(position, []) if group in items: self.proposed_groups[position].remove(group) def get_proposed_groups(self): """Return a list of the proposed groups in the string.""" return list(set(toolbox.flatten(self.proposed_groups.values()))) def get_proposed_group(self, first, second): """Return the proposed group at first, second position.""" return self.proposed_groups.get((first, second)) def add_bond(self, bond): """Add a bond to the string, sameness bonds in both directions.""" left_number = bond.left_object.string_number right_number = bond.right_object.string_number self.left_right_bonds[(left_number, right_number)] = bond from_number = bond.from_object.string_number to_number = bond.to_object.string_number self.from_to_bonds[(from_number, to_number)] = bond if bond.bond_category == self.slipnet.plato_sameness: self.left_right_bonds[(right_number, left_number)] = bond self.from_to_bonds[(to_number, from_number)] = bond def remove_bond(self, bond): """Remove a built bond from the string.""" left_number = bond.left_object.string_number right_number = bond.right_object.string_number if (left_number, right_number) in self.left_right_bonds: del self.left_right_bonds[(left_number, right_number)] from_number = bond.from_object.string_number to_number = bond.to_object.string_number if (from_number, to_number) in self.from_to_bonds: del self.from_to_bonds[(from_number, to_number)] if bond.bond_category == self.slipnet.plato_sameness: if (right_number, left_number) in self.left_right_bonds: del self.left_right_bonds[(right_number, left_number)] if (to_number, from_number) in self.from_to_bonds: del self.from_to_bonds[(to_number, from_number)] def get_bonds(self): """Return a list of the built bonds in the string.""" return list(set(self.from_to_bonds.values())) def get_bond(self, from_object, to_object): """Return the bond between the two objects, if any.""" return self.from_to_bonds.get((from_object.string_number, to_object.string_number)) def get_existing_bond(self, bond): """Return the bond in the string if it has the same properties as the given bond.""" existing_bond = self.get_bond(bond.from_object, bond.to_object) if existing_bond: if existing_bond.bond_category == bond.bond_category and \ existing_bond.direction_category == bond.direction_category: return existing_bond def add_proposed_bond(self, bond): """Add the proposed bond to the string.""" position = (bond.from_object.string_number, bond.to_object.string_number) if position in self.proposed_bonds: self.proposed_bonds[position].append(bond) else: 
            self.proposed_bonds[position] = [bond]

    def remove_proposed_bond(self, bond):
        """Remove the proposed bond from the string."""
        position = (bond.from_object.string_number,
                    bond.to_object.string_number)
        if position in self.proposed_bonds:
            items = self.proposed_bonds[position]
            if bond in items:
                self.proposed_bonds[position].remove(bond)

    def get_proposed_bonds(self):
        """Return a list of proposed bonds in the string."""
        return list(set(toolbox.flatten(self.proposed_bonds.values())))

    def get_proposed_bond(self, first, second):
        """Return the proposed bonds at the (first, second) position."""
        return self.proposed_bonds.get((first, second))

    def get_objects(self, category=None):
        """Return the list of objects of the given object category.

        If no category is given, return all objects."""
        if category == self.slipnet.plato_letter:
            return self.get_letters()
        elif category == self.slipnet.plato_group:
            return self.get_groups()
        return self.get_letters() + self.get_groups()

    def get_non_string_spanning_objects(self):
        """Return all objects that do not span the entire string."""
        return [o for o in self.get_objects() if not o.spans_whole_string()]

    def get_random_object(self, method=None):
        """Return a random object from the string."""
        if method:
            objects = self.get_objects()
            values = [getattr(obj, method) for obj in objects]
            values = self.workspace.temperature_adjusted_values(values)
            return objects[toolbox.weighted_index(values)]
        return random.choice(self.get_objects())

    def get_random_leftmost_object(self):
        """Return a random leftmost object from the string."""
        leftmost_objects = []
        category = self.slipnet.plato_string_position_category
        for obj in self.get_objects():
            if obj.get_descriptor(category) == self.slipnet.plato_leftmost:
                leftmost_objects.append(obj)
        if leftmost_objects:
            values = [obj.relative_importance for obj in leftmost_objects]
            return toolbox.weighted_select(values, leftmost_objects)

    def update_relative_importances(self):
        """Update the relative, normalized importances of all the objects in
        the string."""
        raw_importance = sum([o.raw_importance for o in self.get_objects()])
        for obj in self.get_objects():
            if raw_importance == 0:
                importance = 0
            else:
                quot = obj.raw_importance / float(raw_importance)
                importance = round(100 * quot)
            obj.relative_importance = importance

    def update_intra_string_unhappiness(self):
        """Calculate the average of the intra-string unhappiness of all the
        objects in the string."""
        unhappiness = [o.intra_string_unhappiness for o in self.get_objects()]
        self.intra_string_unhappiness = round(toolbox.average(*unhappiness))

    def local_bond_category_relevance(self, bond_category):
        """A function of how many bonds in the string have the given bond
        category.

        This function is not perfect; it gives just a rough estimate of the
        relevance of this bond category."""
        objects = self.get_non_string_spanning_objects()
        if len(objects) == 1:
            return 0
        bond_count = 0
        for obj in objects:
            if obj.right_bond:
                if obj.right_bond.bond_category == bond_category:
                    bond_count += 1
        return 100 * (float(bond_count) / (len(objects) - 1))

    def local_direction_category_relevance(self, direction_category):
        """A function of how many bonds in the string have the given
        direction category.
This function is not perfect; it gives just a rough estimate of the relevance of this direction category.""" objects = self.get_non_string_spanning_objects() if len(objects) == 1: return 0 bond_count = 0 for obj in objects: if obj.right_bond: if obj.right_bond.direction_category == direction_category: bond_count += 1 return 100 * (float(bond_count) / (len(objects) - 1))
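
# ---------------------------------------------------------------------------
# Hedged illustration (not part of copycat): both relevance methods above
# reduce to the same arithmetic -- the share of right-going bonds matching a
# category, scaled to 0..100. A standalone version of that formula:
#
# def relevance(matching_bonds, non_spanning_objects):
#     if non_spanning_objects <= 1:
#         return 0
#     return 100 * (float(matching_bonds) / (non_spanning_objects - 1))
#
# relevance(2, 4)  # -> 66.67: 2 of the 3 possible right bonds match
# ---------------------------------------------------------------------------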
gpl-2.0
-2,942,944,997,608,735,000
41.471761
80
0.632431
false
4.076531
false
false
false
noahlittle/noahlittle.github.io
iCTRL/var/mobile/pentest/exploits/iCTRL/cupp/cupp.py
1
55986
#!/usr/bin/python # # [Program] # # CUPP 3.1 # Common User Passwords Profiler # # # # [Author] # # Muris Kurgas aka j0rgan # j0rgan [at] remote-exploit [dot] org # http://www.remote-exploit.org # http://www.azuzi.me # # # # [License] # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # See 'docs/LICENSE' for more information. import sys import os import ftplib import ConfigParser import urllib import gzip import csv # Reading configuration file... config = ConfigParser.ConfigParser() config.read('cupp.cfg') years = config.get('years', 'years').split(',') chars = config.get('specialchars', 'chars').split(',') numfrom = config.getint('nums','from') numto = config.getint('nums','to') wcfrom = config.getint('nums','wcfrom') wcto = config.getint('nums','wcto') threshold = config.getint('nums','threshold') # 1337 mode configs, well you can add more lines if you add it to config file too. # You will need to add more lines in two places in cupp.py code as well... a = config.get('leet','a') i = config.get('leet','i') e = config.get('leet','e') t = config.get('leet','t') o = config.get('leet','o') s = config.get('leet','s') g = config.get('leet','g') z = config.get('leet','z') # for concatenations... def concats(seq, start, stop): for mystr in seq: for num in xrange(start, stop): yield mystr + str(num) # for sorting and making combinations... def komb(seq, start): for mystr in seq: for mystr1 in start: yield mystr + mystr1 if len(sys.argv) < 2 or sys.argv[1] == '-h': print " ___________ " print " \033[07m cupp.py! \033[27m # Common" print " \ # User" print " \ \033[1;31m,__,\033[1;m # Passwords" print " \ \033[1;31m(\033[1;moo\033[1;31m)____\033[1;m # Profiler" print " \033[1;31m(__) )\ \033[1;m " print " \033[1;31m ||--|| \033[1;m\033[05m*\033[25m\033[1;m [ Muris Kurgas | j0rgan@remote-exploit.org ]\r\n\r\n" print " [ Options ]\r\n" print " -h You are looking at it baby! :)" print " For more help take a look in docs/README" print " Global configuration file is cupp.cfg\n" print " -i Interactive questions for user password profiling\r\n" print " -w Use this option to improve existing dictionary," print " or WyD.pl output to make some pwnsauce\r\n" print " -l Download huge wordlists from repository\r\n" print " -a Parse default usernames and passwords directly from Alecto DB." 
print " Project Alecto uses purified databases of Phenoelit and CIRT" print " which where merged and enhanced.\r\n" print " -v Version of the program\r\n" exit() elif sys.argv[1] == '-v': print "\r\n \033[1;31m[ cupp.py ] v3.1\033[1;m\r\n" print " * Hacked up by j0rgan - j0rgan@remote-exploit.org" print " * http://www.remote-exploit.org\r\n" print " Take a look docs/README file for more info about the program\r\n" exit() elif sys.argv[1] == '-w': if len(sys.argv) < 3: print "\r\n[Usage]: "+sys.argv[0]+" -w [FILENAME]\r\n" exit() fajl = open(sys.argv[2], "r") listic = fajl.readlines() linije = 0 for line in listic: linije += 1 listica = [] for x in listic: listica += x.split() print "\r\n *************************************************" print " * \033[1;31mWARNING!!!\033[1;m *" print " * Using large wordlists in some *" print " * options bellow is NOT recommended! *" print " *************************************************\r\n" conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower() if conts == "y" and linije > threshold: print "\r\n[-] Maximum number of words for concatenation is "+str(threshold) print "[-] Check configuration file for increasing this number.\r\n" conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower() conts = conts cont = [''] if conts == "y": for cont1 in listica: for cont2 in listica: if listica.index(cont1) != listica.index(cont2): cont.append(cont1+cont2) spechars = [''] spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower() if spechars1 == "y": for spec1 in chars: spechars.append(spec1) for spec2 in chars: spechars.append(spec1+spec2) for spec3 in chars: spechars.append(spec1+spec2+spec3) randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]").lower() leetmode = raw_input("[>]Leet mode? (i.e. leet = 1337) Y/[N]: ").lower() kombinacija1 = list(komb(listica, years)) kombinacija2 = [''] if conts == "y": kombinacija2 = list(komb(cont, years)) kombinacija3 = [''] kombinacija4 = [''] if spechars1 == "y": kombinacija3 = list(komb(listica, spechars)) if conts == "y": kombinacija4 = list(komb(cont, spechars)) kombinacija5 = [''] kombinacija6 = [''] if randnum == "y": kombinacija5 = list(concats(listica, numfrom, numto)) if conts == "y": kombinacija6 = list(concats(cont, numfrom, numto)) print "\r\n[+] Now making a dictionary..." print "[+] Sorting list and removing duplicates..." komb_unique1 = dict.fromkeys(kombinacija1).keys() komb_unique2 = dict.fromkeys(kombinacija2).keys() komb_unique3 = dict.fromkeys(kombinacija3).keys() komb_unique4 = dict.fromkeys(kombinacija4).keys() komb_unique5 = dict.fromkeys(kombinacija5).keys() komb_unique6 = dict.fromkeys(kombinacija6).keys() komb_unique7 = dict.fromkeys(listica).keys() komb_unique8 = dict.fromkeys(cont).keys() uniqlist = komb_unique1+komb_unique2+komb_unique3+komb_unique4+komb_unique5+komb_unique6+komb_unique7+komb_unique8 unique_lista = dict.fromkeys(uniqlist).keys() unique_leet = [] if leetmode == "y": for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too... 
            x = x.replace('a',a)
            x = x.replace('i',i)
            x = x.replace('e',e)
            x = x.replace('t',t)
            x = x.replace('o',o)
            x = x.replace('s',s)
            x = x.replace('g',g)
            x = x.replace('z',z)
            unique_leet.append(x)

    unique_list = unique_lista + unique_leet

    unique_list_finished = []
    for x in unique_list:
        if len(x) > wcfrom and len(x) < wcto:
            unique_list_finished.append(x)

    f = open ( sys.argv[2]+'.cupp.txt', 'w' )
    unique_list_finished.sort()
    f.write (os.linesep.join(unique_list_finished))
    f = open ( sys.argv[2]+'.cupp.txt', 'r' )
    lines = 0
    for line in f:
        lines += 1
    f.close()
    print "[+] Saving dictionary to \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m, counting \033[1;31m"+str(lines)+" words.\033[1;m"
    print "[+] Now load your pistolero with \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m and shoot! Good luck!"
    fajl.close()
    exit()

elif sys.argv[1] == '-i':
    print "\r\n[+] Insert the informations about the victim to make a dictionary"
    print "[+] If you don't know all the info, just hit enter when asked! ;)\r\n"

    # We need some informations first!

    name = raw_input("[>] Name: ").lower()
    while len(name) == 0 or name == " " or name == "  " or name == "   ":
        print "\r\n[-] You must enter a name at least!"
        name = raw_input("[>] Name: ").lower()
    name = str(name)

    surname = raw_input("[>] Surname: ").lower()
    nick = raw_input("[>] Nickname: ").lower()
    birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
    while len(birthdate) != 0 and len(birthdate) != 8:
        print "\r\n[-] You must enter 8 digits for birthday!"
        birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
    birthdate = str(birthdate)
    print "\r\n"

    wife = raw_input("[>] Wife's(husband's) name: ").lower()
    wifen = raw_input("[>] Wife's(husband's) nickname: ").lower()
    wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
    while len(wifeb) != 0 and len(wifeb) != 8:
        print "\r\n[-] You must enter 8 digits for birthday!"
        wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
    wifeb = str(wifeb)
    print "\r\n"

    kid = raw_input("[>] Child's name: ").lower()
    kidn = raw_input("[>] Child's nickname: ").lower()
    kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
    while len(kidb) != 0 and len(kidb) != 8:
        print "\r\n[-] You must enter 8 digits for birthday!"
        kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
    kidb = str(kidb)
    print "\r\n"

    pet = raw_input("[>] Pet's name: ").lower()
    company = raw_input("[>] Company name: ").lower()
    print "\r\n"

    words = ['']
    oth = raw_input("[>] Do you want to add some key words about the victim? Y/[N]: ").lower()
    if oth == "y":
        words = raw_input("[>] Please enter the words, separated by comma. [i.e. hacker, juice, black]: ").lower().split(", ")

    spechars = ['']
    spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower()
    if spechars1 == "y":
        for spec1 in chars:
            spechars.append(spec1)
            for spec2 in chars:
                spechars.append(spec1+spec2)
                for spec3 in chars:
                    spechars.append(spec1+spec2+spec3)

    randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]: ").lower()
    leetmode = raw_input("[>] Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()

    print "\r\n[+] Now making a dictionary..."

    # Now we must do some string modifications...
# Birthdays first birthdate_yy = birthdate[-2:] birthdate_yyy = birthdate[-3:] birthdate_yyyy = birthdate[-4:] birthdate_xd = birthdate[1:2] birthdate_xm = birthdate[3:4] birthdate_dd = birthdate[:2] birthdate_mm = birthdate[2:4] wifeb_yy = wifeb[-2:] wifeb_yyy = wifeb[-3:] wifeb_yyyy = wifeb[-4:] wifeb_xd = wifeb[1:2] wifeb_xm = wifeb[3:4] wifeb_dd = wifeb[:2] wifeb_mm = wifeb[2:4] kidb_yy = kidb[-2:] kidb_yyy = kidb[-3:] kidb_yyyy = kidb[-4:] kidb_xd = kidb[1:2] kidb_xm = kidb[3:4] kidb_dd = kidb[:2] kidb_mm = kidb[2:4] # Convert first letters to uppercase... nameup = name.title() surnameup = surname.title() nickup = nick.title() wifeup = wife.title() wifenup = wifen.title() kidup = kid.title() kidnup = kidn.title() petup = pet.title() companyup = company.title() wordsup = [] for words1 in words: wordsup.append(words1.title()) word = words+wordsup # reverse a name rev_name = name[::-1] rev_nameup = nameup[::-1] rev_nick = nick[::-1] rev_nickup = nickup[::-1] rev_wife = wife[::-1] rev_wifeup = wifeup[::-1] rev_kid = kid[::-1] rev_kidup = kidup[::-1] reverse = [rev_name, rev_nameup, rev_nick, rev_nickup, rev_wife, rev_wifeup, rev_kid, rev_kidup] rev_n = [rev_name, rev_nameup, rev_nick, rev_nickup] rev_w = [rev_wife, rev_wifeup] rev_k = [rev_kid, rev_kidup] # Let's do some serious work! This will be a mess of code, but... who cares? :) # Birthdays combinations bds = [birthdate_yy, birthdate_yyy, birthdate_yyyy, birthdate_xd, birthdate_xm, birthdate_dd, birthdate_mm] bdss = [] for bds1 in bds: bdss.append(bds1) for bds2 in bds: if bds.index(bds1) != bds.index(bds2): bdss.append(bds1+bds2) for bds3 in bds: if bds.index(bds1) != bds.index(bds2) and bds.index(bds2) != bds.index(bds3) and bds.index(bds1) != bds.index(bds3): bdss.append(bds1+bds2+bds3) # For a woman... wbds = [wifeb_yy, wifeb_yyy, wifeb_yyyy, wifeb_xd, wifeb_xm, wifeb_dd, wifeb_mm] wbdss = [] for wbds1 in wbds: wbdss.append(wbds1) for wbds2 in wbds: if wbds.index(wbds1) != wbds.index(wbds2): wbdss.append(wbds1+wbds2) for wbds3 in wbds: if wbds.index(wbds1) != wbds.index(wbds2) and wbds.index(wbds2) != wbds.index(wbds3) and wbds.index(wbds1) != wbds.index(wbds3): wbdss.append(wbds1+wbds2+wbds3) # and a child... kbds = [kidb_yy, kidb_yyy, kidb_yyyy, kidb_xd, kidb_xm, kidb_dd, kidb_mm] kbdss = [] for kbds1 in kbds: kbdss.append(kbds1) for kbds2 in kbds: if kbds.index(kbds1) != kbds.index(kbds2): kbdss.append(kbds1+kbds2) for kbds3 in kbds: if kbds.index(kbds1) != kbds.index(kbds2) and kbds.index(kbds2) != kbds.index(kbds3) and kbds.index(kbds1) != kbds.index(kbds3): kbdss.append(kbds1+kbds2+kbds3) # string combinations.... 
kombinaac = [pet, petup, company, companyup] kombina = [name, surname, nick, nameup, surnameup, nickup] kombinaw = [wife, wifen, wifeup, wifenup, surname, surnameup] kombinak = [kid, kidn, kidup, kidnup, surname, surnameup] kombinaa = [] for kombina1 in kombina: kombinaa.append(kombina1) for kombina2 in kombina: if kombina.index(kombina1) != kombina.index(kombina2) and kombina.index(kombina1.title()) != kombina.index(kombina2.title()): kombinaa.append(kombina1+kombina2) kombinaaw = [] for kombina1 in kombinaw: kombinaaw.append(kombina1) for kombina2 in kombinaw: if kombinaw.index(kombina1) != kombinaw.index(kombina2) and kombinaw.index(kombina1.title()) != kombinaw.index(kombina2.title()): kombinaaw.append(kombina1+kombina2) kombinaak = [] for kombina1 in kombinak: kombinaak.append(kombina1) for kombina2 in kombinak: if kombinak.index(kombina1) != kombinak.index(kombina2) and kombinak.index(kombina1.title()) != kombinak.index(kombina2.title()): kombinaak.append(kombina1+kombina2) komb1 = list(komb(kombinaa, bdss)) komb2 = list(komb(kombinaaw, wbdss)) komb3 = list(komb(kombinaak, kbdss)) komb4 = list(komb(kombinaa, years)) komb5 = list(komb(kombinaac, years)) komb6 = list(komb(kombinaaw, years)) komb7 = list(komb(kombinaak, years)) komb8 = list(komb(word, bdss)) komb9 = list(komb(word, wbdss)) komb10 = list(komb(word, kbdss)) komb11 = list(komb(word, years)) komb12 = [''] komb13 = [''] komb14 = [''] komb15 = [''] komb16 = [''] komb21 = [''] if randnum == "y": komb12 = list(concats(word, numfrom, numto)) komb13 = list(concats(kombinaa, numfrom, numto)) komb14 = list(concats(kombinaac, numfrom, numto)) komb15 = list(concats(kombinaaw, numfrom, numto)) komb16 = list(concats(kombinaak, numfrom, numto)) komb21 = list(concats(reverse, numfrom, numto)) komb17 = list(komb(reverse, years)) komb18 = list(komb(rev_w, wbdss)) komb19 = list(komb(rev_k, kbdss)) komb20 = list(komb(rev_n, bdss)) komb001 = [''] komb002 = [''] komb003 = [''] komb004 = [''] komb005 = [''] komb006 = [''] if spechars1 == "y": komb001 = list(komb(kombinaa, spechars)) komb002 = list(komb(kombinaac, spechars)) komb003 = list(komb(kombinaaw , spechars)) komb004 = list(komb(kombinaak , spechars)) komb005 = list(komb(word, spechars)) komb006 = list(komb(reverse, spechars)) print "[+] Sorting list and removing duplicates..." 
komb_unique1 = dict.fromkeys(komb1).keys() komb_unique2 = dict.fromkeys(komb2).keys() komb_unique3 = dict.fromkeys(komb3).keys() komb_unique4 = dict.fromkeys(komb4).keys() komb_unique5 = dict.fromkeys(komb5).keys() komb_unique6 = dict.fromkeys(komb6).keys() komb_unique7 = dict.fromkeys(komb7).keys() komb_unique8 = dict.fromkeys(komb8).keys() komb_unique9 = dict.fromkeys(komb9).keys() komb_unique10 = dict.fromkeys(komb10).keys() komb_unique11 = dict.fromkeys(komb11).keys() komb_unique12 = dict.fromkeys(komb12).keys() komb_unique13 = dict.fromkeys(komb13).keys() komb_unique14 = dict.fromkeys(komb14).keys() komb_unique15 = dict.fromkeys(komb15).keys() komb_unique16 = dict.fromkeys(komb16).keys() komb_unique17 = dict.fromkeys(komb17).keys() komb_unique18 = dict.fromkeys(komb18).keys() komb_unique19 = dict.fromkeys(komb19).keys() komb_unique20 = dict.fromkeys(komb20).keys() komb_unique21 = dict.fromkeys(komb21).keys() komb_unique01 = dict.fromkeys(kombinaa).keys() komb_unique02 = dict.fromkeys(kombinaac).keys() komb_unique03 = dict.fromkeys(kombinaaw).keys() komb_unique04 = dict.fromkeys(kombinaak).keys() komb_unique05 = dict.fromkeys(word).keys() komb_unique07 = dict.fromkeys(komb001).keys() komb_unique08 = dict.fromkeys(komb002).keys() komb_unique09 = dict.fromkeys(komb003).keys() komb_unique010 = dict.fromkeys(komb004).keys() komb_unique011 = dict.fromkeys(komb005).keys() komb_unique012 = dict.fromkeys(komb006).keys() uniqlist = bdss+wbdss+kbdss+reverse+komb_unique01+komb_unique02+komb_unique03+komb_unique04+komb_unique05+komb_unique1+komb_unique2+komb_unique3+komb_unique4+komb_unique5+komb_unique6+komb_unique7+komb_unique8+komb_unique9+komb_unique10+komb_unique11+komb_unique12+komb_unique13+komb_unique14+komb_unique15+komb_unique16+komb_unique17+komb_unique18+komb_unique19+komb_unique20+komb_unique21+komb_unique07+komb_unique08+komb_unique09+komb_unique010+komb_unique011+komb_unique012 unique_lista = dict.fromkeys(uniqlist).keys() unique_leet = [] if leetmode == "y": for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too... x = x.replace('a',a) x = x.replace('i',i) x = x.replace('e',e) x = x.replace('t',t) x = x.replace('o',o) x = x.replace('s',s) x = x.replace('g',g) x = x.replace('z',z) unique_leet.append(x) unique_list = unique_lista + unique_leet unique_list_finished = [] for x in unique_list: if len(x) > wcfrom and len(x) < wcto: unique_list_finished.append(x) unique_list_finished.sort() f = open ( name+'.txt', 'w' ) f.write (os.linesep.join(unique_list_finished)) f = open ( name+'.txt', 'r' ) lines = 0 for line in f: lines += 1 f.close() print "[+] Saving dictionary to \033[1;31m"+name+".txt\033[1;m, counting \033[1;31m"+str(lines)+"\033[1;m words." print "[+] Now load your pistolero with \033[1;31m"+name+".txt\033[1;m and shoot! Good luck!" exit() elif sys.argv[1] == '-a': url = config.get('alecto','alectourl') print "\r\n[+] Checking if alectodb is not present..." if os.path.isfile('alectodb.csv.gz') == 0: print "[+] Downloading alectodb.csv.gz..." webFile = urllib.urlopen(url) localFile = open(url.split('/')[-1], 'w') localFile.write(webFile.read()) webFile.close() localFile.close() f = gzip.open('alectodb.csv.gz', 'rb') data = csv.reader(f) usernames = [] passwords = [] for row in data: usernames.append(row[5]) passwords.append(row[6]) gus = list(set(usernames)) gpa = list(set(passwords)) gus.sort() gpa.sort() print "\r\n[+] Exporting to alectodb-usernames.txt and alectodb-passwords.txt\r\n[+] Done." 
    f = open ( 'alectodb-usernames.txt', 'w' )
    f.write (os.linesep.join(gus))
    f.close()
    f = open ( 'alectodb-passwords.txt', 'w' )
    f.write (os.linesep.join(gpa))
    f.close()
    sys.exit()

elif sys.argv[1] == '-l':

    ftpname = config.get('downloader','ftpname')
    ftpurl = config.get('downloader','ftpurl')
    ftppath = config.get('downloader','ftppath')
    ftpuser = config.get('downloader','ftpuser')
    ftppass = config.get('downloader','ftppass')

    if os.path.isdir('dictionaries') == 0:
        os.mkdir('dictionaries')

    print " \r\n  Choose the section you want to download:\r\n"
    print "     1   Moby            14      french          27      places"
    print "     2   afrikaans       15      german          28      polish"
    print "     3   american        16      hindi           29      random"
    print "     4   aussie          17      hungarian       30      religion"
    print "     5   chinese         18      italian         31      russian"
    print "     6   computer        19      japanese        32      science"
    print "     7   croatian        20      latin           33      spanish"
    print "     8   czech           21      literature      34      swahili"
    print "     9   danish          22      movieTV         35      swedish"
    print "     10  databases       23      music           36      turkish"
    print "     11  dictionaries    24      names           37      yiddish"
    print "     12  dutch           25      net             38      exit program"
    print "     13  finnish         26      norwegian       \r\n"
    print " \r\n  Files will be downloaded from "+ftpname+" repository"
    print " \r\n  Tip: After downloading wordlist, you can improve it with -w option\r\n"

    filedown = raw_input("[>] Enter number: ")
    while filedown.isdigit() == 0:
        print "\r\n[-] Wrong choice. "
        filedown = raw_input("[>] Enter number: ")
    filedown = str(filedown)
    while int(filedown) > 38:
        print "\r\n[-] Wrong choice. "
        filedown = raw_input("[>] Enter number: ")
        filedown = str(filedown)

    def handleDownload(block):
        file.write(block)
        print ".",

    def downloader():
        ftp.login(ftpuser, ftppass)
        ftp.cwd(ftppath)

    def filequitter():
        file.close()
        print ' done.'

    if filedown == "1":
        print "\r\n[+] connecting...\r\n"
        ftp = ftplib.FTP(ftpurl)
        downloader()
        ftp.cwd('Moby')
        if os.path.isdir('dictionaries/Moby/') == 0:
            os.mkdir('dictionaries/Moby/')
        dire = 'dictionaries/Moby/'

        file = open(dire+'mhyph.tar.gz', 'wb')
        print "\r\n[+] downloading mhyph.tar.gz..."
        ftp.retrbinary('RETR ' + 'mhyph.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'mlang.tar.gz', 'wb')
        print "\r\n[+] downloading mlang.tar.gz..."
        ftp.retrbinary('RETR ' + 'mlang.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'moby.tar.gz', 'wb')
        print "\r\n[+] downloading moby.tar.gz..."
        ftp.retrbinary('RETR ' + 'moby.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'mpos.tar.gz', 'wb')
        print "\r\n[+] downloading mpos.tar.gz..."
        ftp.retrbinary('RETR ' + 'mpos.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'mpron.tar.gz', 'wb')
        print "\r\n[+] downloading mpron.tar.gz..."
        ftp.retrbinary('RETR ' + 'mpron.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'mthes.tar.gz', 'wb')
        print "\r\n[+] downloading mthes.tar.gz..."
        ftp.retrbinary('RETR ' + 'mthes.tar.gz', handleDownload)
        filequitter()

        file = open(dire+'mwords.tar.gz', 'wb')
        print "\r\n[+] downloading mwords.tar.gz..."
        ftp.retrbinary('RETR ' + 'mwords.tar.gz', handleDownload)
        filequitter()

        print '[+] files saved to '+ dire
        ftp.quit()
        exit()

    if filedown == "2":
        print "[+] connecting..."
        ftp = ftplib.FTP(ftpurl)
        downloader()
        ftp.cwd('afrikaans')
        if os.path.isdir('dictionaries/afrikaans/') == 0:
            os.mkdir('dictionaries/afrikaans/')
        dire = 'dictionaries/afrikaans/'

        file = open(dire+'afr_dbf.zip', 'wb')
        print "\r\n[+] downloading afr_dbf.zip..."
        ftp.retrbinary('RETR ' + 'afr_dbf.zip', handleDownload)
        filequitter()

        print '[+] file saved to '+ dire
        ftp.quit()
        exit()

    if filedown == "3":
        print "[+] connecting..."
ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('american') if os.path.isdir('dictionaries/american/') == 0: os.mkdir('dictionaries/american/') dire = 'dictionaries/american/' file = open(dire+'dic-0294.tar.gz', 'wb') print "\r\n[+] downloading dic-0294.tar.gz..." ftp.retrbinary('RETR ' + 'dic-0294.tar.gz', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "4": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('aussie') if os.path.isdir('dictionaries/aussie/') == 0: os.mkdir('dictionaries/aussie/') dire = 'dictionaries/aussie/' file = open(dire+'oz.Z', 'wb') print "\r\n[+] downloading oz.Z..." ftp.retrbinary('RETR ' + 'oz.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "5": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('chinese') if os.path.isdir('dictionaries/chinese/') == 0: os.mkdir('dictionaries/chinese/') dire = 'dictionaries/chinese/' file = open(dire+'chinese.Z', 'wb') print "\r\n[+] downloading chinese.Z..." ftp.retrbinary('RETR ' + 'chinese.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "6": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('computer') if os.path.isdir('dictionaries/computer/') == 0: os.mkdir('dictionaries/computer/') dire = 'dictionaries/computer/' file = open(dire+'Domains.Z', 'wb') print "\r\n[+] downloading Domains.Z..." ftp.retrbinary('RETR ' + 'Domains.Z', handleDownload) filequitter() file = open(dire+'Dosref.Z', 'wb') print "\r\n[+] downloading Dosref.Z..." ftp.retrbinary('RETR ' + 'Dosref.Z', handleDownload) filequitter() file = open(dire+'Ftpsites.Z', 'wb') print "\r\n[+] downloading Ftpsites.Z..." ftp.retrbinary('RETR ' + 'Ftpsites.Z', handleDownload) filequitter() file = open(dire+'Jargon.Z', 'wb') print "\r\n[+] downloading Jargon.Z..." ftp.retrbinary('RETR ' + 'Jargon.Z', handleDownload) filequitter() file = open(dire+'common-passwords.txt.Z', 'wb') print "\r\n[+] downloading common-passwords.txt.Z..." ftp.retrbinary('RETR ' + 'common-passwords.txt.Z', handleDownload) filequitter() file = open(dire+'etc-hosts.Z', 'wb') print "\r\n[+] downloading etc-hosts.Z..." ftp.retrbinary('RETR ' + 'etc-hosts.Z', handleDownload) filequitter() file = open(dire+'foldoc.gz', 'wb') print "\r\n[+] downloading foldoc.gz..." ftp.retrbinary('RETR ' + 'foldoc.gz', handleDownload) filequitter() file = open(dire+'language-list.Z', 'wb') print "\r\n[+] downloading language-list.Z..." ftp.retrbinary('RETR ' + 'language-list.Z', handleDownload) filequitter() file = open(dire+'unix.Z', 'wb') print "\r\n[+] downloading unix.Z..." ftp.retrbinary('RETR ' + 'unix.Z', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "7": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('croatian') if os.path.isdir('dictionaries/croatian/') == 0: os.mkdir('dictionaries/croatian/') dire = 'dictionaries/croatian/' file = open(dire+'croatian.gz', 'wb') print "\r\n[+] downloading croatian.gz..." ftp.retrbinary('RETR ' + 'croatian.gz', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "8": print "[+] connecting..." 
ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('czech') if os.path.isdir('dictionaries/czech/') == 0: os.mkdir('dictionaries/czech/') dire = 'dictionaries/czech/' file = open(dire+'czech-wordlist-ascii-cstug-novak.Z', 'wb') print "\r\n[+] downloading czech-wordlist-ascii-cstug-novak.Z..." ftp.retrbinary('RETR ' + 'czech-wordlist-ascii-cstug-novak.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "9": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('danish') if os.path.isdir('dictionaries/danish/') == 0: os.mkdir('dictionaries/danish/') dire = 'dictionaries/danish/' file = open(dire+'danish.words.Z', 'wb') print "\r\n[+] downloading danish.words.Z..." ftp.retrbinary('RETR ' + 'danish.words.Z', handleDownload) filequitter() file = open(dire+'dansk.zip', 'wb') print "\r\n[+] downloading dansk.zip..." ftp.retrbinary('RETR ' + 'dansk.zip', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "10": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('databases') if os.path.isdir('dictionaries/databases/') == 0: os.mkdir('dictionaries/databases/') dire = 'dictionaries/databases/' file = open(dire+'acronyms.Z', 'wb') print "\r\n[+] downloading acronyms.Z..." ftp.retrbinary('RETR ' + 'acronyms.Z', handleDownload) filequitter() file = open(dire+'att800.Z', 'wb') print "\r\n[+] downloading att800.Z..." ftp.retrbinary('RETR ' + 'att800.Z', handleDownload) filequitter() file = open(dire+'computer-companies.Z', 'wb') print "\r\n[+] downloading computer-companies.Z..." ftp.retrbinary('RETR ' + 'computer-companies.Z', handleDownload) filequitter() file = open(dire+'world_heritage.Z', 'wb') print "\r\n[+] downloading world_heritage.Z..." ftp.retrbinary('RETR ' + 'world_heritage.Z', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "11": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('dictionaries') if os.path.isdir('dictionaries/dictionaries/') == 0: os.mkdir('dictionaries/dictionaries/') dire = 'dictionaries/dictionaries/' file = open(dire+'Antworth.gz', 'wb') print "\r\n[+] downloading Antworth.gz..." ftp.retrbinary('RETR ' + 'Antworth.gz', handleDownload) filequitter() file = open(dire+'CRL.words.gz', 'wb') print "\r\n[+] downloading CRL.words.gz..." ftp.retrbinary('RETR ' + 'CRL.words.gz', handleDownload) filequitter() file = open(dire+'Roget.words.gz', 'wb') print "\r\n[+] downloading Roget.words.gz..." ftp.retrbinary('RETR ' + 'Roget.words.gz', handleDownload) filequitter() file = open(dire+'Unabr.dict.gz', 'wb') print "\r\n[+] downloading Unabr.dict.gz..." ftp.retrbinary('RETR ' + 'Unabr.dict.gz', handleDownload) filequitter() file = open(dire+'Unix.dict.gz', 'wb') print "\r\n[+] downloading Unix.dict.gz..." ftp.retrbinary('RETR ' + 'Unix.dict.gz', handleDownload) filequitter() file = open(dire+'englex-dict.gz', 'wb') print "\r\n[+] downloading englex-dict.gz..." ftp.retrbinary('RETR ' + 'englex-dict.gz', handleDownload) filequitter() file = open(dire+'knuth_britsh.gz', 'wb') print "\r\n[+] downloading knuth_britsh.gz..." ftp.retrbinary('RETR ' + 'knuth_britsh.gz', handleDownload) filequitter() file = open(dire+'knuth_words.gz', 'wb') print "\r\n[+] downloading knuth_words.gz..." ftp.retrbinary('RETR ' + 'knuth_words.gz', handleDownload) filequitter() file = open(dire+'pocket-dic.gz', 'wb') print "\r\n[+] downloading pocket-dic.gz..." 
ftp.retrbinary('RETR ' + 'pocket-dic.gz', handleDownload) filequitter() file = open(dire+'shakesp-glossary.gz', 'wb') print "\r\n[+] downloading shakesp-glossary.gz..." ftp.retrbinary('RETR ' + 'shakesp-glossary.gz', handleDownload) filequitter() file = open(dire+'special.eng.gz', 'wb') print "\r\n[+] downloading special.eng.gz..." ftp.retrbinary('RETR ' + 'special.eng.gz', handleDownload) filequitter() file = open(dire+'words-english.gz', 'wb') print "\r\n[+] downloading words-english.gz..." ftp.retrbinary('RETR ' + 'words-english.gz', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "12": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('dutch') if os.path.isdir('dictionaries/dutch/') == 0: os.mkdir('dictionaries/dutch/') dire = 'dictionaries/dutch/' file = open(dire+'words.dutch.Z', 'wb') print "\r\n[+] downloading words.dutch.Z..." ftp.retrbinary('RETR ' + 'words.dutch.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "13": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('finnish') if os.path.isdir('dictionaries/finnish/') == 0: os.mkdir('dictionaries/finnish/') dire = 'dictionaries/finnish/' file = open(dire+'finnish.gz', 'wb') print "\r\n[+] downloading finnish.gz..." ftp.retrbinary('RETR ' + 'finnish.gz', handleDownload) filequitter() file = open(dire+'firstnames.finnish.gz', 'wb') print "\r\n[+] downloading firstnames.finnish.gz..." ftp.retrbinary('RETR ' + 'firstnames.finnish.gz', handleDownload) filequitter() file = open(dire+'words.finnish.FAQ.gz', 'wb') print "\r\n[+] downloading words.finnish.FAQ.gz..." ftp.retrbinary('RETR ' + 'words.finnish.FAQ.gz', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "14": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('french') if os.path.isdir('dictionaries/french/') == 0: os.mkdir('dictionaries/french/') dire = 'dictionaries/french/' file = open(dire+'dico.Z', 'wb') print "\r\n[+] downloading dico.Z..." ftp.retrbinary('RETR ' + 'dico.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "15": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('german') if os.path.isdir('dictionaries/german/') == 0: os.mkdir('dictionaries/german/') dire = 'dictionaries/german/' file = open(dire+'deutsch.dic.Z', 'wb') print "\r\n[+] downloading deutsch.dic.Z..." ftp.retrbinary('RETR ' + 'deutsch.dic.Z', handleDownload) filequitter() file = open(dire+'germanl.Z', 'wb') print "\r\n[+] downloading germanl.Z..." ftp.retrbinary('RETR ' + 'germanl.Z', handleDownload) filequitter() file = open(dire+'words.german.Z', 'wb') print "\r\n[+] downloading words.german.Z..." ftp.retrbinary('RETR ' + 'words.german.Z', handleDownload) filequitter() print '[+] files saved to '+ dire ftp.quit() exit() if filedown == "16": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('hindi') if os.path.isdir('dictionaries/hindi/') == 0: os.mkdir('dictionaries/hindi/') dire = 'dictionaries/hindi/' file = open(dire+'hindu-names.Z', 'wb') print "\r\n[+] downloading hindu-names.Z..." ftp.retrbinary('RETR ' + 'hindu-names.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "17": print "[+] connecting..." 
ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('hungarian') if os.path.isdir('dictionaries/hungarian/') == 0: os.mkdir('dictionaries/hungarian/') dire = 'dictionaries/hungarian/' file = open(dire+'hungarian.gz', 'wb') print "\r\n[+] downloading hungarian.gz..." ftp.retrbinary('RETR ' + 'hungarian.gz', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "18": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('italian') if os.path.isdir('dictionaries/italian/') == 0: os.mkdir('dictionaries/italian/') dire = 'dictionaries/italian/' file = open(dire+'words.italian.Z', 'wb') print "\r\n[+] downloading words.italian.Z..." ftp.retrbinary('RETR ' + 'words.italian.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "19": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('japanese') if os.path.isdir('dictionaries/japanese/') == 0: os.mkdir('dictionaries/japanese/') dire = 'dictionaries/japanese/' file = open(dire+'words.japanese.Z', 'wb') print "\r\n[+] downloading words.japanese.Z..." ftp.retrbinary('RETR ' + 'words.japanese.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "20": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('latin') if os.path.isdir('dictionaries/latin/') == 0: os.mkdir('dictionaries/latin/') dire = 'dictionaries/latin/' file = open(dire+'wordlist.aug.Z', 'wb') print "\r\n[+] downloading wordlist.aug.Z..." ftp.retrbinary('RETR ' + 'wordlist.aug.Z', handleDownload) filequitter() print '[+] file saved to '+ dire ftp.quit() exit() if filedown == "21": print "[+] connecting..." ftp = ftplib.FTP(ftpurl) downloader() ftp.cwd('literature') if os.path.isdir('dictionaries/literature/') == 0: os.mkdir('dictionaries/literature/') dire = 'dictionaries/literature/' file = open(dire+'LCarrol.gz', 'wb') print "\r\n[+] downloading LCarrol.gz..." ftp.retrbinary('RETR ' + 'LCarrol.gz', handleDownload) filequitter() file = open(dire+'Paradise.Lost.gz', 'wb') print "\r\n[+] downloading Paradise.Lost.gz..." ftp.retrbinary('RETR ' + 'Paradise.Lost.gz', handleDownload) filequitter() file = open(dire+'aeneid.gz', 'wb') print "\r\n[+] downloading aeneid.gz..." ftp.retrbinary('RETR ' + 'aeneid.gz', handleDownload) filequitter() file = open(dire+'arthur.gz', 'wb') print "\r\n[+] downloading arthur.gz..." ftp.retrbinary('RETR ' + 'arthur.gz', handleDownload) filequitter() file = open(dire+'cartoon.gz', 'wb') print "\r\n[+] downloading cartoon.gz..." ftp.retrbinary('RETR ' + 'cartoon.gz', handleDownload) filequitter() file = open(dire+'cartoons-olivier.gz', 'wb') print "\r\n[+] downloading cartoons-olivier.gz..." ftp.retrbinary('RETR ' + 'cartoons-olivier.gz', handleDownload) filequitter() file = open(dire+'charlemagne.gz', 'wb') print "\r\n[+] downloading charlemagne.gz..." ftp.retrbinary('RETR ' + 'charlemagne.gz', handleDownload) filequitter() file = open(dire+'fable.gz', 'wb') print "\r\n[+] downloading fable.gz..." ftp.retrbinary('RETR ' + 'fable.gz', handleDownload) filequitter() file = open(dire+'iliad.gz', 'wb') print "\r\n[+] downloading iliad.gz..." ftp.retrbinary('RETR ' + 'iliad.gz', handleDownload) filequitter() file = open(dire+'myths-legends.gz', 'wb') print "\r\n[+] downloading myths-legends.gz..." ftp.retrbinary('RETR ' + 'myths-legends.gz', handleDownload) filequitter() file = open(dire+'odyssey.gz', 'wb') print "\r\n[+] downloading odyssey.gz..." 
        ftp.retrbinary('RETR ' + 'odyssey.gz', handleDownload)
        filequitter()

        file = open(dire+'sf.gz', 'wb')
        print "\r\n[+] downloading sf.gz..."
        ftp.retrbinary('RETR ' + 'sf.gz', handleDownload)
        filequitter()

        file = open(dire+'shakespeare.gz', 'wb')
        print "\r\n[+] downloading shakespeare.gz..."
        ftp.retrbinary('RETR ' + 'shakespeare.gz', handleDownload)
        filequitter()

        file = open(dire+'tolkien.words.gz', 'wb')
        print "\r\n[+] downloading tolkien.words.gz..."
        ftp.retrbinary('RETR ' + 'tolkien.words.gz', handleDownload)
        filequitter()

        print '[+] files saved to '+ dire
        ftp.quit()
        exit()

    # Menu choices 22-37 all follow the same pattern (connect, enter the
    # remote directory, fetch every wordlist in it, quit), so they are driven
    # by one table of (remote directory, file names) instead of one
    # copy-pasted block per choice.
    wordlists = {
        "22": ('movieTV', ['Movies.Z', 'Python.Z', 'Trek.Z']),
        "23": ('music', ['music-classical.gz', 'music-country.gz',
                         'music-jazz.gz', 'music-other.gz', 'music-rock.gz',
                         'music-shows.gz', 'rock-groups.gz']),
        "24": ('names', ['ASSurnames.gz', 'Congress.gz', 'Family-Names.gz',
                         'Given-Names.gz', 'actor-givenname.gz',
                         'actor-surname.gz', 'cis-givenname.gz',
                         'cis-surname.gz', 'crl-names.gz', 'famous.gz',
                         'fast-names.gz', 'female-names-kantr.gz',
                         'female-names.gz', 'givennames-ol.gz',
                         'male-names-kantr.gz', 'male-names.gz',
                         'movie-characters.gz', 'names.french.gz',
                         'names.hp.gz', 'other-names.gz', 'shakesp-names.gz',
                         'surnames-ol.gz', 'surnames.finnish.gz',
                         'usenet-names.gz']),
        "25": ('net', ['hosts-txt.Z', 'inet-machines.Z', 'usenet-loginids.Z',
                       'usenet-machines.Z', 'uunet-sites.Z']),
        "26": ('norwegian', ['words.norwegian.Z']),
        "27": ('places', ['Colleges.Z', 'US-counties.Z', 'World.factbook.Z',
                          'Zipcodes.Z', 'places.Z']),
        "28": ('polish', ['words.polish.Z']),
        "29": ('random', ['Ethnologue.gz', 'abbr.gz', 'chars.gz', 'dogs.gz',
                          'drugs.gz', 'junk.gz', 'numbers.gz', 'phrases.gz',
                          'sports.gz', 'statistics.gz']),
        "30": ('religion', ['Koran.Z', 'kjbible.Z', 'norse.Z']),
        "31": ('russian', ['russian.lst.Z', 'russian_words.koi8.Z']),
        "32": ('science', ['Acr-diagnosis.gz', 'Algae.gz', 'Bacteria.gz',
                           'Fungi.gz', 'Microalgae.gz', 'Viruses.gz',
                           'asteroids.Z', 'biology.Z', 'tech.gz']),
        "33": ('spanish', ['words.spanish.Z']),
        "34": ('swahili', ['swahili.gz']),
        "35": ('swedish', ['words.swedish.Z']),
        "36": ('turkish', ['turkish.dict.gz']),
        "37": ('yiddish', ['yiddish.Z']),
    }

    if filedown in wordlists:
        remotedir, filenames = wordlists[filedown]
        print "[+] connecting..."
        ftp = ftplib.FTP(ftpurl)
        downloader()
        ftp.cwd(remotedir)
        dire = 'dictionaries/' + remotedir + '/'
        if os.path.isdir(dire) == 0:
            os.mkdir(dire)
        for name in filenames:
            file = open(dire + name, 'wb')
            print "\r\n[+] downloading " + name + "..."
            ftp.retrbinary('RETR ' + name, handleDownload)
            filequitter()
        if len(filenames) > 1:
            print '[+] files saved to ' + dire
        else:
            print '[+] file saved to ' + dire
        ftp.quit()
        exit()
    else:
        print '[-] leaving.'
        exit()
else:
    print "\r\n[Usage]: "+sys.argv[0] +" [OPTIONS] \r\n"
    print "[Help]: "+sys.argv[0] +" -h\r\n"
    exit()
apache-2.0
-5,743,812,412,542,314,000
29.003751
478
0.633408
false
2.578812
true
false
false
ISCDtoolbox/FaciLe
pipeline/createDatabase.py
1
2873
import os
import sys
import numpy as np
from copy import deepcopy
import argparse

#Parallel
import subprocess as sp
import multiprocessing as mp

sys.path.append(os.path.join(os.path.dirname(__file__), "../projects/tools"))
import msh
import executable_paths as exe


def parse():
    parser = argparse.ArgumentParser(description="Creates mandible and masseter files for the database creation")
    parser.add_argument("-i", "--inputDir", help="input directory", type=str, required=True)
    parser.add_argument("-o", "--outputDir", help="output directory", type=str, required=True)
    return parser.parse_args()


def checkArgs(args):
    if not os.path.exists(args.inputDir):
        print args.inputDir + " is not a valid directory"
        sys.exit()
    # Exit if the input directory contains no regular (non-hidden) files.
    if len([f for f in os.listdir(args.inputDir) if f[0] != "."]) == 0:
        print args.inputDir + " is an empty directory"
        sys.exit()
    if not os.path.exists(args.outputDir):
        print args.outputDir + " does not exist, creating"
        os.system("mkdir " + args.outputDir)
    args.inputDir = os.path.abspath(args.inputDir)
    args.outputDir = os.path.abspath(args.outputDir)


def command(cmd, displayOutput=False):
    err = 1
    print "Running the command '" + cmd + "'"
    if displayOutput:
        err = os.system(cmd)
    else:
        err = os.system(cmd + " > tmp_out.txt 2>tmp_err.txt")
    if err:
        print "An error happened while executing:\n"+cmd+"\nLook in tmp_out.txt or tmp_err.txt for info\nExiting..."
        sys.exit()
    else:
        os.system("rm tmp_out.txt tmp_err.txt >/dev/null 2>&1")


def work(in_file):
    """Defines the work unit on an input file"""
    root = '.'.join(in_file.split("/")[-1].split(".")[:-1])
    if not os.path.exists("tmp_"+root):
        os.mkdir("tmp_"+root)
    os.chdir("tmp_"+root)
    os.system("cp /home/norgeot/dev/own/FaciLe/projects/warping/demo/sphere.o1.mesh ./sphere.mesh")
    cmd = " ".join([exe.processSkull, "-i " + in_file, "-t ../../OsTemplate.mesh", ">", root+"_OUT.txt"])
    print "Starting the skull processing for " + in_file
    #os.system(cmd)
    print "Skull processing finished for " + in_file

    #clean the working directories
    for ext in [".warped.mesh", ".box.1.o.", "mat", "_OUT.txt"]:
        for f in os.listdir("."):
            if ext in f:
                os.rename(f, os.path.join(args.outputDir, f))
    for f in os.listdir("."):
        if ".mesh" in f or ".sol" in f:
            #os.remove(f)
            #print f + " was successfully removed"
            pass

    return 0


if __name__ == "__main__":
    args = parse()
    checkArgs(args)
    files = [os.path.join(args.inputDir, f) for f in os.listdir(args.inputDir) if ".mesh" in f]

    #Set up the parallel task pool to use all available processors
    count = mp.cpu_count()
    pool = mp.Pool(processes=count)
    pool.map(work, files)
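# Hypothetical invocation sketch (not part of the original script): with a
# directory of .mesh files, the pipeline would be launched as
#
#   python createDatabase.py -i /path/to/meshes -o /path/to/output
#
# where -i/--inputDir and -o/--outputDir are the two required arguments
# defined in parse() above; the paths themselves are illustrative.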
gpl-3.0
9,033,572,557,116,844,000
31.647727
116
0.622346
false
3.348485
false
false
false
iain-peddie/well-behaved-python
tests/WellBehavedPythonTests/Discovery/ModuleExaminerTests.py
1
3114
#!/usr/bin/env python3

# Copyright 2013 Iain Peddie inr314159@hotmail.com
#
#    This file is part of WellBehavedPython
#
#    WellBehavedPython is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    WellBehavedPython is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with WellBehavedPython. If not, see <http://www.gnu.org/licenses/>.

from WellBehavedPython.api import *
from WellBehavedPython.Engine.TestCase import TestCase
from WellBehavedPython.Discovery.ModuleExaminer import ModuleExaminer


class ModuleExaminerTests(TestCase):

    def test_examiner_can_find__only_class_in_simple_module(self):
        # Where
        examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleModule')

        # When
        classes = examiner.listAllClasses()
        # The classes have been imported

        # Then
        from ..Samples import SampleModule
        expect(classes).toEqual([SampleModule.SampleTests])

    def test_examiner_can_find_all_classes_in_complex_module(self):
        # Where
        examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleComplexModule')

        # When
        classes = examiner.listAllClasses()
        # The classes have been imported

        # Then
        from ..Samples import SampleComplexModule
        expect(classes).toContain(SampleComplexModule.SampleFirstTests)
        expect(classes).toContain(SampleComplexModule.SampleSecondTests)
        expect(classes).toContain(SampleComplexModule.StandaloneClass)

    def test_examiner_can_find_all_modules(self):
        # Where
        examiner = ModuleExaminer('WellBehavedPythonTests.Samples')

        # When
        modules = examiner.listAllModules()

        # Then
        from ..Samples import SampleModule
        from ..Samples import SampleComplexModule
        expect(modules).toContain('WellBehavedPythonTests.Samples.SampleModule')
        expect(modules).toContain('WellBehavedPythonTests.Samples.SampleComplexModule')

    def test_examiner_is_not_recursive_for_modules(self):
        # Where
        examiner = ModuleExaminer('WellBehavedPythonTests')

        # When
        modules = examiner.listAllModules()

        # Then
        expect(modules).toContain('WellBehavedPythonTests.BackwardsCompatibilityTests')
        expect(modules).Not.toContain('WellBehavedPythonTests.Discovery.Samples.SampleModule')

    def test_examining_can_find_subpackages(self):
        # Where
        examiner = ModuleExaminer('WellBehavedPythonTests')

        # When
        packages = examiner.listAllPackages()

        # Then
        expect(packages).toContain('WellBehavedPythonTests.Discovery')
gpl-3.0
8,893,566,164,321,204,000
35.635294
95
0.706487
false
3.977011
true
false
false
QualiSystems/Azure-Shell
package/cloudshell/cp/azure/domain/common/vm_details_provider.py
1
5896
from azure.mgmt.compute.models import StorageAccountTypes
from cloudshell.cp.core.models import VmDetailsProperty, VmDetailsData, VmDetailsNetworkInterface

from cloudshell.cp.azure.domain.vm_management.operations.deploy_operation import get_ip_from_interface_name


class VmDetailsProvider(object):
    def __init__(self, network_service, resource_id_parser):
        """
        :param cloudshell.cp.azure.domain.services.network_service.NetworkService network_service:
        :param AzureResourceIdParser resource_id_parser:
        :return:
        """
        self.network_service = network_service
        self.resource_id_parser = resource_id_parser

    def create(self, instance, is_market_place, logger, network_client, group_name):
        """
        :param group_name:
        :param network_client:
        :param instance: azure.mgmt.compute.models.VirtualMachine
        :param is_market_place: bool
        :param logging.Logger logger:
        :return:
        """
        if is_market_place:
            vm_instance_data = self._get_vm_instance_data_for_market_place(instance)
            logger.info("VM {} was created via market place.".format(instance.name))
        else:
            vm_instance_data = self._get_vm_instance_data_for_custom_image(instance)
            logger.info("VM {} was created via custom image.".format(instance.name))

        vm_network_data = self._get_vm_network_data(instance, network_client, group_name, logger)

        return VmDetailsData(vmInstanceData=vm_instance_data, vmNetworkData=vm_network_data)

    @staticmethod
    def _get_vm_instance_data_for_market_place(instance):
        data = [
            VmDetailsProperty(key='Image Publisher', value=instance.storage_profile.image_reference.publisher),
            VmDetailsProperty(key='Image Offer', value=instance.storage_profile.image_reference.offer),
            VmDetailsProperty(key='Image SKU', value=instance.storage_profile.image_reference.sku),
            VmDetailsProperty(key='VM Size', value=instance.hardware_profile.vm_size),
            VmDetailsProperty(key='Operating System', value=instance.storage_profile.os_disk.os_type.name),
            VmDetailsProperty(key='Disk Type',
                              value='HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type
                                             == StorageAccountTypes.standard_lrs else 'SSD')
        ]
        return data

    def _get_vm_instance_data_for_custom_image(self, instance):
        image_name = self.resource_id_parser.get_image_name(resource_id=instance.storage_profile.image_reference.id)
        resource_group = self.resource_id_parser.get_resource_group_name(
            resource_id=instance.storage_profile.image_reference.id)
        data = [
            VmDetailsProperty(key='Image', value=image_name),
            VmDetailsProperty(key='Image Resource Group', value=resource_group),
            VmDetailsProperty(key='VM Size', value=instance.hardware_profile.vm_size),
            VmDetailsProperty(key='Operating System', value=instance.storage_profile.os_disk.os_type.name),
            VmDetailsProperty(key='Disk Type',
                              value='HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type
                                             == StorageAccountTypes.standard_lrs else 'SSD')
        ]
        return data

    def _get_vm_network_data(self, instance, network_client, group_name, logger):
        network_interface_objects = []
        for network_interface in instance.network_profile.network_interfaces:
            nic_name = self.resource_id_parser.get_name_from_resource_id(network_interface.id)
            nic = network_client.network_interfaces.get(group_name, nic_name)

            ip_configuration = nic.ip_configurations[0]
            private_ip = ip_configuration.private_ip_address
            public_ip = ''
            network_data = [VmDetailsProperty(key="IP", value=ip_configuration.private_ip_address)]
            subnet_name = ip_configuration.subnet.id.split('/')[-1]

            if ip_configuration.public_ip_address:
                public_ip_name = get_ip_from_interface_name(nic_name)
                public_ip_object = self.network_service.get_public_ip(network_client=network_client,
                                                                      group_name=group_name,
                                                                      ip_name=public_ip_name)
                public_ip = public_ip_object.ip_address

                network_data.append(VmDetailsProperty(key="Public IP", value=public_ip))
                network_data.append(
                    VmDetailsProperty(key="Public IP Type", value=public_ip_object.public_ip_allocation_method))

                logger.info("VM {} was created with public IP '{}'.".format(instance.name, public_ip))

            network_data.append(VmDetailsProperty(key="MAC Address", value=nic.mac_address))

            # The interface object is built only after the (optional) public
            # IP has been resolved, so that publicIpAddress is not left empty
            # when the NIC does have a public address.
            current_interface = VmDetailsNetworkInterface(interfaceId=nic.resource_guid,
                                                          networkId=subnet_name,
                                                          isPrimary=nic.primary,
                                                          networkData=network_data,
                                                          privateIpAddress=private_ip,
                                                          publicIpAddress=public_ip)
            network_interface_objects.append(current_interface)

        return network_interface_objects
apache-2.0
8,312,419,053,083,852,000
53.100917
150
0.61652
false
4.232592
true
false
false
fengjian/libinjection
src/sqlparse2c.py
3
3800
#!/usr/bin/env python
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#

"""
Converts a libinjection JSON data file to a C header (.h) file
"""

import sys


def toc(obj):
    """ main routine """

    print """
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H

#include "libinjection.h"
#include "libinjection_sqli.h"

typedef struct {
    const char *word;
    char type;
} keyword_t;

static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
"""

    #
    # Mapping of character to function
    #
    fnmap = {
        'CHAR_WORD' : 'parse_word',
        'CHAR_WHITE': 'parse_white',
        'CHAR_OP1'  : 'parse_operator1',
        'CHAR_UNARY': 'parse_operator1',
        'CHAR_OP2'  : 'parse_operator2',
        'CHAR_BANG' : 'parse_operator2',
        'CHAR_BACK' : 'parse_backslash',
        'CHAR_DASH' : 'parse_dash',
        'CHAR_STR'  : 'parse_string',
        'CHAR_HASH' : 'parse_hash',
        'CHAR_NUM'  : 'parse_number',
        'CHAR_SLASH': 'parse_slash',
        'CHAR_SEMICOLON'  : 'parse_char',
        'CHAR_COMMA': 'parse_char',
        'CHAR_LEFTPARENS' : 'parse_char',
        'CHAR_RIGHTPARENS': 'parse_char',
        'CHAR_LEFTBRACE'  : 'parse_char',
        'CHAR_RIGHTBRACE' : 'parse_char',
        'CHAR_VAR'  : 'parse_var',
        'CHAR_OTHER': 'parse_other',
        'CHAR_MONEY': 'parse_money',
        'CHAR_TICK' : 'parse_tick',
        'CHAR_UNDERSCORE' : 'parse_underscore',
        'CHAR_USTRING'    : 'parse_ustring',
        'CHAR_QSTRING'    : 'parse_qstring',
        'CHAR_NQSTRING'   : 'parse_nqstring',
        'CHAR_XSTRING'    : 'parse_xstring',
        'CHAR_BSTRING'    : 'parse_bstring',
        'CHAR_ESTRING'    : 'parse_estring',
        'CHAR_BWORD'      : 'parse_bword'
    }

    print
    print "typedef size_t (*pt2Function)(sfilter *sf);"
    print "static const pt2Function char_parse_map[] = {"
    pos = 0
    for character in obj['charmap']:
        print "   &%s, /* %d */" % (fnmap[character], pos)
        pos += 1
    print "};"
    print

    # keywords
    # load them
    keywords = obj['keywords']

    for fingerprint in list(obj[u'fingerprints']):
        fingerprint = '0' + fingerprint.upper()
        keywords[fingerprint] = 'F'

    needhelp = []
    for key in keywords.iterkeys():
        if key != key.upper():
            needhelp.append(key)

    for key in needhelp:
        tmpv = keywords[key]
        del keywords[key]
        keywords[key.upper()] = tmpv

    print "static const keyword_t sql_keywords[] = {"
    for k in sorted(keywords.keys()):
        if len(k) > 31:
            sys.stderr.write("ERROR: keyword greater than 32 chars\n")
            sys.exit(1)
        print "    {\"%s\", '%s'}," % (k, keywords[k])
    print "};"
    print "static const size_t sql_keywords_sz = %d;" % (len(keywords), )

    print "#endif"
    return 0


if __name__ == '__main__':
    import json
    sys.exit(toc(json.load(sys.stdin)))
bsd-3-clause
-4,710,446,708,618,401,000
27.787879
73
0.604211
false
3.071948
false
false
false
inconvergent/differential-cloud
modules/helpers.py
1
1866
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import print_function


def get_args():
    import argparse

    parser = argparse.ArgumentParser()

    parser.add_argument('--procs', type=int, default=4,
                        help='number of processors.')
    parser.add_argument('--nearl', type=float, default=0.003)
    parser.add_argument('--midl', type=float, default=0.008)
    parser.add_argument('--farl', type=float, default=0.05)
    parser.add_argument('--stp', type=float, default=1.0e-7)
    parser.add_argument('--reject', type=float, default=1.0)
    parser.add_argument('--attract', type=float, default=0.3)
    parser.add_argument('--nmax', type=int, default=1000000)
    parser.add_argument('--itt', type=int, default=10000000000)
    parser.add_argument('--vnum', type=int, default=10000000000)
    parser.add_argument('--stat', type=int, default=100)
    parser.add_argument('--export', type=int, default=1000)
    parser.add_argument('--out', type=str, default='./res/res')
    parser.add_argument('--startRad', type=float, default=0.01)
    parser.add_argument('--startNum', type=int, default=100)

    return parser.parse_args()


def make_info_str(args):
    s = ''
    for k in vars(args):
        s += '# ' + str(k) + ': ' + str(getattr(args, k)) + '\n'
    return s


def print_stats(steps, dm, meta=False):
    from time import strftime
    from time import time

    if isinstance(meta, str):
        meta = ' | {:s}'.format(meta)
    else:
        meta = ''

    print(
        '{:s} | stp: {:d} sec: {:.2f} v: {:d}{:s}'
        .format(
            strftime('%d/%m/%y %H:%M:%S'),
            steps,
            time() - dm.get_start_time(),
            dm.get_vnum(),
            meta
        )
    )

    return
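# Hypothetical usage sketch (not part of the original module): print_stats()
# only assumes that `dm` exposes get_start_time() and get_vnum(); the stub
# class below exists purely for illustration and is kept commented out so the
# module's behaviour is unchanged.
#
#   from time import time
#
#   class _FakeDm(object):
#       def __init__(self):
#           self._t0 = time()
#       def get_start_time(self):
#           return self._t0
#       def get_vnum(self):
#           return 42
#
#   args = get_args()
#   print(make_info_str(args))
#   print_stats(steps=10, dm=_FakeDm(), meta='example')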
mit
-6,956,966,956,868,832,000
14.55
59
0.546088
false
3.094527
false
false
false
apacha/OMR-Datasets
omrdatasettools/OmrDataset.py
1
11026
from enum import Enum, auto
from typing import Dict


class OmrDataset(Enum):
    """ The available OMR datasets that can be automatically downloaded with Downloader.py """

    #: The Audiveris OMR dataset from https://github.com/Audiveris/omr-dataset-tools, Copyright 2017 by Hervé Bitteur under AGPL-3.0 license
    Audiveris = auto()

    #: The Baro Single Stave dataset from http://www.cvc.uab.es/people/abaro/datasets.html, Copyright 2019 Arnau Baró, Pau Riba, Jorge Calvo-Zaragoza, and Alicia Fornés under CC-BY-NC-SA 4.0 license
    Baro = auto()

    #: The Capitan dataset from http://grfia.dlsi.ua.es/, License unspecified, free for research purposes
    Capitan = auto()

    #: Custom version of the CVC-MUSCIMA dataset that contains all images in grayscale, binary and with the
    #: following staff-line augmentations: interrupted, kanungo, thickness-variation-v1/2, y-variation-v1/2
    #: typeset-emulation and whitespeckles. (all data augmentations that could be aligned automatically).
    #: The grayscale images are different from the WriterIdentification dataset, in such a way, that they were aligned
    #: to the images from the Staff-Removal dataset. This is the recommended dataset for object detection, as the
    #: MUSCIMA++ annotations can be used with a variety of underlying images.
    #: See https://github.com/apacha/CVC-MUSCIMA to learn more.
    CvcMuscima_MultiConditionAligned = auto()

    #: The larger version of the CVC-MUSCIMA dataset for staff removal in black and white with augmentations
    #: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
    #: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
    CvcMuscima_StaffRemoval = auto()

    #: The smaller version of the CVC-MUSCIMA dataset for writer identification in grayscale
    #: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
    #: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
    CvcMuscima_WriterIdentification = auto()

    #: Edirom dataset. All rights reserved
    Edirom_Bargheer = auto()

    #: Edirom datasets on Freischuetz from https://freischuetz-digital.de/edition.html. All rights reserved.
    Edirom_FreischuetzDigital = auto()

    #: The Fornes Music Symbols dataset from http://www.cvc.uab.es/~afornes/, License unspecified - citation requested
    Fornes = auto()

    #: The official HOMUS dataset from http://grfia.dlsi.ua.es/homus/, License unspecified.
    Homus_V1 = auto()

    #: The improved version of the HOMUS dataset with several bugs-fixed from https://github.com/apacha/Homus
    Homus_V2 = auto()

    #: The MUSCIMA++ dataset from https://ufal.mff.cuni.cz/muscima, Copyright 2017 Jan Hajic jr. under CC-BY-NC-SA 4.0 license
    MuscimaPlusPlus_V1 = auto()

    #: The second version of the MUSCIMA++ dataset from https://github.com/OMR-Research/muscima-pp
    MuscimaPlusPlus_V2 = auto()

    #: A sub-set of the MUSCIMA++ annotations that contains bounding-box annotations for staves, staff measures
    #: and system measures. It was semi-automatically constructed from existing annotations and manually verified
    #: for correctness. The annotations are available in a plain JSON format as well as in the COCO format.
    MuscimaPlusPlus_MeasureAnnotations = auto()

    #: The OpenOMR Symbols dataset from https://sourceforge.net/projects/openomr/, Copyright 2013 by Arnaud F. Desaedeleer under GPL license
    OpenOmr = auto()

    #: The Printed Music Symbols dataset from https://github.com/apacha/PrintedMusicSymbolsDataset, Copyright 2017 by Alexander Pacha under MIT license
    Printed = auto()

    #: The Rebelo dataset (part 1) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
    Rebelo1 = auto()

    #: The Rebelo dataset (part 2) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
    Rebelo2 = auto()

    #: The DeepScore dataset (version 1) with extended vocabulary from https://tuggeluk.github.io/downloads/, License unspecified.
    DeepScores_V1_Extended = auto()

    #: The AudioLabs v1 dataset (aka. Measure Bounding Box Annotation) from https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures, Copyright 2019 by Frank Zalkow, Angel Villar Corrales, TJ Tsai, Vlora Arifi-Müller, and Meinard Müller under CC BY-NC-SA 4.0 license
    AudioLabs_v1 = auto()

    #: The AudioLabs v2 dataset, enhanced with staves, staff measures and the original system measures.
    #: The annotations are available in csv, JSON and COCO format.
    AudioLabs_v2 = auto()

    #: The Accidentals detection dataset by Kwon-Young Choi from https://www-intuidoc.irisa.fr/en/choi_accidentals/, License unspecified.
    ChoiAccidentals = auto()

    def get_dataset_download_url(self) -> str:
        """ Returns the url of the selected dataset.
            Example usage: OmrDataset.Fornes.get_dataset_download_url() """
        return self.dataset_download_urls()[self.name]

    def get_dataset_filename(self) -> str:
        """ Returns the name of the downloaded zip file of a dataset.
            Example usage: OmrDataset.Fornes.get_dataset_filename() """
        return self.dataset_file_names()[self.name]

    def dataset_download_urls(self) -> Dict[str, str]:
        """ Returns a mapping with all URLs, mapped from their enum keys """
        return {
            # Official URL: https://github.com/Audiveris/omr-dataset-tools/tree/master/data/input-images
            "Audiveris": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudiverisOmrDataset.zip",

            # Official URL: http://www.cvc.uab.es/people/abaro/datasets/MUSCIMA_ABARO.zip
            "Baro": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BaroMuscima.zip",

            # Official URL: http://grfia.dlsi.ua.es/cm/projects/timul/databases/BimodalHandwrittenSymbols.zip
            "Capitan": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BimodalHandwrittenSymbols.zip",

            # Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_WI.zip
            "CvcMuscima_WriterIdentification": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_WI.zip",

            # Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_SR.zip
            "CvcMuscima_StaffRemoval": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_SR.zip",

            # Official URL: https://github.com/apacha/CVC-MUSCIMA
            "CvcMuscima_MultiConditionAligned": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_MCA.zip",

            "Edirom_Bargheer": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Bargheer.zip",

            "Edirom_FreischuetzDigital": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/FreischuetzDigital.zip",

            # Official URL: http://www.cvc.uab.es/cvcmuscima/datasets/Music_Symbols.zip
            "Fornes": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Music_Symbols.zip",

            # Official URL: http://grfia.dlsi.ua.es/homus/HOMUS.zip
            "Homus_V1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS.zip",

            # Official URL: https://github.com/apacha/Homus
            "Homus_V2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS-2.0.zip",

            # Official URL: https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11372/LRT-2372/MUSCIMA-pp_v1.0.zip?sequence=1&isAllowed=y
            "MuscimaPlusPlus_V1": "https://github.com/OMR-Research/muscima-pp/releases/download/v1.0/MUSCIMA-pp_v1.0.zip",

            # Official URL: https://github.com/OMR-Research/muscima-pp
            "MuscimaPlusPlus_V2": "https://github.com/OMR-Research/muscima-pp/releases/download/v2.0/MUSCIMA-pp_v2.0.zip",

            "MuscimaPlusPlus_Images": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVC_MUSCIMA_PP_Annotated-Images.zip",

            "MuscimaPlusPlus_MeasureAnnotations": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0-measure-annotations.zip",

            # Official URL: https://sourceforge.net/projects/openomr/
            "OpenOmr": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/OpenOMR-Dataset.zip",

            "Printed": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/PrintedMusicSymbolsDataset.zip",

            "Rebelo1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset1.zip",

            "Rebelo2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset2.zip",

            "DeepScores_V1_Extended": "https://repository.cloudlab.zhaw.ch/artifactory/deepscores/ds_extended.zip",

            # Official URL: https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures
            "AudioLabs_v1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v1.zip",

            "AudioLabs_v2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v2.zip",

            # Official URL: https://www-intuidoc.irisa.fr/en/choi_accidentals/
            "ChoiAccidentals": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/choi_accidentals_dataset.zip"
        }

    def dataset_file_names(self) -> Dict[str, str]:
        """ Returns a map of all file_names, mapped from their enum keys """
        return {
            "Audiveris": "AudiverisOmrDataset.zip",
            "Baro": "BaroMuscima.zip",
            "Capitan": "BimodalHandwrittenSymbols.zip",
            "CvcMuscima_WriterIdentification": "CVCMUSCIMA_WI.zip",
            "CvcMuscima_StaffRemoval": "CVCMUSCIMA_SR.zip",
            "CvcMuscima_MultiConditionAligned": "CVCMUSCIMA_MCA.zip",
            "Edirom_Bargheer": "Bargheer.zip",
            "Edirom_FreischuetzDigital": "FreischuetzDigital.zip",
            "Fornes": "Music_Symbols.zip",
            "Homus_V1": "HOMUS.zip",
            "Homus_V2": "HOMUS-2.0.zip",
            "MuscimaPlusPlus_V1": "MUSCIMA-pp_v1.0.zip",
            "MuscimaPlusPlus_V2": "MUSCIMA-pp_v2.0.zip",
            "MuscimaPlusPlus_Images": "CVC_MUSCIMA_PP_Annotated-Images.zip",
            "MuscimaPlusPlus_MeasureAnnotations": "MUSCIMA-pp_v1.0-measure-annotations.zip",
            "OpenOmr": "OpenOMR-Dataset.zip",
            "Printed": "PrintedMusicSymbolsDataset.zip",
            "Rebelo1": "Rebelo-Music-Symbol-Dataset1.zip",
            "Rebelo2": "Rebelo-Music-Symbol-Dataset2.zip",
            "DeepScores_V1_Extended": "ds_extended.zip",
            "AudioLabs_v1": "AudioLabs_v1.zip",
            "AudioLabs_v2": "AudioLabs_v2.zip",
            "ChoiAccidentals": "choi_accidentals_dataset.zip"
        }
mit
-4,851,323,977,944,946,000
58.551351
319
0.699737
false
3.070513
false
false
false
khrapovs/datastorage
datastorage/compustat.py
1
2589
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Short interest dynamics

"""
from __future__ import print_function, division

import os
import zipfile
import datetime as dt

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

path = os.getenv("HOME") + '/Dropbox/Research/data/Compustat/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
#                                 os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'Compustat/data/')


def date_convert(string):
    return dt.datetime.strptime(string, '%d-%m-%Y')


def import_data():
    """Import data and save it to the disk.

    """
    zf = zipfile.ZipFile(path + 'short_int.zip', 'r')
    name = zf.namelist()[0]
    short_int = pd.read_csv(zf.open(name),
                            converters={'datadate': date_convert})
    columns = {'datadate': 'date', 'SHORTINTADJ': 'short_int',
               'GVKEY': 'gvkey'}
    short_int.rename(columns=columns, inplace=True)
    short_int.set_index(['gvkey', 'date'], inplace=True)
    short_int.sort_index(inplace=True)
    short_int.to_hdf(path + 'short_int.h5', key='short_int')

    print(short_int.head())
    print(short_int.dtypes)
    print('Number of unique companies: ',
          short_int.index.get_level_values('gvkey').nunique())
    print('Number of unique dates: ',
          short_int.index.get_level_values('date').nunique())
    print('Min and Max date: ',
          short_int.index.get_level_values('date').min().date(), ',',
          short_int.index.get_level_values('date').max().date())


def load_data():
    """Load data from disk and check for sanity.

    """
    return pd.read_hdf(path + 'short_int.h5', 'short_int')


def count_companies(short_int):
    """Plot number of companies over time.

    """
    df = short_int.reset_index().groupby('date')['gvkey'].nunique()
    sns.set_context('paper')
    df.plot(figsize=(10, 3))
    plt.show()

    data = df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)]
    data.plot(figsize=(10, 3))
    plt.show()


def mean_short_int(short_int):
    """Mean short interest on each date.

    """
    df = short_int.groupby(level='date')['short_int'].mean()
    sns.set_context('paper')
    df.plot(figsize=(10, 3))
    plt.show()

    df.ix[:dt.date(2004, 12, 31)].plot(figsize=(10, 3))
    plt.show()

    df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)].plot(figsize=(10, 3))
    plt.show()


if __name__ == '__main__':

    import_data()
    short_int = load_data()
    count_companies(short_int)
    mean_short_int(short_int)
mit
2,111,269,302,578,816,300
24.382353
73
0.596756
false
3.172794
false
false
false
Yethiel/re-volt-addon
io_revolt/parameters_in.py
1
4567
""" Name: parameters_in Purpose: Importing cars using the parameters.txt file Description: Imports entire cars using the carinfo module. """ if "bpy" in locals(): import imp imp.reload(common) imp.reload(carinfo) imp.reload(prm_in) import os import bpy import bmesh from mathutils import Vector from . import common from . import carinfo from . import prm_in from .common import * def import_file(filepath, scene): """ Imports a parameters.txt file and loads car body and wheels. """ PARAMETERS[filepath] = carinfo.read_parameters(filepath) # Imports the car with all supported files import_car(scene, PARAMETERS[filepath], filepath) # Removes parameters from dict so they can be reloaded next time PARAMETERS.pop(filepath) def import_car(scene, params, filepath): body = params["model"][params["body"]["modelnum"]] body_loc = to_blender_coord(params["body"]["offset"]) wheel0loc = to_blender_coord(params["wheel"][0]["offset1"]) wheel1loc = to_blender_coord(params["wheel"][1]["offset1"]) wheel2loc = to_blender_coord(params["wheel"][2]["offset1"]) wheel3loc = to_blender_coord(params["wheel"][3]["offset1"]) folder = os.sep.join(filepath.split(os.sep)[:-1]) # Checks if the wheel models exist wheel0_modelnum = int(params["wheel"][0]["modelnum"]) if wheel0_modelnum >= 0: wheel0 = params["model"][wheel0_modelnum] if wheel0.split(os.sep)[-1] in os.listdir(folder): wheel0path = os.sep.join([folder, wheel0.split(os.sep)[-1]]) else: wheel0 = None wheel1_modelnum = int(params["wheel"][1]["modelnum"]) if wheel1_modelnum >= 0: wheel1 = params["model"][wheel1_modelnum] if wheel1.split(os.sep)[-1] in os.listdir(folder): wheel1path = os.sep.join([folder, wheel1.split(os.sep)[-1]]) else: wheel1 = None wheel2_modelnum = int(params["wheel"][2]["modelnum"]) if wheel2_modelnum >= 0: wheel2 = params["model"][wheel2_modelnum] if wheel2.split(os.sep)[-1] in os.listdir(folder): wheel2path = os.sep.join([folder, wheel2.split(os.sep)[-1]]) else: wheel2 = None wheel3_modelnum = int(params["wheel"][3]["modelnum"]) if wheel3_modelnum >= 0: wheel3 = params["model"][wheel3_modelnum] if wheel3.split(os.sep)[-1] in os.listdir(folder): wheel3path = os.sep.join([folder, wheel3.split(os.sep)[-1]]) else: wheel3 = None # Checks if the body is in the same folder if body.split(os.sep)[-1] in os.listdir(folder): bodypath = os.sep.join([folder, body.split(os.sep)[-1]]) # Creates the car body and sets the offset body_obj = prm_in.import_file(bodypath, scene) body_obj.location = body_loc # Creates the wheel objects or an empty if the wheel file is not present if wheel0: wheel = prm_in.import_file(wheel0path, scene) else: wheel = bpy.data.objects.new("wheel 0", None) scene.objects.link(wheel) wheel.empty_draw_type = "SPHERE" wheel.empty_draw_size = 0.1 wheel.location = wheel0loc wheel.parent = body_obj if wheel1: wheel = prm_in.import_file(wheel1path, scene) else: wheel = bpy.data.objects.new("wheel 1", None) scene.objects.link(wheel) wheel.empty_draw_type = "SPHERE" wheel.empty_draw_size = 0.1 wheel.location = wheel1loc wheel.parent = body_obj if wheel2: wheel = prm_in.import_file(wheel2path, scene) else: wheel = bpy.data.objects.new("wheel 2", None) scene.objects.link(wheel) wheel.empty_draw_type = "SPHERE" wheel.empty_draw_size = 0.1 wheel.location = wheel2loc wheel.parent = body_obj if wheel3: wheel = prm_in.import_file(wheel3path, scene) else: wheel = bpy.data.objects.new("wheel 3", None) scene.objects.link(wheel) wheel.empty_draw_type = "SPHERE" wheel.empty_draw_size = 0.1 wheel.location = wheel3loc wheel.parent = body_obj # Aerial 
representation aerial_loc = to_blender_coord(params["aerial"]["offset"]) aerial = bpy.data.objects.new( "aerial", None ) scene.objects.link(aerial) aerial.location = aerial_loc aerial.empty_draw_size = 0.1 aerial.empty_draw_type = 'PLAIN_AXES' aerial.parent = body_obj
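# Hypothetical usage sketch (not part of the original add-on code): inside
# Blender, with the add-on registered, a car could be imported with
#
#   parameters_in.import_file("/path/to/cars/mycar/parameters.txt",
#                             bpy.context.scene)
#
# The path is illustrative; import_file() expects a Re-Volt parameters.txt
# that sits next to the car's .prm model files.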
gpl-3.0
5,301,716,076,450,188,000
30.390071
76
0.611999
false
3.413303
false
false
false
wolcomm/rptk
rptk/base.py
1
3743
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

"""rptk base module."""

from __future__ import print_function
from __future__ import unicode_literals

import inspect
import logging


class BaseObject(object):
    """BaseObject class providing generic logging functionality."""

    def __init__(self):
        """Initialise object."""
        self._log = logging.getLogger(self.__module__)

    def __repr__(self):
        """Provide generic string representation."""
        return "{}() object".format(self.cls_name)

    def __enter__(self):
        """Log context manager entry."""
        self.log_ready_start()
        self.log_ready_done()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Log context manager exit."""
        self.log_exit_start()
        self.log_exit_done()

    @property
    def opts(self):
        """Get self.opts if it exists."""
        return getattr(self, "_opts", None)

    @property
    def log(self):
        """Get the current logger."""
        return self._log

    @property
    def cls_name(self):
        """Get the class name of self."""
        return self.__class__.__name__

    @property
    def current_method(self):
        """Get the currently executing method name."""
        return inspect.currentframe().f_back.f_code.co_name

    def log_init(self):
        """Log entry into the __init__ method."""
        self.log.debug(msg="initialising {} instance".format(self.cls_name))

    def log_init_done(self):
        """Log exit from an __init__ method."""
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller == '__init__':
            self.log.debug(msg="still initialising {} instance"
                               .format(self.cls_name))
        else:
            self.log.debug(msg="{} instance initialised".format(self.cls_name))

    def log_method_enter(self, method=None):
        """Log entry into a class method."""
        self.log.debug(msg="entering method {}.{}"
                           .format(self.cls_name, method))

    def log_method_exit(self, method=None):
        """Log exit from a class method."""
        self.log.debug(msg="leaving method {}.{}"
                           .format(self.cls_name, method))

    def log_ready_start(self):
        """Log start of object initialisation."""
        self.log.debug(msg="preparing {} for use".format(self))

    def log_ready_done(self):
        """Log end of object initialisation."""
        self.log.debug(msg="{} ready for use".format(self))

    def log_exit_start(self):
        """Log start of object cleanup."""
        self.log.debug(msg="cleaning up {}".format(self))

    def log_exit_done(self):
        """Log end of object cleanup."""
        self.log.debug(msg="finished cleaning up {}".format(self))

    def raise_type_error(self, arg=None, cls=None):
        """Raise a TypeError with useful logging."""
        msg = "argument {} ({}) not of type {}".format(arg.__name__, arg, cls)
        self.log.error(msg=msg)
        raise TypeError(msg)

    def raise_runtime_error(self, msg=None):
        """Raise a RuntimeError with useful logging."""
        self.log.error(msg=msg)
        raise RuntimeError(msg)
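# Hypothetical usage sketch (not part of the original module): a subclass
# only needs to call BaseObject.__init__ to get the logging helpers and the
# context-manager behaviour; the class below exists purely for illustration.
#
#   class Worker(BaseObject):
#       def __init__(self):
#           super(Worker, self).__init__()
#           self.log_init()
#           self.log_init_done()
#
#   with Worker() as w:        # logs "preparing ... for use" / "... ready"
#       w.log.info("doing work")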
apache-2.0
8,040,617,203,420,518,000
32.720721
79
0.606733
false
4.064061
false
false
false
platinhom/DailyTools
scripts/ESES_ElementArea.py
1
3679
#! /usr/bin/env python
# -*- coding: utf8 -*-
# Author: Platinhom; Last Updated: 2015-09-10
# Calculate each element surface area by MS_Intersection and also match the atomic area results to the pqr file.
# Usage: python ESES_ElementArea.py file.pqr
#
# Need: MS_Intersection (partition version)
# Note: Only for PQR format input.
# Custom: ESES parameters.

import os,sys

# Modify the ESES program parameter here.
# You can modify to command line input parameter as you like
probe=1.4
grid=0.2
buffer=4.0

if (__name__ == '__main__'):
    fname=sys.argv[1]
    fnamelist=os.path.splitext(fname)
    fxyzr=open(fnamelist[0]+".xyzr",'w')
    fr=open(fname)
    inlines=fr.readlines();
    fr.close();

    # All elements/types of input atoms, used in element area summary.
    atomtypes=[];
    # Write out the corresponding xyzr file.
    for line in inlines:
        # Each atom
        if (line[:4]=="ATOM" or line[:6]=="HETATM"):
            # Atom element here
            tmp=line.split();
            element=tmp[-1].upper();
            atomtypes.append(element);
            # Extract x, y, z, r from pqr to xyzr file
            radius="%10.5f" % float(line[62:70].strip());
            xcoor="%10.5f" % float(line[30:38].strip());
            ycoor="%10.5f" % float(line[38:46].strip());
            zcoor="%10.5f" % float(line[46:54].strip());
            xyzrstr=xcoor+ycoor+zcoor+radius+"\n";
            fxyzr.write(xyzrstr);
    fxyzr.close()

    # Use external ESES program to generate surface and calculate atom area
    ## So you have to put the ESES program in the same directory
    # Output a "partition_area.txt" file saving atom area
    #os.system('./MS_Intersection_Area '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer));
    p=os.popen('./MS_Intersection '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer),'r')
    totalArea="0"
    totalVolume="0"
    while 1:
        line=p.readline();
        if "area:" in line:
            totalArea=line.split(':')[1].split()[0]
        if "volume:" in line:
            totalVolume=line.split(':')[1].split()[0]
        if not line:break

    # Analyze output atom area file
    fa=open("partition_area.txt")
    atomareas=[];# tmp save atom area by atom number
    typedefault=["H","C","N","O","F","S","P","CL","BR","I"];
    typeareas={"H":0.0,"C":0.0,"N":0.0,"O":0.0,"F":0.0,"S":0.0,"P":0.0,"CL":0.0,"BR":0.0,"I":0.0};
    atomnum=0;
    for line in fa:
        tmp=line.split();
        atomarea="%12.6f" % float(tmp[1]);
        atomareas.append(atomarea);
        atype=atomtypes[atomnum];
        typeareas[atype]=typeareas.setdefault(atype,0.0)+float(tmp[1]);
        atomnum=atomnum+1;
    fa.close()

    # Write out pqra file saving atom area
    fwname=fnamelist[0]+"_area.pqra"
    fw=open(fwname,'w')

    # Write the total area for each element.
    ## Notice that here just write out the default elements.
    ## If you want all elements, use "typeused" for iteration.
    typeused=["H","C","N","O","F","S","P","CL","BR","I"];
    for i in typeareas.iterkeys():
        if i not in typeused:typeused.append(i);

    # For print out the atom area summary
    outputelearea=fnamelist[0]+" Areas: "+totalArea+" Volumes: "+totalVolume+" ";
    fw.write("REMARK AREAS "+totalArea+"\n");
    fw.write("REMARK VOLUMES "+totalVolume+"\n");
    for element in typedefault:
    # If you want all elements, need to comment the above line and uncomment the following line.
    #for element in typeused:
        fw.write("REMARK AREA "+"%2s"%element+" "+"%20.6f"%typeareas.get(element,0.0)+"\n");
        outputelearea=outputelearea+element+": "+str(typeareas[element])+" ";
    print outputelearea

    fr=open(fname)
    atomnum=0;
    for line in fr:
        if (line[:4]=="ATOM" or line[:6]=="HETATM"):
            tmp=line.split();
            element=tmp[-1].upper();
            newline=line.strip('\n')+atomareas[atomnum]+"\n";
            fw.write(newline);
            atomnum=atomnum+1;
        else:
            fw.write(line);
    fr.close();
    fw.close()
#end main
gpl-2.0
-1,598,104,058,107,988,000
32.144144
112
0.65969
false
2.693265
false
false
false